Merge tag 'net-5.18-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 8 Apr 2022 05:01:47 +0000 (19:01 -1000)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 8 Apr 2022 05:01:47 +0000 (19:01 -1000)
Pull networking fixes from Paolo Abeni:
 "Including fixes from bpf and netfilter.

  Current release - new code bugs:

   - mctp: correct mctp_i2c_header_create result

   - eth: fungible: fix reference to __udivdi3 on 32b builds

   - eth: micrel: remove latencies support lan8814

  Previous releases - regressions:

   - bpf: resolve to prog->aux->dst_prog->type only for BPF_PROG_TYPE_EXT

   - vrf: fix packet sniffing for traffic originating from ip tunnels

   - rxrpc: fix a race in rxrpc_exit_net()

   - dsa: revert "net: dsa: stop updating master MTU from master.c"

   - eth: ice: fix MAC address setting

  Previous releases - always broken:

   - tls: fix slab-out-of-bounds bug in decrypt_internal

   - bpf: support dual-stack sockets in bpf_tcp_check_syncookie

   - xdp: fix coalescing for page_pool fragment recycling

   - ovs: fix leak of nested actions

   - eth: sfc:
      - add missing xdp queue reinitialization
      - fix using uninitialized xdp tx_queue

   - eth: ice:
      - clear default forwarding VSI during VSI release
      - fix broken IFF_ALLMULTI handling
      - synchronize_rcu() when terminating rings

   - eth: qede: confirm skb is allocated before using

   - eth: aqc111: fix out-of-bounds accesses in RX fixup

   - eth: slip: fix NPD bug in sl_tx_timeout()"

* tag 'net-5.18-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (61 commits)
  drivers: net: slip: fix NPD bug in sl_tx_timeout()
  bpf: Adjust bpf_tcp_check_syncookie selftest to test dual-stack sockets
  bpf: Support dual-stack sockets in bpf_tcp_check_syncookie
  myri10ge: fix an incorrect free for skb in myri10ge_sw_tso
  net: usb: aqc111: Fix out-of-bounds accesses in RX fixup
  qede: confirm skb is allocated before using
  net: ipv6mr: fix unused variable warning with CONFIG_IPV6_PIMSM_V2=n
  net: phy: mscc-miim: reject clause 45 register accesses
  net: axiemac: use a phandle to reference pcs_phy
  dt-bindings: net: add pcs-handle attribute
  net: axienet: factor out phy_node in struct axienet_local
  net: axienet: setup mdio unconditionally
  net: sfc: fix using uninitialized xdp tx_queue
  rxrpc: fix a race in rxrpc_exit_net()
  net: openvswitch: fix leak of nested actions
  net: ethernet: mv643xx: Fix over zealous checking of_get_mac_address()
  net: openvswitch: don't send internal clone attribute to the userspace.
  net: micrel: Fix KS8851 Kconfig
  ice: clear cmd_type_offset_bsz for TX rings
  ice: xsk: fix VSI state check in ice_xsk_wakeup()
  ...

694 files changed:
.mailmap
Documentation/PCI/pci.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/core-api/xarray.rst
Documentation/dev-tools/kunit/architecture.rst
Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
Documentation/devicetree/bindings/arm/psci.yaml
Documentation/devicetree/bindings/cpu/idle-states.yaml [moved from Documentation/devicetree/bindings/arm/idle-states.yaml with 74% similarity]
Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt [deleted file]
Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
Documentation/filesystems/cifs/ksmbd.rst
Documentation/filesystems/fsverity.rst
Documentation/filesystems/locking.rst
Documentation/filesystems/netfs_library.rst
Documentation/filesystems/vfs.rst
Documentation/kbuild/kbuild.rst
Documentation/kbuild/llvm.rst
Documentation/kbuild/makefiles.rst
Documentation/locking/locktypes.rst
Documentation/maintainer/index.rst
Documentation/maintainer/messy-diffstat.rst [new file with mode: 0644]
Documentation/riscv/index.rst
Documentation/sphinx/kernel_abi.py
Documentation/sphinx/kernel_feat.py
Documentation/sphinx/kernel_include.py
Documentation/sphinx/kerneldoc.py
Documentation/sphinx/kfigure.py
Documentation/sphinx/requirements.txt
Documentation/trace/user_events.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/index.rst
Documentation/virt/kvm/locking.rst
Documentation/virt/kvm/s390/index.rst [new file with mode: 0644]
Documentation/virt/kvm/s390/s390-diag.rst [moved from Documentation/virt/kvm/s390-diag.rst with 100% similarity]
Documentation/virt/kvm/s390/s390-pv-boot.rst [moved from Documentation/virt/kvm/s390-pv-boot.rst with 100% similarity]
Documentation/virt/kvm/s390/s390-pv.rst [moved from Documentation/virt/kvm/s390-pv.rst with 100% similarity]
Documentation/virt/kvm/vcpu-requests.rst
Documentation/virt/kvm/x86/amd-memory-encryption.rst [moved from Documentation/virt/kvm/amd-memory-encryption.rst with 100% similarity]
Documentation/virt/kvm/x86/cpuid.rst [moved from Documentation/virt/kvm/cpuid.rst with 100% similarity]
Documentation/virt/kvm/x86/errata.rst [new file with mode: 0644]
Documentation/virt/kvm/x86/halt-polling.rst [moved from Documentation/virt/kvm/halt-polling.rst with 100% similarity]
Documentation/virt/kvm/x86/hypercalls.rst [moved from Documentation/virt/kvm/hypercalls.rst with 100% similarity]
Documentation/virt/kvm/x86/index.rst [new file with mode: 0644]
Documentation/virt/kvm/x86/mmu.rst [moved from Documentation/virt/kvm/mmu.rst with 100% similarity]
Documentation/virt/kvm/x86/msr.rst [moved from Documentation/virt/kvm/msr.rst with 100% similarity]
Documentation/virt/kvm/x86/nested-vmx.rst [moved from Documentation/virt/kvm/nested-vmx.rst with 100% similarity]
Documentation/virt/kvm/x86/running-nested-guests.rst [moved from Documentation/virt/kvm/running-nested-guests.rst with 100% similarity]
Documentation/virt/kvm/x86/timekeeping.rst [moved from Documentation/virt/kvm/timekeeping.rst with 100% similarity]
Documentation/virt/uml/user_mode_linux_howto_v2.rst
Documentation/vm/page_owner.rst
Documentation/vm/unevictable-lru.rst
MAINTAINERS
Makefile
arch/alpha/include/asm/user.h
arch/alpha/kernel/syscalls/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/spear1340.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/include/asm/user.h
arch/arm/kernel/setup.c
arch/arm/kernel/stacktrace.c
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-s3c/mach-jive.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mm.h
arch/arm/tools/Makefile
arch/arm64/boot/dts/amd/Makefile
arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
arch/arm64/boot/dts/amd/amd-overdrive.dts [deleted file]
arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
arch/arm64/boot/dts/amd/husky.dts [deleted file]
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
arch/h8300/include/asm/user.h
arch/ia64/include/asm/user.h
arch/ia64/kernel/syscalls/Makefile
arch/m68k/include/asm/user.h
arch/m68k/kernel/syscalls/Makefile
arch/microblaze/boot/Makefile
arch/microblaze/boot/dts/Makefile
arch/microblaze/kernel/syscalls/Makefile
arch/mips/crypto/crc32-mips.c
arch/mips/include/asm/mach-rc32434/rb.h
arch/mips/kernel/syscalls/Makefile
arch/mips/lantiq/falcon/sysctrl.c
arch/mips/lantiq/xway/gptu.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/rb532/gpio.c
arch/mips/sgi-ip22/ip22-gio.c
arch/parisc/kernel/syscalls/Makefile
arch/powerpc/include/asm/user.h
arch/powerpc/kernel/syscalls/Makefile
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
arch/riscv/configs/defconfig
arch/riscv/configs/nommu_k210_defconfig
arch/riscv/configs/nommu_k210_sdcard_defconfig
arch/riscv/configs/nommu_virt_defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/cpuidle.h [new file with mode: 0644]
arch/riscv/include/asm/current.h
arch/riscv/include/asm/module.lds.h
arch/riscv/include/asm/suspend.h [new file with mode: 0644]
arch/riscv/include/asm/thread_info.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/cpu.c
arch/riscv/kernel/cpu_ops_sbi.c
arch/riscv/kernel/head.S
arch/riscv/kernel/module.c
arch/riscv/kernel/perf_callchain.c
arch/riscv/kernel/process.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/suspend.c [new file with mode: 0644]
arch/riscv/kernel/suspend_entry.S [new file with mode: 0644]
arch/s390/Kconfig
arch/s390/include/asm/alternative-asm.h
arch/s390/include/asm/alternative.h
arch/s390/include/asm/ap.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/spinlock.h
arch/s390/include/asm/syscall_wrapper.h
arch/s390/include/asm/unwind.h
arch/s390/include/asm/user.h
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/os_info.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls/Makefile
arch/s390/kernel/traps.c
arch/s390/kernel/unwind_bc.c
arch/s390/kvm/kvm-s390.c
arch/s390/lib/spinlock.c
arch/s390/lib/test_unwind.c
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.h
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_event.c
arch/sh/include/asm/user.h
arch/sh/kernel/syscalls/Makefile
arch/sparc/kernel/syscalls/Makefile
arch/um/drivers/mconsole_kern.c
arch/um/drivers/port_user.c
arch/um/drivers/ubd_kern.c
arch/um/drivers/vector_kern.c
arch/um/drivers/vector_kern.h
arch/um/drivers/vector_user.c
arch/um/drivers/vector_user.h
arch/um/include/asm/Kbuild
arch/um/include/asm/xor.h
arch/um/include/shared/os.h
arch/um/kernel/dtb.c
arch/um/os-Linux/file.c
arch/um/os-Linux/helper.c
arch/um/os-Linux/time.c
arch/x86/Kconfig
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/entry/syscalls/Makefile
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/svm.h
arch/x86/include/asm/user_32.h
arch/x86/include/asm/user_64.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/kvm.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/i8254.c
arch/x86/kvm/kvm_emulate.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/pmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_onhyperv.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/lib/csum-partial_64.c
arch/x86/lib/iomem.c
arch/x86/power/cpu.c
arch/x86/um/Kconfig
arch/x86/um/shared/sysdep/syscalls_64.h
arch/x86/um/syscalls_64.c
arch/xtensa/kernel/syscalls/Makefile
block/blk-cgroup.c
block/blk-ioc.c
block/blk-mq.c
block/blk-wbt.h
block/genhd.c
certs/Makefile
certs/system_certificates.S
drivers/acpi/acpi_ipmi.c
drivers/acpi/apei/apei-base.c
drivers/acpi/cppc_acpi.c
drivers/acpi/tables.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/sata_dwc_460ex.c
drivers/auxdisplay/lcd2s.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_req.c
drivers/block/loop.c
drivers/block/n64cart.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/char/Kconfig
drivers/char/random.c
drivers/clk/clk.c
drivers/clk/clk_test.c
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/Makefile
drivers/clk/sunxi-ng/ccu-sun6i-rtc.c [new file with mode: 0644]
drivers/clk/sunxi-ng/ccu-sun6i-rtc.h [new file with mode: 0644]
drivers/clk/sunxi-ng/ccu_common.h
drivers/clk/sunxi-ng/ccu_mux.c
drivers/cpuidle/Kconfig
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/Kconfig.riscv [new file with mode: 0644]
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-psci-domain.c
drivers/cpuidle/cpuidle-psci.h
drivers/cpuidle/cpuidle-riscv-sbi.c [new file with mode: 0644]
drivers/cpuidle/dt_idle_genpd.c [new file with mode: 0644]
drivers/cpuidle/dt_idle_genpd.h [new file with mode: 0644]
drivers/crypto/virtio/Kconfig
drivers/crypto/virtio/Makefile
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c [new file with mode: 0644]
drivers/crypto/virtio/virtio_crypto_common.h
drivers/crypto/virtio/virtio_crypto_core.c
drivers/crypto/virtio/virtio_crypto_mgr.c
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c [moved from drivers/crypto/virtio/virtio_crypto_algs.c with 99% similarity]
drivers/gpio/gpio-ts4900.c
drivers/gpio/gpio-ts5500.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/hid-google-hammer.c
drivers/hid/hid-vivaldi-common.c [new file with mode: 0644]
drivers/hid/hid-vivaldi-common.h [new file with mode: 0644]
drivers/hid/hid-vivaldi.c
drivers/hv/channel_mgmt.c
drivers/hv/hv_balloon.c
drivers/hv/hv_common.c
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/input.c
drivers/input/joystick/adi.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/Makefile
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cros_ec_keyb.c
drivers/input/keyboard/mt6779-keypad.c [new file with mode: 0644]
drivers/input/keyboard/mtk-pmic-keys.c
drivers/input/misc/da9063_onkey.c
drivers/input/mouse/synaptics.c
drivers/input/serio/ps2-gpio.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/goodix.h
drivers/input/touchscreen/imagis.c [new file with mode: 0644]
drivers/input/touchscreen/iqs5xx.c
drivers/input/touchscreen/stmfts.c
drivers/input/touchscreen/tsc200x-core.c
drivers/input/vivaldi-fmap.c [new file with mode: 0644]
drivers/md/dm-core.h
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/vmt.c
drivers/net/virtio_net.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/passthru.c
drivers/nvme/target/rdma.c
drivers/nvme/target/tcp.c
drivers/pci/controller/pci-hyperv.c
drivers/platform/chrome/Makefile
drivers/platform/chrome/cros_ec_debugfs.c
drivers/platform/chrome/cros_ec_sensorhub_ring.c
drivers/platform/chrome/cros_ec_sensorhub_trace.h [new file with mode: 0644]
drivers/platform/chrome/cros_ec_trace.h
drivers/platform/chrome/cros_ec_typec.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ds1685.c
drivers/rtc/rtc-efi.c
drivers/rtc/rtc-gamecube.c
drivers/rtc/rtc-hym8563.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-mc146818-lib.c
drivers/rtc/rtc-mpc5121.c
drivers/rtc/rtc-opal.c
drivers/rtc/rtc-optee.c [new file with mode: 0644]
drivers/rtc/rtc-pcf2123.c
drivers/rtc/rtc-pcf2127.c
drivers/rtc/rtc-pcf85063.c
drivers/rtc/rtc-pcf8523.c
drivers/rtc/rtc-pcf8563.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-pm8xxx.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-sun6i.c
drivers/rtc/rtc-wm8350.c
drivers/rtc/rtc-xgene.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp_con.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/tape_34xx.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/eadm_sch.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_ep11misc.c
drivers/vdpa/ifcvf/ifcvf_base.c
drivers/vdpa/ifcvf/ifcvf_base.h
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vhost/iotlb.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/virt/vmgenid.c
drivers/virtio/Kconfig
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/virtio/virtio_ring.c
drivers/watchdog/Kconfig
drivers/watchdog/aspeed_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/ixp4xx_wdt.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/renesas_wdt.c
drivers/watchdog/rti_wdt.c
drivers/watchdog/sp5100_tco.c
drivers/watchdog/sp5100_tco.h
drivers/watchdog/watchdog_dev.c
fs/9p/cache.c
fs/9p/v9fs.c
fs/9p/v9fs.h
fs/9p/vfs_addr.c
fs/9p/vfs_inode.c
fs/afs/dynroot.c
fs/afs/file.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/super.c
fs/afs/write.c
fs/aio.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/reflink.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/buffer.c
fs/cachefiles/io.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/cache.h
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/super.h
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2glob.h
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/crypto/crypto.c
fs/exfat/exfat_fs.h
fs/exfat/file.c
fs/exfat/namei.c
fs/exfat/super.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/readpage.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/file.c
fs/f2fs/node.c
fs/fscache/internal.h
fs/fuse/fuse_i.h
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/lock_dlm.c
fs/gfs2/rgrp.c
fs/gfs2/rgrp.h
fs/gfs2/super.c
fs/internal.h
fs/io_uring.c
fs/ioctl.c
fs/iomap/buffered-io.c
fs/jffs2/build.c
fs/jffs2/fs.c
fs/jffs2/jffs2_fs_i.h
fs/jffs2/scan.c
fs/kernfs/file.c
fs/ksmbd/oplock.c
fs/ksmbd/server.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb2pdu.h
fs/ksmbd/transport_tcp.c
fs/namespace.c
fs/netfs/Makefile
fs/netfs/buffered_read.c [new file with mode: 0644]
fs/netfs/internal.h
fs/netfs/io.c [new file with mode: 0644]
fs/netfs/main.c [new file with mode: 0644]
fs/netfs/objects.c [new file with mode: 0644]
fs/netfs/read_helper.c [deleted file]
fs/netfs/stats.c
fs/nfs/file.c
fs/nfs/fscache.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/dat.c
fs/nilfs2/gcinode.c
fs/nilfs2/inode.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/segment.c
fs/nilfs2/super.c
fs/ntfs/aops.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/proc/bootconfig.c
fs/read_write.c
fs/seq_file.c
fs/smbfs_common/smb2pdu.h
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/io.c
fs/ubifs/ioctl.c
fs/ubifs/journal.c
fs/ubifs/ubifs.h
fs/unicode/Makefile
fs/verity/verify.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_alloc.h
fs/xfs/xfs_bio_io.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_inode_item.h
fs/xfs/xfs_linux.h
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_super.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans_ail.c
include/asm-generic/mshyperv.h
include/dt-bindings/clock/sun6i-rtc.h [new file with mode: 0644]
include/linux/balloon_compaction.h
include/linux/blk-cgroup.h
include/linux/blk_types.h
include/linux/clk/sunxi-ng.h
include/linux/cma.h
include/linux/fs.h
include/linux/fscache.h
include/linux/fsverity.h
include/linux/ftrace.h
include/linux/gfp.h
include/linux/gpio/driver.h
include/linux/input.h
include/linux/input/vivaldi-fmap.h [new file with mode: 0644]
include/linux/kvm_host.h
include/linux/kvm_types.h
include/linux/mc146818rtc.h
include/linux/net.h
include/linux/netfs.h
include/linux/nvme.h
include/linux/pagemap.h
include/linux/pci-dma-compat.h [deleted file]
include/linux/pci.h
include/linux/rtc.h
include/linux/rtc/ds1685.h
include/linux/sbitmap.h
include/linux/sched.h
include/linux/seq_file.h
include/linux/user_events.h [moved from include/uapi/linux/user_events.h with 58% similarity]
include/linux/vdpa.h
include/linux/virtio_config.h
include/linux/xarray.h
include/sound/pcm.h
include/trace/events/cachefiles.h
include/trace/events/netfs.h
include/trace/stages/stage1_struct_define.h [moved from include/trace/stages/stage1_defines.h with 100% similarity]
include/trace/stages/stage2_data_offsets.h [moved from include/trace/stages/stage2_defines.h with 100% similarity]
include/trace/stages/stage3_trace_output.h [moved from include/trace/stages/stage3_defines.h with 100% similarity]
include/trace/stages/stage4_event_fields.h [moved from include/trace/stages/stage4_defines.h with 100% similarity]
include/trace/stages/stage5_get_offsets.h [moved from include/trace/stages/stage5_defines.h with 100% similarity]
include/trace/stages/stage6_event_callback.h [moved from include/trace/stages/stage6_defines.h with 100% similarity]
include/trace/stages/stage7_class_define.h [moved from include/trace/stages/stage7_defines.h with 100% similarity]
include/trace/trace_custom_events.h
include/trace/trace_events.h
include/uapi/linux/io_uring.h
include/uapi/linux/loop.h
include/uapi/linux/rtc.h
include/uapi/linux/vhost.h
include/uapi/linux/virtio_config.h
include/uapi/linux/virtio_crypto.h
init/Kconfig
kernel/Kconfig.preempt
kernel/dma/direct.c
kernel/dma/mapping.c
kernel/entry/common.c
kernel/signal.c
kernel/trace/Kconfig
kernel/trace/fgraph.c
kernel/trace/trace_events_user.c
kernel/watch_queue.c
lib/logic_iomem.c
lib/sbitmap.c
lib/test_xarray.c
lib/xarray.c
mm/balloon_compaction.c
mm/damon/core.c
mm/filemap.c
mm/gup.c
mm/internal.h
mm/kfence/core.c
mm/kfence/kfence.h
mm/kmemleak.c
mm/madvise.c
mm/memory.c
mm/migrate.c
mm/mlock.c
mm/page_alloc.c
mm/readahead.c
mm/rmap.c
mm/swap.c
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.lib
scripts/basic/fixdep.c
scripts/get_abi.pl
scripts/get_feat.pl
scripts/kallsyms.c
scripts/kconfig/confdata.c
scripts/kernel-doc
scripts/link-vmlinux.sh
scripts/mod/modpost.c
security/Kconfig
sound/core/pcm.c
sound/core/pcm_lib.c
sound/core/pcm_native.c
sound/isa/cs423x/cs4236.c
sound/pci/hda/patch_cs8409-tables.c
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_cs8409.h
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/mt6358.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/sof/intel/Kconfig
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/bpf/bpftool/Makefile
tools/build/Makefile
tools/counter/Makefile
tools/gpio/Makefile
tools/hv/Makefile
tools/iio/Makefile
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/kvm.h
tools/lib/api/Makefile
tools/lib/bpf/Makefile
tools/lib/perf/Makefile
tools/lib/perf/cpumap.c
tools/lib/perf/evlist.c
tools/lib/perf/include/internal/cpumap.h
tools/lib/perf/include/internal/evlist.h
tools/lib/subcmd/Makefile
tools/objtool/Makefile
tools/pci/Makefile
tools/perf/Makefile.perf
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/bench/evlist-open-close.c
tools/perf/builtin-ftrace.c
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/python/tracepoint.py
tools/perf/trace/beauty/include/linux/socket.h
tools/perf/util/auxtrace.c
tools/perf/util/bpf_ftrace.c
tools/perf/util/evlist.c
tools/perf/util/hashmap.c
tools/perf/util/record.c
tools/perf/util/sideband_evlist.c
tools/perf/util/stat-display.c
tools/perf/util/synthetic-events.c
tools/perf/util/top.c
tools/power/x86/intel-speed-select/Makefile
tools/scripts/Makefile.include
tools/scripts/utilities.mak
tools/spi/Makefile
tools/testing/selftests/lib.mk
tools/testing/selftests/x86/amx.c
tools/tracing/rtla/Makefile
tools/usb/Makefile
tools/virtio/Makefile
tools/virtio/linux/dma-mapping.h
tools/vm/page_owner_sort.c
usr/Makefile
usr/include/Makefile
virt/kvm/kvm_main.c
virt/kvm/pfncache.c

index 8fd9b3c..b9d3582 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -213,6 +213,7 @@ Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
 Kenneth W Chen <kenneth.w.chen@intel.com>
+Kirill Tkhai <kirill.tkhai@openvz.org> <ktkhai@virtuozzo.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
index 87c6f4a..67a850b 100644 (file)
@@ -278,20 +278,20 @@ appropriate parameters.  In general this allows more efficient DMA
 on systems where System RAM exists above 4G _physical_ address.
 
 Drivers for all PCI-X and PCIe compliant devices must call
-pci_set_dma_mask() as they are 64-bit DMA devices.
+set_dma_mask() as they are 64-bit DMA devices.
 
 Similarly, drivers must also "register" this capability if the device
-can directly address "consistent memory" in System RAM above 4G physical
-address by calling pci_set_consistent_dma_mask().
+can directly address "coherent memory" in System RAM above 4G physical
+address by calling dma_set_coherent_mask().
 Again, this includes drivers for all PCI-X and PCIe compliant devices.
 Many 64-bit "PCI" devices (before PCI-X) and some PCI-X devices are
 64-bit DMA capable for payload ("streaming") data but not control
-("consistent") data.
+("coherent") data.
 
 
 Setup shared control data
 -------------------------
-Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared)
+Once the DMA masks are set, the driver can allocate "coherent" (a.k.a. shared)
 memory.  See Documentation/core-api/dma-api.rst for a full description of
 the DMA APIs. This section is just a reminder that it needs to be done
 before enabling DMA on the device.
@@ -367,7 +367,7 @@ steps need to be performed:
   - Disable the device from generating IRQs
   - Release the IRQ (free_irq())
   - Stop all DMA activity
-  - Release DMA buffers (both streaming and consistent)
+  - Release DMA buffers (both streaming and coherent)
   - Unregister from other subsystems (e.g. scsi or netdev)
   - Disable device from responding to MMIO/IO Port addresses
   - Release MMIO/IO Port resource(s)
@@ -420,7 +420,7 @@ Once DMA is stopped, clean up streaming DMA first.
 I.e. unmap data buffers and return buffers to "upstream"
 owners if there is one.
 
-Then clean up "consistent" buffers which contain the control data.
+Then clean up "coherent" buffers which contain the control data.
 
 See Documentation/core-api/dma-api.rst for details on unmapping interfaces.
 
index b7ccaa2..3f1cc5e 100644 (file)
                        fully seed the kernel's CRNG. Default is controlled
                        by CONFIG_RANDOM_TRUST_CPU.
 
+       random.trust_bootloader={on,off}
+                       [KNL] Enable or disable trusting the use of a
+                       seed passed by the bootloader (if available) to
+                       fully seed the kernel's CRNG. Default is controlled
+                       by CONFIG_RANDOM_TRUST_BOOTLOADER.
+
        randomize_kstack_offset=
                        [KNL] Enable or disable kernel stack offset
                        randomization, which provides roughly 5 bits of
index a137a0e..77e0ece 100644 (file)
@@ -315,11 +315,15 @@ indeed the normal API is implemented in terms of the advanced API.  The
 advanced API is only available to modules with a GPL-compatible license.
 
 The advanced API is based around the xa_state.  This is an opaque data
-structure which you declare on the stack using the XA_STATE()
-macro.  This macro initialises the xa_state ready to start walking
-around the XArray.  It is used as a cursor to maintain the position
-in the XArray and let you compose various operations together without
-having to restart from the top every time.
+structure which you declare on the stack using the XA_STATE() macro.
+This macro initialises the xa_state ready to start walking around the
+XArray.  It is used as a cursor to maintain the position in the XArray
+and let you compose various operations together without having to restart
+from the top every time.  The contents of the xa_state are protected by
+the rcu_read_lock() or the xas_lock().  If you need to drop whichever of
+those locks is protecting your state and tree, you must call xas_pause()
+so that future calls do not rely on the parts of the state which were
+left unprotected.
 
 The xa_state is also used to store errors.  You can call
 xas_error() to retrieve the error.  All operations check whether
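For context on the xas_pause() rule added above (an illustrative sketch, not part of the patch; the walk_entries name is made up), a typical advanced-API walk that has to drop the RCU lock mid-iteration looks roughly like::

	#include <linux/xarray.h>
	#include <linux/sched.h>

	/* Visit every present entry in @array up to @max, rescheduling as needed. */
	static void walk_entries(struct xarray *array, unsigned long max)
	{
		XA_STATE(xas, array, 0);
		void *entry;

		rcu_read_lock();
		xas_for_each(&xas, entry, max) {
			/* ... inspect @entry ... */
			if (need_resched()) {
				/*
				 * The xa_state is about to lose RCU protection,
				 * so pause it; the next iteration then restarts
				 * safely from the following index.
				 */
				xas_pause(&xas);
				rcu_read_unlock();
				cond_resched();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}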
index aa2cea8..ff9c85a 100644 (file)
@@ -26,10 +26,7 @@ The fundamental unit in KUnit is the test case. The KUnit test cases are
 grouped into KUnit suites. A KUnit test case is a function with type
 signature ``void (*)(struct kunit *test)``.
 These test case functions are wrapped in a struct called
-``struct kunit_case``. For code, see:
-
-.. kernel-doc:: include/kunit/test.h
-       :identifiers: kunit_case
+struct kunit_case.
 
 .. note:
        ``generate_params`` is optional for non-parameterized tests.
@@ -152,18 +149,12 @@ Parameterized Tests
 Each KUnit parameterized test is associated with a collection of
 parameters. The test is invoked multiple times, once for each parameter
 value and the parameter is stored in the ``param_value`` field.
-The test case includes a ``KUNIT_CASE_PARAM()`` macro that accepts a
+The test case includes a KUNIT_CASE_PARAM() macro that accepts a
 generator function.
 The generator function is passed the previous parameter and returns the next
 parameter. It also provides a macro to generate common-case generators based on
 arrays.
 
-For code, see:
-
-.. kernel-doc:: include/kunit/test.h
-       :identifiers: KUNIT_ARRAY_PARAM
-
-
 kunit_tool (Command Line Test Harness)
 ======================================
 
index 6ce0b21..606b4b1 100644 (file)
@@ -81,4 +81,4 @@ Example:
                };
        };
 
-[1]. Documentation/devicetree/bindings/arm/idle-states.yaml
+[1]. Documentation/devicetree/bindings/cpu/idle-states.yaml
index 8b77cf8..dd83ef2 100644 (file)
@@ -101,7 +101,7 @@ properties:
       bindings in [1]) must specify this property.
 
       [1] Kernel documentation - ARM idle states bindings
-        Documentation/devicetree/bindings/arm/idle-states.yaml
+        Documentation/devicetree/bindings/cpu/idle-states.yaml
 
 patternProperties:
   "^power-domain-":
@@ -1,25 +1,30 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/idle-states.yaml#
+$id: http://devicetree.org/schemas/cpu/idle-states.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: ARM idle states binding description
+title: Idle states binding description
 
 maintainers:
   - Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+  - Anup Patel <anup@brainfault.org>
 
 description: |+
   ==========================================
   1 - Introduction
   ==========================================
 
-  ARM systems contain HW capable of managing power consumption dynamically,
-  where cores can be put in different low-power states (ranging from simple wfi
-  to power gating) according to OS PM policies. The CPU states representing the
-  range of dynamic idle states that a processor can enter at run-time, can be
-  specified through device tree bindings representing the parameters required to
-  enter/exit specific idle states on a given processor.
+  ARM and RISC-V systems contain HW capable of managing power consumption
+  dynamically, where cores can be put in different low-power states (ranging
+  from simple wfi to power gating) according to OS PM policies. The CPU states
+  representing the range of dynamic idle states that a processor can enter at
+  run-time, can be specified through device tree bindings representing the
+  parameters required to enter/exit specific idle states on a given processor.
+
+  ==========================================
+  2 - ARM idle states
+  ==========================================
 
   According to the Server Base System Architecture document (SBSA, [3]), the
   power states an ARM CPU can be put into are identified by the following list:
@@ -43,8 +48,23 @@ description: |+
   The device tree binding definition for ARM idle states is the subject of this
   document.
 
+  ==========================================
+  3 - RISC-V idle states
+  ==========================================
+
+  On RISC-V systems, the HARTs (or CPUs) [6] can be put in platform specific
+  suspend (or idle) states (ranging from simple WFI to power gating). The
+  RISC-V SBI v0.3 (or higher) [7] hart state management extension provides a
+  standard mechanism for OS to request HART state transitions.
+
+  The platform specific suspend (or idle) states of a hart can be either
+  retentive or non-retentive in nature. A retentive suspend state will
+  preserve HART registers and CSR values for all privilege modes whereas
+  a non-retentive suspend state will not preserve HART registers and CSR
+  values.
+
   ===========================================
-  2 - idle-states definitions
+  4 - idle-states definitions
   ===========================================
 
   Idle states are characterized for a specific system through a set of
@@ -211,10 +231,10 @@ description: |+
   properties specification that is the subject of the following sections.
 
   ===========================================
-  3 - idle-states node
+  5 - idle-states node
   ===========================================
 
-  ARM processor idle states are defined within the idle-states node, which is
+  The processor idle states are defined within the idle-states node, which is
   a direct child of the cpus node [1] and provides a container where the
   processor idle states, defined as device tree nodes, are listed.
 
@@ -223,7 +243,7 @@ description: |+
   just supports idle_standby, an idle-states node is not required.
 
   ===========================================
-  4 - References
+  6 - References
   ===========================================
 
   [1] ARM Linux Kernel documentation - CPUs bindings
@@ -238,9 +258,15 @@ description: |+
   [4] ARM Architecture Reference Manuals
       http://infocenter.arm.com/help/index.jsp
 
-  [6] ARM Linux Kernel documentation - Booting AArch64 Linux
+  [5] ARM Linux Kernel documentation - Booting AArch64 Linux
       Documentation/arm64/booting.rst
 
+  [6] RISC-V Linux Kernel documentation - CPUs bindings
+      Documentation/devicetree/bindings/riscv/cpus.yaml
+
+  [7] RISC-V Supervisor Binary Interface (SBI)
+      http://github.com/riscv/riscv-sbi-doc/riscv-sbi.adoc
+
 properties:
   $nodename:
     const: idle-states
@@ -253,7 +279,7 @@ properties:
       On ARM 32-bit systems this property is optional
 
       This assumes that the "enable-method" property is set to "psci" in the cpu
-      node[6] that is responsible for setting up CPU idle management in the OS
+      node[5] that is responsible for setting up CPU idle management in the OS
       implementation.
     const: psci
 
@@ -265,8 +291,8 @@ patternProperties:
       as follows.
 
       The idle state entered by executing the wfi instruction (idle_standby
-      SBSA,[3][4]) is considered standard on all ARM platforms and therefore
-      must not be listed.
+      SBSA,[3][4]) is considered standard on all ARM and RISC-V platforms and
+      therefore must not be listed.
 
       In addition to the properties listed above, a state node may require
       additional properties specific to the entry-method defined in the
@@ -275,7 +301,27 @@ patternProperties:
 
     properties:
       compatible:
-        const: arm,idle-state
+        enum:
+          - arm,idle-state
+          - riscv,idle-state
+
+      arm,psci-suspend-param:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: |
+          power_state parameter to pass to the ARM PSCI suspend call.
+
+          Device tree nodes that require usage of PSCI CPU_SUSPEND function
+          (i.e. idle states node with entry-method property is set to "psci")
+          must specify this property.
+
+      riscv,sbi-suspend-param:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: |
+          suspend_type parameter to pass to the RISC-V SBI HSM suspend call.
+
+          This property is required in idle state nodes of device tree meant
+          for RISC-V systems. For more details on the suspend_type parameter
+          refer to the SBI specification v0.3 (or higher) [7].
 
       local-timer-stop:
         description:
@@ -317,6 +363,8 @@ patternProperties:
         description:
           A string used as a descriptive name for the idle state.
 
+    additionalProperties: false
+
     required:
       - compatible
       - entry-latency-us
@@ -658,4 +706,150 @@ examples:
         };
     };
 
+  - |
+    // Example 3 (RISC-V 64-bit, 4-cpu systems, two clusters):
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <1>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x0>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>,
+                            <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>;
+
+            cpu_intc0: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x1>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>,
+                            <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>;
+
+            cpu_intc1: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@10 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x10>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>,
+                            <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>;
+
+            cpu_intc10: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@11 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x11>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>,
+                            <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>;
+
+            cpu_intc11: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        idle-states {
+            CPU_RET_0_0: cpu-retentive-0-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x10000000>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CPU_NONRET_0_0: cpu-nonretentive-0-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x90000000>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_RET_0: cluster-retentive-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x11000000>;
+                local-timer-stop;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CLUSTER_NONRET_0: cluster-nonretentive-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x91000000>;
+                local-timer-stop;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+
+            CPU_RET_1_0: cpu-retentive-1-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x10000010>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CPU_NONRET_1_0: cpu-nonretentive-1-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x90000010>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_RET_1: cluster-retentive-1 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x11000010>;
+                local-timer-stop;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CLUSTER_NONRET_1: cluster-nonretentive-1 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x91000010>;
+                local-timer-stop;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+        };
+    };
+
 ...
diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
new file mode 100644 (file)
index 0000000..b177064
--- /dev/null
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/mediatek,mt6779-keypad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek's Keypad Controller device tree bindings
+
+maintainers:
+  - Fengping Yu <fengping.yu@mediatek.com>
+
+allOf:
+  - $ref: "/schemas/input/matrix-keymap.yaml#"
+
+description: |
+  Mediatek's Keypad controller is used to interface a SoC with a matrix-type
+  keypad device. The keypad controller supports multiple row and column lines.
+  A key can be placed at each intersection of a unique row and a unique column.
+  The keypad controller can sense a key-press and key-release and report the
+  event using an interrupt to the CPU.
+
+properties:
+  compatible:
+    oneOf:
+      - const: mediatek,mt6779-keypad
+      - items:
+          - enum:
+              - mediatek,mt6873-keypad
+          - const: mediatek,mt6779-keypad
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: kpd
+
+  wakeup-source:
+    description: use any event on keypad as wakeup event
+    type: boolean
+
+  debounce-delay-ms:
+    maximum: 256
+    default: 16
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/input/input.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    soc {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        keyboard@10010000 {
+          compatible = "mediatek,mt6779-keypad";
+          reg = <0 0x10010000 0 0x1000>;
+          interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_FALLING>;
+          clocks = <&clk26m>;
+          clock-names = "kpd";
+        };
+    };
index 535d928..9d00f2a 100644 (file)
@@ -9,7 +9,10 @@ For MT6397/MT6323 MFD bindings see:
 Documentation/devicetree/bindings/mfd/mt6397.txt
 
 Required properties:
-- compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
+- compatible: Should be one of:
+       - "mediatek,mt6397-keys"
+       - "mediatek,mt6323-keys"
+       - "mediatek,mt6358-keys"
 - linux,keycodes: See Documentation/devicetree/bindings/input/input.yaml
 
 Optional Properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
new file mode 100644 (file)
index 0000000..e3a2b87
--- /dev/null
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/imagis,ist3038c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagis IST30XXC family touchscreen controller bindings
+
+maintainers:
+  - Markuss Broks <markuss.broks@gmail.com>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  $nodename:
+    pattern: "^touchscreen@[0-9a-f]+$"
+
+  compatible:
+    enum:
+      - imagis,ist3038c
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  vdd-supply:
+    description: Power supply regulator for the chip
+
+  vddio-supply:
+    description: Power supply regulator for the I2C bus
+
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-fuzz-x: true
+  touchscreen-fuzz-y: true
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - touchscreen-size-x
+  - touchscreen-size-y
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      touchscreen@50 {
+        compatible = "imagis,ist3038c";
+        reg = <0x50>;
+        interrupt-parent = <&gpio>;
+        interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+        vdd-supply = <&ldo1_reg>;
+        vddio-supply = <&ldo2_reg>;
+        touchscreen-size-x = <720>;
+        touchscreen-size-y = <1280>;
+        touchscreen-fuzz-x = <10>;
+        touchscreen-fuzz-y = <10>;
+        touchscreen-inverted-x;
+        touchscreen-inverted-y;
+      };
+    };
+
+...
index aa5fb64..d632ac7 100644 (file)
@@ -99,6 +99,14 @@ properties:
       - compatible
       - interrupt-controller
 
+  cpu-idle-states:
+    $ref: '/schemas/types.yaml#/definitions/phandle-array'
+    items:
+      maxItems: 1
+    description: |
+      List of phandles to idle state nodes supported
+      by this hart (see ./idle-states.yaml).
+
 required:
   - riscv,isa
   - interrupt-controller
index beeb90e..0b767fe 100644 (file)
@@ -16,16 +16,22 @@ properties:
 
   compatible:
     oneOf:
-      - const: allwinner,sun6i-a31-rtc
-      - const: allwinner,sun8i-a23-rtc
-      - const: allwinner,sun8i-h3-rtc
-      - const: allwinner,sun8i-r40-rtc
-      - const: allwinner,sun8i-v3-rtc
-      - const: allwinner,sun50i-h5-rtc
+      - enum:
+          - allwinner,sun6i-a31-rtc
+          - allwinner,sun8i-a23-rtc
+          - allwinner,sun8i-h3-rtc
+          - allwinner,sun8i-r40-rtc
+          - allwinner,sun8i-v3-rtc
+          - allwinner,sun50i-h5-rtc
+          - allwinner,sun50i-h6-rtc
+          - allwinner,sun50i-h616-rtc
+          - allwinner,sun50i-r329-rtc
       - items:
           - const: allwinner,sun50i-a64-rtc
           - const: allwinner,sun8i-h3-rtc
-      - const: allwinner,sun50i-h6-rtc
+      - items:
+          - const: allwinner,sun20i-d1-rtc
+          - const: allwinner,sun50i-r329-rtc
 
   reg:
     maxItems: 1
@@ -37,7 +43,12 @@ properties:
       - description: RTC Alarm 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    maxItems: 4
+
+  clock-names:
+    minItems: 1
+    maxItems: 4
 
   clock-output-names:
     minItems: 1
@@ -85,6 +96,7 @@ allOf:
             enum:
               - allwinner,sun8i-h3-rtc
               - allwinner,sun50i-h5-rtc
+              - allwinner,sun50i-h6-rtc
 
     then:
       properties:
@@ -96,19 +108,68 @@ allOf:
       properties:
         compatible:
           contains:
-            const: allwinner,sun50i-h6-rtc
+            const: allwinner,sun50i-h616-rtc
 
     then:
       properties:
-        clock-output-names:
+        clocks:
           minItems: 3
           maxItems: 3
+          items:
+            - description: Bus clock for register access
+            - description: 24 MHz oscillator
+            - description: 32 kHz clock from the CCU
+
+        clock-names:
+          minItems: 3
+          maxItems: 3
+          items:
+            - const: bus
+            - const: hosc
+            - const: pll-32k
+
+      required:
+        - clocks
+        - clock-names
 
   - if:
       properties:
         compatible:
           contains:
-            const: allwinner,sun8i-r40-rtc
+            const: allwinner,sun50i-r329-rtc
+
+    then:
+      properties:
+        clocks:
+          minItems: 3
+          maxItems: 4
+          items:
+            - description: Bus clock for register access
+            - description: 24 MHz oscillator
+            - description: AHB parent for internal SPI clock
+            - description: External 32768 Hz oscillator
+
+        clock-names:
+          minItems: 3
+          maxItems: 4
+          items:
+            - const: bus
+            - const: hosc
+            - const: ahb
+            - const: ext-osc32k
+
+      required:
+        - clocks
+        - clock-names
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun8i-r40-rtc
+              - allwinner,sun50i-h616-rtc
+              - allwinner,sun50i-r329-rtc
 
     then:
       properties:
@@ -127,7 +188,6 @@ required:
   - compatible
   - reg
   - interrupts
-  - clock-output-names
 
 additionalProperties: false
 
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt
deleted file mode 100644 (file)
index 3f0e2a5..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-Atmel AT91SAM9260 Real Time Timer
-
-Required properties:
-- compatible: should be one of the following:
-       - "atmel,at91sam9260-rtt"
-       - "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"
-- reg: should encode the memory region of the RTT controller
-- interrupts: rtt alarm/event interrupt
-- clocks: should contain the 32 KHz slow clk that will drive the RTT block.
-- atmel,rtt-rtc-time-reg: should encode the GPBR register used to store
-       the time base when the RTT is used as an RTC.
-       The first cell should point to the GPBR node and the second one
-       encode the offset within the GPBR block (or in other words, the
-       GPBR register used to store the time base).
-
-
-Example:
-
-rtt@fffffd20 {
-       compatible = "atmel,at91sam9260-rtt";
-       reg = <0xfffffd20 0x10>;
-       interrupts = <1 4 7>;
-       clocks = <&clk32k>;
-       atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
new file mode 100644 (file)
index 0000000..0ef1b7f
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/atmel,at91sam9260-rtt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel AT91 RTT Device Tree Bindings
+
+allOf:
+  - $ref: "rtc.yaml#"
+
+maintainers:
+  - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - const: atmel,at91sam9260-rtt
+      - items:
+          - const: microchip,sam9x60-rtt
+          - const: atmel,at91sam9260-rtt
+      - items:
+          - const: microchip,sama7g5-rtt
+          - const: microchip,sam9x60-rtt
+          - const: atmel,at91sam9260-rtt
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  atmel,rtt-rtc-time-reg:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    items:
+      - items:
+          - description: Phandle to the GPBR node.
+          - description: Offset within the GPBR block.
+    description:
+      Should encode the GPBR register used to store the time base when the
+      RTT is used as an RTC. The first cell should point to the GPBR node
+      and the second one encodes the offset within the GPBR block (or in
+      other words, the GPBR register used to store the time base).
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - atmel,rtt-rtc-time-reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    rtc@fffffd20 {
+        compatible = "atmel,at91sam9260-rtt";
+        reg = <0xfffffd20 0x10>;
+        interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+        clocks = <&clk32k>;
+        atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+    };
index 8fe2d93..0143097 100644 (file)
@@ -560,6 +560,8 @@ patternProperties:
     description: Ingenieurburo Fur Ic-Technologie (I/F/I)
   "^ilitek,.*":
     description: ILI Technology Corporation (ILITEK)
+  "^imagis,.*":
+    description: Imagis Technologies Co., Ltd.
   "^img,.*":
     description: Imagination Technologies Ltd.
   "^imi,.*":
index 91a98cc..d060438 100644 (file)
@@ -55,6 +55,11 @@ properties:
               - renesas,r8a779a0-wdt     # R-Car V3U
           - const: renesas,rcar-gen3-wdt # R-Car Gen3 and RZ/G2
 
+      - items:
+          - enum:
+              - renesas,r8a779f0-wdt     # R-Car S4-8
+          - const: renesas,rcar-gen4-wdt # R-Car Gen4
+
   reg:
     maxItems: 1
 
index b0d354f..1af600d 100644 (file)
@@ -82,10 +82,10 @@ Signing Update                 Supported.
 Pre-authentication integrity   Supported.
 SMB3 encryption(CCM, GCM)      Supported. (CCM and GCM128 supported, GCM256 in
                                progress)
-SMB direct(RDMA)               Partially Supported. SMB3 Multi-channel is
-                               required to connect to Windows client.
+SMB direct(RDMA)               Supported.
 SMB3 Multi-channel             Partially Supported. Planned to implement
                                replay/retry mechanisms for future.
+Receive Side Scaling mode      Supported.
 SMB3.1.1 POSIX extension       Supported.
 ACLs                           Partially Supported. only DACLs available, SACLs
                                (auditing) is planned for the future. For
index 1d831e3..8cc536d 100644 (file)
@@ -549,7 +549,7 @@ Pagecache
 ~~~~~~~~~
 
 For filesystems using Linux's pagecache, the ``->readpage()`` and
-``->readpages()`` methods must be modified to verify pages before they
+``->readahead()`` methods must be modified to verify pages before they
 are marked Uptodate.  Merely hooking ``->read_iter()`` would be
 insufficient, since ``->read_iter()`` is not used for memory maps.
 
@@ -611,7 +611,7 @@ workqueue, and then the workqueue work does the decryption or
 verification.  Finally, pages where no decryption or verity error
 occurred are marked Uptodate, and the pages are unlocked.
 
-Files on ext4 and f2fs may contain holes.  Normally, ``->readpages()``
+Files on ext4 and f2fs may contain holes.  Normally, ``->readahead()``
 simply zeroes holes and sets the corresponding pages Uptodate; no bios
 are issued.  To prevent this case from bypassing fs-verity, these
 filesystems use fsverity_verify_page() to verify hole pages.
@@ -778,7 +778,7 @@ weren't already directly answered in other parts of this document.
     - To prevent bypassing verification, pages must not be marked
       Uptodate until they've been verified.  Currently, each
       filesystem is responsible for marking pages Uptodate via
-      ``->readpages()``.  Therefore, currently it's not possible for
+      ``->readahead()``.  Therefore, currently it's not possible for
       the VFS to do the verification on its own.  Changing this would
       require significant changes to the VFS and all filesystems.
 
index 2998cec..c26d854 100644 (file)
@@ -241,8 +241,6 @@ prototypes::
        int (*writepages)(struct address_space *, struct writeback_control *);
        bool (*dirty_folio)(struct address_space *, struct folio *folio);
        void (*readahead)(struct readahead_control *);
-       int (*readpages)(struct file *filp, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages);
        int (*write_begin)(struct file *, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata);
@@ -274,7 +272,6 @@ readpage:           yes, unlocks                            shared
 writepages:
 dirty_folio            maybe
 readahead:             yes, unlocks                            shared
-readpages:             no                                      shared
 write_begin:           locks the page           exclusive
 write_end:             yes, unlocks             exclusive
 bmap:
@@ -300,9 +297,6 @@ completion.
 
 ->readahead() unlocks the pages that I/O is attempted on like ->readpage().
 
-->readpages() populates the pagecache with the passed pages and starts
-I/O against them.  They come unlocked upon I/O completion.
-
 ->writepage() is used for two purposes: for "memory cleansing" and for
 "sync".  These are quite different operations and the behaviour may differ
 depending upon the mode.
index 4f373a8..69f0017 100644 (file)
@@ -7,6 +7,8 @@ Network Filesystem Helper Library
 .. Contents:
 
  - Overview.
+ - Per-inode context.
+   - Inode context helper functions.
  - Buffered read helpers.
    - Read helper functions.
    - Read helper structures.
@@ -28,6 +30,69 @@ Note that the library module doesn't link against local caching directly, so
 access must be provided by the netfs.
 
 
+Per-Inode Context
+=================
+
+The network filesystem helper library needs a place to store a bit of state for
+its use on each netfs inode it is helping to manage.  To this end, a context
+structure is defined::
+
+       struct netfs_i_context {
+               const struct netfs_request_ops *ops;
+               struct fscache_cookie   *cache;
+       };
+
+A network filesystem that wants to use netfs lib must place one of these
+directly after the VFS ``struct inode`` it allocates, usually as part of its
+own struct.  This can be done in a way similar to the following::
+
+       struct my_inode {
+               struct {
+                       /* These must be contiguous */
+                       struct inode            vfs_inode;
+                       struct netfs_i_context  netfs_ctx;
+               };
+               ...
+       };
+
+This allows netfslib to find its state by simple offset from the inode pointer,
+thereby allowing the netfslib helper functions to be pointed to directly by the
+VFS/VM operation tables.
+
+The structure contains the following fields:
+
+ * ``ops``
+
+   The set of operations provided by the network filesystem to netfslib.
+
+ * ``cache``
+
+   Local caching cookie, or NULL if no caching is enabled.  This field does not
+   exist if fscache is disabled.
+
+
+Inode Context Helper Functions
+------------------------------
+
+To help deal with the per-inode context, a number of helper functions are
+provided.  Firstly, a function to perform basic initialisation on a context and
+set the operations table pointer::
+
+       void netfs_i_context_init(struct inode *inode,
+                                 const struct netfs_request_ops *ops);
+
+then two functions to cast between the VFS inode structure and the netfs
+context::
+
+       struct netfs_i_context *netfs_i_context(struct inode *inode);
+       struct inode *netfs_inode(struct netfs_i_context *ctx);
+
+and finally, a function to get the cache cookie pointer from the context
+attached to an inode (or NULL if fscache is disabled)::
+
+       struct fscache_cookie *netfs_i_cookie(struct inode *inode);
+
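+As an illustration only (not taken from any particular filesystem; the
+``my_*`` names are hypothetical), a netfs might wire this up when it sets up
+an inode::
+
+	static void my_set_netfs_context(struct my_inode *mi)
+	{
+		netfs_i_context_init(&mi->vfs_inode, &my_request_ops);
+	}
+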
+
 Buffered Read Helpers
 =====================
 
@@ -70,38 +135,22 @@ Read Helper Functions
 
 Three read helpers are provided::
 
-       void netfs_readahead(struct readahead_control *ractl,
-                            const struct netfs_read_request_ops *ops,
-                            void *netfs_priv);
+       void netfs_readahead(struct readahead_control *ractl);
        int netfs_readpage(struct file *file,
-                          struct folio *folio,
-                          const struct netfs_read_request_ops *ops,
-                          void *netfs_priv);
+                          struct page *page);
        int netfs_write_begin(struct file *file,
                              struct address_space *mapping,
                              loff_t pos,
                              unsigned int len,
                              unsigned int flags,
                              struct folio **_folio,
-                             void **_fsdata,
-                             const struct netfs_read_request_ops *ops,
-                             void *netfs_priv);
-
-Each corresponds to a VM operation, with the addition of a couple of parameters
-for the use of the read helpers:
+                             void **_fsdata);
 
- * ``ops``
-
-   A table of operations through which the helpers can talk to the filesystem.
-
- * ``netfs_priv``
+Each corresponds to a VM address space operation.  These operations use the
+state in the per-inode context.
 
-   Filesystem private data (can be NULL).
-
-Both of these values will be stored into the read request structure.
-
-For ->readahead() and ->readpage(), the network filesystem should just jump
-into the corresponding read helper; whereas for ->write_begin(), it may be a
+For ->readahead() and ->readpage(), the network filesystem just points directly
+at the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
 conflicting writes or track dirty data and needs to put the acquired folio if
 an error occurs after calling the helper.
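+
+As an example (a sketch only; ``my_aops`` and ``my_write_begin`` are
+hypothetical names), the address space operations of such a filesystem might
+point straight at the helpers::
+
+	const struct address_space_operations my_aops = {
+		.readahead	= netfs_readahead,
+		.readpage	= netfs_readpage,
+		.write_begin	= my_write_begin,
+	};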
@@ -116,7 +165,7 @@ occurs, the request will get partially completed if sufficient data is read.
 
 Additionally, there is::
 
-  * void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
+  * void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
                                 ssize_t transferred_or_error,
                                 bool was_async);
 
@@ -132,7 +181,7 @@ Read Helper Structures
 The read helpers make use of a couple of structures to maintain the state of
 the read.  The first is a structure that manages a read request as a whole::
 
-       struct netfs_read_request {
+       struct netfs_io_request {
                struct inode            *inode;
                struct address_space    *mapping;
                struct netfs_cache_resources cache_resources;
@@ -140,7 +189,7 @@ the read.  The first is a structure that manages a read request as a whole::
                loff_t                  start;
                size_t                  len;
                loff_t                  i_size;
-               const struct netfs_read_request_ops *netfs_ops;
+               const struct netfs_request_ops *netfs_ops;
                unsigned int            debug_id;
                ...
        };
@@ -187,8 +236,8 @@ The above fields are the ones the netfs can use.  They are:
 The second structure is used to manage individual slices of the overall read
 request::
 
-       struct netfs_read_subrequest {
-               struct netfs_read_request *rreq;
+       struct netfs_io_subrequest {
+               struct netfs_io_request *rreq;
                loff_t                  start;
                size_t                  len;
                size_t                  transferred;
@@ -244,32 +293,26 @@ Read Helper Operations
 The network filesystem must provide the read helpers with a table of operations
 through which it can issue requests and negotiate::
 
-       struct netfs_read_request_ops {
-               void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
-               bool (*is_cache_enabled)(struct inode *inode);
-               int (*begin_cache_operation)(struct netfs_read_request *rreq);
-               void (*expand_readahead)(struct netfs_read_request *rreq);
-               bool (*clamp_length)(struct netfs_read_subrequest *subreq);
-               void (*issue_op)(struct netfs_read_subrequest *subreq);
-               bool (*is_still_valid)(struct netfs_read_request *rreq);
+       struct netfs_request_ops {
+               void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+               int (*begin_cache_operation)(struct netfs_io_request *rreq);
+               void (*expand_readahead)(struct netfs_io_request *rreq);
+               bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+               void (*issue_read)(struct netfs_io_subrequest *subreq);
+               bool (*is_still_valid)(struct netfs_io_request *rreq);
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                         struct folio *folio, void **_fsdata);
-               void (*done)(struct netfs_read_request *rreq);
+               void (*done)(struct netfs_io_request *rreq);
                void (*cleanup)(struct address_space *mapping, void *netfs_priv);
        };
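+
+For example, a filesystem might fill this in as follows (a sketch; the
+``my_*`` handler names are hypothetical)::
+
+	const struct netfs_request_ops my_request_ops = {
+		.init_request		= my_init_request,
+		.begin_cache_operation	= my_begin_cache_operation,
+		.issue_read		= my_issue_read,
+	};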
 
 The operations are as follows:
 
- * ``init_rreq()``
+ * ``init_request()``
 
    [Optional] This is called to initialise the request structure.  It is given
    the file for reference and can modify the ->netfs_priv value.
 
- * ``is_cache_enabled()``
-
-   [Required] This is called by netfs_write_begin() to ask if the file is being
-   cached.  It should return true if it is being cached and false otherwise.
-
  * ``begin_cache_operation()``
 
    [Optional] This is called to ask the network filesystem to call into the
@@ -305,7 +348,7 @@ The operations are as follows:
 
    This should return 0 on success and an error code on error.
 
- * ``issue_op()``
+ * ``issue_read()``
 
    [Required] The helpers use this to dispatch a subrequest to the server for
    reading.  In the subrequest, ->start, ->len and ->transferred indicate what
@@ -420,12 +463,12 @@ The network filesystem's ->begin_cache_operation() method is called to set up a
 cache and this must call into the cache to do the work.  If using fscache, for
 example, the cache would call::
 
-       int fscache_begin_read_operation(struct netfs_read_request *rreq,
+       int fscache_begin_read_operation(struct netfs_io_request *rreq,
                                         struct fscache_cookie *cookie);
 
 passing in the request pointer and the cookie corresponding to the file.
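+
+In a network filesystem, following the prototype above, the
+``begin_cache_operation()`` handler might simply be (a sketch; the wrapper
+name is hypothetical)::
+
+	static int my_begin_cache_operation(struct netfs_io_request *rreq)
+	{
+		return fscache_begin_read_operation(rreq,
+						    netfs_i_cookie(rreq->inode));
+	}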
 
-The netfs_read_request object contains a place for the cache to hang its
+The netfs_io_request object contains a place for the cache to hang its
 state::
 
        struct netfs_cache_resources {
@@ -443,7 +486,7 @@ operation table looks like the following::
                void (*expand_readahead)(struct netfs_cache_resources *cres,
                                         loff_t *_start, size_t *_len, loff_t i_size);
 
-               enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
+               enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
                                                       loff_t i_size);
 
                int (*read)(struct netfs_cache_resources *cres,
@@ -562,4 +605,5 @@ API Function Reference
 ======================
 
 .. kernel-doc:: include/linux/netfs.h
-.. kernel-doc:: fs/netfs/read_helper.c
+.. kernel-doc:: fs/netfs/buffered_read.c
+.. kernel-doc:: fs/netfs/io.c
index 4f14edf..794bd1a 100644 (file)
@@ -726,8 +726,6 @@ cache in your filesystem.  The following members are defined:
                int (*writepages)(struct address_space *, struct writeback_control *);
                bool (*dirty_folio)(struct address_space *, struct folio *);
                void (*readahead)(struct readahead_control *);
-               int (*readpages)(struct file *filp, struct address_space *mapping,
-                                struct list_head *pages, unsigned nr_pages);
                int (*write_begin)(struct file *, struct address_space *mapping,
                                   loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata);
@@ -817,15 +815,6 @@ cache in your filesystem.  The following members are defined:
        completes successfully.  Setting PageError on any page will be
        ignored; simply unlock the page if an I/O error occurs.
 
-``readpages``
-       called by the VM to read pages associated with the address_space
-       object.  This is essentially just a vector version of readpage.
-       Instead of just one page, several pages are requested.
-       readpages is only used for read-ahead, so read errors are
-       ignored.  If anything goes wrong, feel free to give up.
-       This interface is deprecated and will be removed by the end of
-       2020; implement readahead instead.
-
 ``write_begin``
        Called by the generic buffered write code to ask the filesystem
        to prepare to write len bytes at the given offset in the file.
index 2d1fc03..ef19b9c 100644 (file)
@@ -77,6 +77,17 @@ HOSTLDLIBS
 ----------
 Additional libraries to link against when building host programs.
 
+.. _userkbuildflags:
+
+USERCFLAGS
+----------
+Additional options used for $(CC) when compiling userprogs.
+
+USERLDFLAGS
+-----------
+Additional options used for $(LD) when linking userprogs. userprogs are linked
+with $(CC), so $(USERLDFLAGS) should include the "-Wl," prefix as applicable.
+
 KBUILD_KCONFIG
 --------------
 Set the top-level Kconfig file to the value of this environment
index d326168..b854bb4 100644 (file)
@@ -49,17 +49,36 @@ example: ::
 LLVM Utilities
 --------------
 
-LLVM has substitutes for GNU binutils utilities. Kbuild supports ``LLVM=1``
-to enable them. ::
-
-       make LLVM=1
-
-They can be enabled individually. The full list of the parameters: ::
+LLVM has substitutes for GNU binutils utilities. They can be enabled individually.
+The full list of supported make variables::
 
        make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
          OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump READELF=llvm-readelf \
          HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar HOSTLD=ld.lld
 
+To simplify the above command, Kbuild supports the ``LLVM`` variable::
+
+       make LLVM=1
+
+If your LLVM tools are not available in your PATH, you can supply their
+location using the LLVM variable with a trailing slash::
+
+       make LLVM=/path/to/llvm/
+
+which will use ``/path/to/llvm/clang``, ``/path/to/llvm/ld.lld``, etc.
+
+If your LLVM tools have a version suffix and you want to test with that
+explicit version rather than the unsuffixed executables like ``LLVM=1``, you
+can pass the suffix using the ``LLVM`` variable::
+
+       make LLVM=-14
+
+which will use ``clang-14``, ``ld.lld-14``, etc.
+
+``LLVM=0`` is not the same as omitting ``LLVM`` altogether; it will behave like
+``LLVM=1``. If you only wish to use certain LLVM utilities, use their respective
+make variables.
+
 The integrated assembler is enabled by default. You can pass ``LLVM_IAS=0`` to
 disable it.
 
index b008b90..11a296e 100644 (file)
@@ -982,6 +982,8 @@ The syntax is quite similar. The difference is to use "userprogs" instead of
 
        When linking bpfilter_umh, it will be passed the extra option -static.
 
+       From command line, :ref:`USERCFLAGS and USERLDFLAGS <userkbuildflags>` will also be used.
+
 5.4 When userspace programs are actually built
 ----------------------------------------------
 
index bfa75ea..9933faa 100644 (file)
@@ -211,9 +211,6 @@ raw_spinlock_t and spinlock_t
 raw_spinlock_t
 --------------
 
-raw_spinlock_t is a strict spinning lock implementation regardless of the
-kernel configuration including PREEMPT_RT enabled kernels.
-
 raw_spinlock_t is a strict spinning lock implementation in all kernels,
 including PREEMPT_RT kernels.  Use raw_spinlock_t only in real critical
 core code, low-level interrupt handling and places where disabling
index f0a6043..3e03283 100644 (file)
@@ -12,6 +12,7 @@ additions to this manual.
    configure-git
    rebasing-and-merging
    pull-requests
+   messy-diffstat
    maintainer-entry-profile
    modifying-patches
 
diff --git a/Documentation/maintainer/messy-diffstat.rst b/Documentation/maintainer/messy-diffstat.rst
new file mode 100644 (file)
index 0000000..c015f66
--- /dev/null
@@ -0,0 +1,96 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+Handling messy pull-request diffstats
+=====================================
+
+Subsystem maintainers routinely use ``git request-pull`` as part of the
+process of sending work upstream.  Normally, the result includes a nice
+diffstat that shows which files will be touched and how much of each will
+be changed.  Occasionally, though, a repository with a relatively
+complicated development history will yield a massive diffstat containing a
+great deal of unrelated work.  The result looks ugly and obscures what the
+pull request is actually doing.  This document describes what is happening
+and how to fix things up; it is derived from The Wisdom of Linus Torvalds,
+found in Linus1_ and Linus2_.
+
+.. _Linus1: https://lore.kernel.org/lkml/CAHk-=wg3wXH2JNxkQi+eLZkpuxqV+wPiHhw_Jf7ViH33Sw7PHA@mail.gmail.com/
+.. _Linus2: https://lore.kernel.org/lkml/CAHk-=wgXbSa8yq8Dht8at+gxb_idnJ7X5qWZQWRBN4_CUPr=eQ@mail.gmail.com/
+
+A Git development history proceeds as a series of commits.  In a simplified
+manner, mainline kernel development looks like this::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+
+If one wants to see what has changed between two points, a command like
+this will do the job::
+
+  $ git diff --stat --summary vN-rc2..vN-rc3
+
+Here, there are two clear points in the history; Git will essentially
+"subtract" the beginning point from the end point and display the resulting
+differences.  The requested operation is unambiguous and easy enough to
+understand.
+
+When a subsystem maintainer creates a branch and commits changes to it, the
+result in the simplest case is a history that looks like::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                          |
+                          +-- c1 --- c2 --- ... --- cN
+
+If that maintainer now uses ``git diff`` to see what has changed between
+the mainline branch (let's call it "linus") and cN, there are still two
+clear endpoints, and the result is as expected.  So a pull request
+generated with ``git request-pull`` will also be as expected.  But now
+consider a slightly more complex development history::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                |         |
+                |         +-- c1 --- c2 --- ... --- cN
+                |                   /
+                +-- x1 --- x2 --- x3
+
+Our maintainer has created one branch at vN-rc1 and another at vN-rc2; the
+two were subsequently merged into c2.  Now a pull request generated
+for cN may end up being messy indeed, and developers often end up wondering
+why.
+
+What is happening here is that there are no longer two clear end points for
+the ``git diff`` operation to use.  The development culminating in cN
+started in two different places; to generate the diffstat, ``git diff``
+ends up having to pick one of them and hoping for the best.  If the diffstat
+starts at vN-rc1, it may end up including all of the changes between there
+and the second origin end point (vN-rc2), which is certainly not what our
+maintainer had in mind.  With all of that extra junk in the diffstat, it
+may be impossible to tell what actually happened in the changes leading up
+to cN.
+
+Maintainers often try to resolve this problem by, for example, rebasing the
+branch or performing another merge with the linus branch, then recreating
+the pull request.  This approach tends not to lead to joy at the receiving
+end of that pull request; rebasing and/or merging just before pushing
+upstream is a well-known way to get a grumpy response.
+
+So what is to be done?  The best response when confronted with this
+situation is indeed to do a merge with the branch you intend your work
+to be pulled into, but to do it privately, as if it were the source of
+shame.  Create a new, throwaway branch and do the merge there::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                |         |                                      |
+                |         +-- c1 --- c2 --- ... --- cN           |
+                |                   /               |            |
+                +-- x1 --- x2 --- x3                +------------+-- TEMP
+
+The merge operation resolves all of the complications resulting from the
+multiple beginning points, yielding a coherent result that contains only
+the differences from the mainline branch.  Now it will be possible to
+generate a diffstat with the desired information::
+
+  $ git diff -C --stat --summary linus..TEMP
+
+Save the output from this command, then simply delete the TEMP branch;
+definitely do not expose it to the outside world.  Take the saved diffstat
+output and edit it into the messy pull request, yielding a result that
+shows what is really going on.  That request can then be sent upstream.
index ea915c1..e23b876 100644 (file)
@@ -7,7 +7,6 @@ RISC-V architecture
 
     boot-image-header
     vm-layout
-    pmu
     patch-acceptance
 
     features
index 4392b3c..b5feb5b 100644 (file)
@@ -128,6 +128,7 @@ class KernelCmd(Directive):
         return out
 
     def nestedParse(self, lines, fname):
+        env = self.state.document.settings.env
         content = ViewList()
         node = nodes.section()
 
@@ -137,7 +138,7 @@ class KernelCmd(Directive):
                 code_block += "\n    " + l
             lines = code_block + "\n\n"
 
-        line_regex = re.compile("^#define LINENO (\S+)\#([0-9]+)$")
+        line_regex = re.compile("^\.\. LINENO (\S+)\#([0-9]+)$")
         ln = 0
         n = 0
         f = fname
@@ -154,6 +155,9 @@ class KernelCmd(Directive):
                     self.do_parse(content, node)
                     content = ViewList()
 
+                    # Add the file to Sphinx build dependencies
+                    env.note_dependency(os.path.abspath(f))
+
                 f = new_f
 
                 # sphinx counts lines from 0
index 8138d69..27b701e 100644 (file)
@@ -33,6 +33,7 @@ u"""
 
 import codecs
 import os
+import re
 import subprocess
 import sys
 
@@ -82,7 +83,7 @@ class KernelFeat(Directive):
 
         env = doc.settings.env
         cwd = path.dirname(doc.current_source)
-        cmd = "get_feat.pl rest --dir "
+        cmd = "get_feat.pl rest --enable-fname --dir "
         cmd += self.arguments[0]
 
         if len(self.arguments) > 1:
@@ -102,7 +103,22 @@ class KernelFeat(Directive):
         shell_env["srctree"] = srctree
 
         lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
-        nodeList = self.nestedParse(lines, fname)
+
+        line_regex = re.compile("^\.\. FILE (\S+)$")
+
+        out_lines = ""
+
+        for line in lines.split("\n"):
+            match = line_regex.search(line)
+            if match:
+                fname = match.group(1)
+
+                # Add the file to Sphinx build dependencies
+                env.note_dependency(os.path.abspath(fname))
+            else:
+                out_lines += line + "\n"
+
+        nodeList = self.nestedParse(out_lines, fname)
         return nodeList
 
     def runCmd(self, cmd, **kwargs):
index f523aa6..abe7680 100755 (executable)
@@ -59,6 +59,7 @@ class KernelInclude(Include):
     u"""KernelInclude (``kernel-include``) directive"""
 
     def run(self):
+        env = self.state.document.settings.env
         path = os.path.realpath(
             os.path.expandvars(self.arguments[0]))
 
@@ -70,6 +71,8 @@ class KernelInclude(Include):
 
         self.arguments[0] = path
 
+        env.note_dependency(os.path.abspath(path))
+
         #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
         return self._run()
 
index 8189c33..9395892 100644 (file)
@@ -130,7 +130,7 @@ class KernelDocDirective(Directive):
             result = ViewList()
 
             lineoffset = 0;
-            line_regex = re.compile("^#define LINENO ([0-9]+)$")
+            line_regex = re.compile("^\.\. LINENO ([0-9]+)$")
             for line in lines:
                 match = line_regex.search(line)
                 if match:
index 24d2b2a..cefdbb7 100644 (file)
@@ -212,7 +212,7 @@ def setupTools(app):
         if convert_cmd:
             kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
         else:
-            kernellog.warn(app,
+            kernellog.verbose(app,
                 "Neither inkscape(1) nor convert(1) found.\n"
                 "For SVG to PDF conversion, "
                 "install either Inkscape (https://inkscape.org/) (preferred) or\n"
@@ -296,8 +296,10 @@ def convert_image(img_node, translator, src_fname=None):
 
         if translator.builder.format == 'latex':
             if not inkscape_cmd and convert_cmd is None:
-                kernellog.verbose(app,
-                                  "no SVG to PDF conversion available / include SVG raw.")
+                kernellog.warn(app,
+                                  "no SVG to PDF conversion available / include SVG raw."
+                                  "\nIncluding large raw SVGs can cause xelatex error."
+                                  "\nInstall Inkscape (preferred) or ImageMagick.")
                 img_node.replace_self(file2literal(src_fname))
             else:
                 dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
index 9a35f50..2c57354 100644 (file)
@@ -1,2 +1,4 @@
+# jinja2>=3.1 is not compatible with Sphinx<4.0
+jinja2<3.1
 sphinx_rtd_theme
 Sphinx==2.4.4
index bddedab..c180936 100644 (file)
@@ -7,7 +7,7 @@ user_events: User-based Event Tracing
 Overview
 --------
 User based trace events allow user processes to create events and trace data
-that can be viewed via existing tools, such as ftrace, perf and eBPF.
+that can be viewed via existing tools, such as ftrace and perf.
 To enable this feature, build your kernel with CONFIG_USER_EVENTS=y.
 
 Programs can view status of the events via
@@ -67,8 +67,7 @@ The command string format is as follows::
 
 Supported Flags
 ^^^^^^^^^^^^^^^
-**BPF_ITER** - EBPF programs attached to this event will get the raw iovec
-struct instead of any data copies for max performance.
+None yet
 
 Field Format
 ^^^^^^^^^^^^
@@ -160,7 +159,7 @@ The following values are defined to aid in checking what has been attached:
 
 **EVENT_STATUS_FTRACE** - Bit set if ftrace has been attached (Bit 0).
 
-**EVENT_STATUS_PERF** - Bit set if perf/eBPF has been attached (Bit 1).
+**EVENT_STATUS_PERF** - Bit set if perf has been attached (Bit 1).
 
 Writing Data
 ------------
@@ -204,13 +203,6 @@ It's advised for user programs to do the following::
 
 **NOTE:** *The write_index is not emitted out into the trace being recorded.*
 
-EBPF
-----
-EBPF programs that attach to a user-based event tracepoint are given a pointer
-to a struct user_bpf_context. The bpf context contains the data type (which can
-be a user or kernel buffer, or can be a pointer to the iovec) and the data
-length that was emitted (minus the write_index).
-
 Example Code
 ------------
 See sample code in samples/user_events.
index 07a4547..d13fa66 100644 (file)
@@ -151,12 +151,6 @@ In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
 
-To use hardware assisted virtualization on MIPS (VZ ASE) rather than
-the default trap & emulate implementation (which changes the virtual
-memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
-flag KVM_VM_MIPS_VZ.
-
-
 On arm64, the physical address size for a VM (IPA Size limit) is limited
 to 40bits by default. The limit can be configured if the host supports the
 extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use
@@ -4081,6 +4075,11 @@ x2APIC MSRs are always allowed, independent of the ``default_allow`` setting,
 and their behavior depends on the ``X2APIC_ENABLE`` bit of the APIC base
 register.
 
+.. warning::
+   MSR accesses coming from nested vmentry/vmexit are not filtered.
+   This includes both writes to individual VMCS fields and reads/writes
+   through the MSR lists pointed to by the VMCS.
+
 If a bit is within one of the defined ranges, read and write accesses are
 guarded by the bitmap's value for the MSR index if the kind of access
 is included in the ``struct kvm_msr_filter_range`` flags.  If no range
@@ -5293,6 +5292,10 @@ type values:
 
 KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
   Sets the guest physical address of the vcpu_info for a given vCPU.
+  As with the shared_info page for the VM, the corresponding page may be
+  dirtied at any time if event channel interrupt delivery is enabled, so
+  userspace should always assume that the page is dirty without relying
+  on dirty logging.
 
 KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
   Sets the guest physical address of an additional pvclock structure
@@ -7719,3 +7722,49 @@ only be invoked on a VM prior to the creation of VCPUs.
 At this time, KVM_PMU_CAP_DISABLE is the only capability.  Setting
 this capability will disable PMU virtualization for that VM.  Usermode
 should adjust CPUID leaf 0xA to reflect that the PMU is disabled.
+
+9. Known KVM API problems
+=========================
+
+In some cases, KVM's API has some inconsistencies or common pitfalls
+that userspace needs to be aware of.  This section details some of
+these issues.
+
+Most of them are architecture specific, so the section is split by
+architecture.
+
+9.1. x86
+--------
+
+``KVM_GET_SUPPORTED_CPUID`` issues
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general, ``KVM_GET_SUPPORTED_CPUID`` is designed so that it is possible
+to take its result and pass it directly to ``KVM_SET_CPUID2``.  This section
+documents some cases in which that requires some care.
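+
+A minimal sketch of that pattern (error handling omitted; ``kvm_fd`` and
+``vcpu_fd`` stand for the /dev/kvm and vCPU file descriptors)::
+
+	struct kvm_cpuid2 *cpuid;
+
+	cpuid = calloc(1, sizeof(*cpuid) + 64 * sizeof(struct kvm_cpuid_entry2));
+	cpuid->nent = 64;
+	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
+	/* adjust entries here if needed, e.g. for the cases below */
+	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);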
+
+Local APIC features
+~~~~~~~~~~~~~~~~~~~
+
+CPU[EAX=1]:ECX[21] (X2APIC) is reported by ``KVM_GET_SUPPORTED_CPUID``,
+but it can only be enabled if ``KVM_CREATE_IRQCHIP`` or
+``KVM_ENABLE_CAP(KVM_CAP_IRQCHIP_SPLIT)`` is used to enable in-kernel emulation of
+the local APIC.
+
+The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature.
+
+CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``.
+It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
+has enabled in-kernel emulation of the local APIC.
+
+Obsolete ioctls and capabilities
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+KVM_CAP_DISABLE_QUIRKS does not let userspace know which quirks are actually
+available.  Use ``KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2)`` instead if
+available.
+
+Ordering of KVM_GET_*/KVM_SET_* ioctls
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TBD
index b6833c7..e0a2c74 100644 (file)
@@ -8,25 +8,13 @@ KVM
    :maxdepth: 2
 
    api
-   amd-memory-encryption
-   cpuid
-   halt-polling
-   hypercalls
-   locking
-   mmu
-   msr
-   nested-vmx
-   ppc-pv
-   s390-diag
-   s390-pv
-   s390-pv-boot
-   timekeeping
-   vcpu-requests
-
-   review-checklist
+   devices/index
 
    arm/index
+   s390/index
+   ppc-pv
+   x86/index
 
-   devices/index
-
-   running-nested-guests
+   locking
+   vcpu-requests
+   review-checklist
index 5d27da3..845a561 100644 (file)
@@ -210,32 +210,47 @@ time it will be set using the Dirty tracking mechanism described above.
 3. Reference
 ------------
 
-:Name:         kvm_lock
+``kvm_lock``
+^^^^^^^^^^^^
+
 :Type:         mutex
 :Arch:         any
 :Protects:     - vm_list
 
-:Name:         kvm_count_lock
+``kvm_count_lock``
+^^^^^^^^^^^^^^^^^^
+
 :Type:         raw_spinlock_t
 :Arch:         any
 :Protects:     - hardware virtualization enable/disable
 :Comment:      'raw' because hardware enabling/disabling must be atomic /wrt
                migration.
 
-:Name:         kvm_arch::tsc_write_lock
-:Type:         raw_spinlock
+``kvm->mn_invalidate_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:Type:          spinlock_t
+:Arch:          any
+:Protects:      mn_active_invalidate_count, mn_memslots_update_rcuwait
+
+``kvm_arch::tsc_write_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:Type:         raw_spinlock_t
 :Arch:         x86
 :Protects:     - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
                - tsc offset in vmcb
 :Comment:      'raw' because updating the tsc offsets must not be preempted.
 
-:Name:         kvm->mmu_lock
-:Type:         spinlock_t
+``kvm->mmu_lock``
+^^^^^^^^^^^^^^^^^
+:Type:         spinlock_t or rwlock_t
 :Arch:         any
 :Protects:     -shadow page/shadow tlb entry
 :Comment:      it is a spinlock since it is used in mmu notifier.
 
-:Name:         kvm->srcu
+``kvm->srcu``
+^^^^^^^^^^^^^
 :Type:         srcu lock
 :Arch:         any
 :Protects:     - kvm->memslots
@@ -246,10 +261,20 @@ time it will be set using the Dirty tracking mechanism described above.
                The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
                if it is needed by multiple functions.
 
-:Name:         blocked_vcpu_on_cpu_lock
+``kvm->slots_arch_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^
+:Type:          mutex
+:Arch:          any (only needed on x86 though)
+:Protects:      any arch-specific fields of memslots that have to be modified
+                in a ``kvm->srcu`` read-side critical section.
+:Comment:       must be held before reading the pointer to the current memslots,
+                until after all changes to the memslots are complete
+
+``wakeup_vcpus_on_cpu_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :Type:         spinlock_t
 :Arch:         x86
-:Protects:     blocked_vcpu_on_cpu
+:Protects:     wakeup_vcpus_on_cpu
 :Comment:      This is a per-CPU lock and it is used for VT-d posted-interrupts.
                When VT-d posted-interrupts is supported and the VM has assigned
                devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu
diff --git a/Documentation/virt/kvm/s390/index.rst b/Documentation/virt/kvm/s390/index.rst
new file mode 100644 (file)
index 0000000..605f488
--- /dev/null
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+KVM for s390 systems
+====================
+
+.. toctree::
+   :maxdepth: 2
+
+   s390-diag
+   s390-pv
+   s390-pv-boot
index b61d48a..db43ee5 100644 (file)
@@ -135,6 +135,16 @@ KVM_REQ_UNHALT
   such as a pending signal, which does not indicate the VCPU's halt
   emulation should stop, and therefore does not make the request.
 
+KVM_REQ_OUTSIDE_GUEST_MODE
+
+  This "request" ensures the target vCPU has exited guest mode prior to the
+  sender of the request continuing on.  No action need be taken by the target,
+  and so no request is actually logged for the target.  This request is similar
+  to a "kick", but unlike a kick it guarantees the vCPU has actually exited
+  guest mode.  A kick only guarantees the vCPU will exit at some point in the
+  future, e.g. a previous kick may have started the process, but there's no
+  guarantee the to-be-kicked vCPU has fully exited guest mode.
+
 KVM_REQUEST_MASK
 ----------------
 
diff --git a/Documentation/virt/kvm/x86/errata.rst b/Documentation/virt/kvm/x86/errata.rst
new file mode 100644 (file)
index 0000000..806f049
--- /dev/null
@@ -0,0 +1,39 @@
+
+=======================================
+Known limitations of CPU virtualization
+=======================================
+
+Whenever perfect emulation of a CPU feature is impossible or too hard, KVM
+has to choose between not implementing the feature at all or introducing
+behavioral differences between virtual machines and bare metal systems.
+
+This file documents some of the known limitations that KVM has in
+virtualizing CPU features.
+
+x86
+===
+
+``KVM_GET_SUPPORTED_CPUID`` issues
+----------------------------------
+
+x87 features
+~~~~~~~~~~~~
+
+Unlike most other CPUID feature bits, CPUID[EAX=7,ECX=0]:EBX[6]
+(FDP_EXCPTN_ONLY) and CPUID[EAX=7,ECX=0]:EBX[13] (ZERO_FCS_FDS) are
+clear if the features are present and set if the features are not present.
+
+Clearing these bits in CPUID has no effect on the operation of the guest;
+if these bits are set on hardware, the features will not be present on
+any virtual machine that runs on that hardware.
+
+**Workaround:** It is recommended to always set these bits in guest CPUID.
+Note, however, that any software (e.g. ``WIN87EM.DLL``) expecting these features
+to be present likely predates these CPUID feature bits, and therefore
+doesn't know to check for them anyway.
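+
+For example, after ``KVM_GET_SUPPORTED_CPUID`` userspace following this
+workaround might do (a sketch only)::
+
+	struct kvm_cpuid_entry2 *e;
+	int i;
+
+	for (i = 0; i < cpuid->nent; i++) {
+		e = &cpuid->entries[i];
+		/* force FDP_EXCPTN_ONLY (bit 6) and ZERO_FCS_FDS (bit 13) */
+		if (e->function == 7 && e->index == 0)
+			e->ebx |= (1 << 6) | (1 << 13);
+	}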
+
+Nested virtualization features
+------------------------------
+
+TBD
+
diff --git a/Documentation/virt/kvm/x86/index.rst b/Documentation/virt/kvm/x86/index.rst
new file mode 100644 (file)
index 0000000..7ff5888
--- /dev/null
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+KVM for x86 systems
+===================
+
+.. toctree::
+   :maxdepth: 2
+
+   amd-memory-encryption
+   cpuid
+   errata
+   halt-polling
+   hypercalls
+   mmu
+   msr
+   nested-vmx
+   running-nested-guests
+   timekeeping
index d5ad96c..863f67b 100644 (file)
@@ -1193,6 +1193,26 @@ E.g. ``os_close_file()`` is just a wrapper around ``close()``
 which ensures that the userspace function close does not clash
 with similarly named function(s) in the kernel part.
 
+Using UML as a Test Platform
+============================
+
+UML is an excellent test platform for device driver development. As
+with most things UML, "some user assembly may be required". It is
+up to the user to build their emulation environment. UML at present
+provides only the kernel infrastructure.
+
+Part of this infrastructure is the ability to load and parse fdt
+device tree blobs as used in Arm or Open Firmware platforms. These
+are supplied as an optional extra argument to the kernel command
+line::
+
+    dtb=filename
+
+The device tree is loaded and parsed at boot time and is accessible by
+drivers which query it. At present, this facility is intended solely
+for development purposes. UML's own devices do not query the device
+tree.
+
 Security Considerations
 -----------------------
 
index c4de6f8..65204d7 100644 (file)
@@ -125,7 +125,6 @@ Usage
    additional function:
 
        Cull:
-               -c              Cull by comparing stacktrace instead of total block.
                --cull <rules>
                                Specify culling rules.Culling syntax is key[,key[,...]].Choose a
                                multi-letter key from the **STANDARD FORMAT SPECIFIERS** section.
index eae3af1..b280367 100644 (file)
@@ -52,8 +52,13 @@ The infrastructure may also be able to handle other conditions that make pages
 unevictable, either by definition or by circumstance, in the future.
 
 
-The Unevictable Page List
--------------------------
+The Unevictable LRU Page List
+-----------------------------
+
+The Unevictable LRU page list is a lie.  It was never an LRU-ordered list, but a
+companion to the LRU-ordered anonymous and file, active and inactive page lists;
+and now it is not even a page list.  But following familiar convention, here in
+this document and in the source, we often imagine it as a fifth LRU page list.
 
 The Unevictable LRU infrastructure consists of an additional, per-node, LRU list
 called the "unevictable" list and an associated page flag, PG_unevictable, to
@@ -63,8 +68,8 @@ The PG_unevictable flag is analogous to, and mutually exclusive with, the
 PG_active flag in that it indicates on which LRU list a page resides when
 PG_lru is set.
 
-The Unevictable LRU infrastructure maintains unevictable pages on an additional
-LRU list for a few reasons:
+The Unevictable LRU infrastructure maintains unevictable pages as if they were
+on an additional LRU list for a few reasons:
 
  (1) We get to "treat unevictable pages just like we treat other pages in the
      system - which means we get to use the same code to manipulate them, the
@@ -72,13 +77,11 @@ LRU list for a few reasons:
      of the statistics, etc..." [Rik van Riel]
 
  (2) We want to be able to migrate unevictable pages between nodes for memory
-     defragmentation, workload management and memory hotplug.  The linux kernel
+     defragmentation, workload management and memory hotplug.  The Linux kernel
      can only migrate pages that it can successfully isolate from the LRU
-     lists.  If we were to maintain pages elsewhere than on an LRU-like list,
-     where they can be found by isolate_lru_page(), we would prevent their
-     migration, unless we reworked migration code to find the unevictable pages
-     itself.
-
+     lists (or "Movable" pages: outside of consideration here).  If we were to
+     maintain pages elsewhere than on an LRU-like list, where they can be
+     detected by isolate_lru_page(), we would prevent their migration.
 
 The unevictable list does not differentiate between file-backed and anonymous,
 swap-backed pages.  This differentiation is only important while the pages are,
@@ -92,8 +95,8 @@ Memory Control Group Interaction
 --------------------------------
 
 The unevictable LRU facility interacts with the memory control group [aka
-memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by extending the
-lru_list enum.
+memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by
+extending the lru_list enum.
 
 The memory controller data structure automatically gets a per-node unevictable
 list as a result of the "arrayification" of the per-node LRU lists (one per
@@ -143,7 +146,6 @@ These are currently used in three places in the kernel:
      and this mark remains for the life of the inode.
 
  (2) By SYSV SHM to mark SHM_LOCK'd address spaces until SHM_UNLOCK is called.
-
      Note that SHM_LOCK is not required to page in the locked pages if they're
      swapped out; the application must touch the pages manually if it wants to
      ensure they're in memory.
@@ -156,19 +158,19 @@ These are currently used in three places in the kernel:
 Detecting Unevictable Pages
 ---------------------------
 
-The function page_evictable() in vmscan.c determines whether a page is
+The function page_evictable() in mm/internal.h determines whether a page is
 evictable or not using the query function outlined above [see section
 :ref:`Marking address spaces unevictable <mark_addr_space_unevict>`]
 to check the AS_UNEVICTABLE flag.
 
 For address spaces that are so marked after being populated (as SHM regions
-might be), the lock action (eg: SHM_LOCK) can be lazy, and need not populate
+might be), the lock action (e.g. SHM_LOCK) can be lazy, and need not populate
 the page tables for the region as does, for example, mlock(), nor need it make
 any special effort to push any pages in the SHM_LOCK'd area to the unevictable
 list.  Instead, vmscan will do this if and when it encounters the pages during
 a reclamation scan.
 
-On an unlock action (such as SHM_UNLOCK), the unlocker (eg: shmctl()) must scan
+On an unlock action (such as SHM_UNLOCK), the unlocker (e.g. shmctl()) must scan
 the pages in the region and "rescue" them from the unevictable list if no other
 condition is keeping them unevictable.  If an unevictable region is destroyed,
 the pages are also "rescued" from the unevictable list in the process of
@@ -176,7 +178,7 @@ freeing them.
 
 page_evictable() also checks for mlocked pages by testing an additional page
 flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is
-faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED.
+faulted into a VM_LOCKED VMA, or found in a VMA being VM_LOCKED.
 
 
 Vmscan's Handling of Unevictable Pages
@@ -186,28 +188,23 @@ If unevictable pages are culled in the fault path, or moved to the unevictable
 list at mlock() or mmap() time, vmscan will not encounter the pages until they
 have become evictable again (via munlock() for example) and have been "rescued"
 from the unevictable list.  However, there may be situations where we decide,
-for the sake of expediency, to leave a unevictable page on one of the regular
+for the sake of expediency, to leave an unevictable page on one of the regular
 active/inactive LRU lists for vmscan to deal with.  vmscan checks for such
 pages in all of the shrink_{active|inactive|page}_list() functions and will
 "cull" such pages that it encounters: that is, it diverts those pages to the
-unevictable list for the node being scanned.
+unevictable list for the memory cgroup and node being scanned.
 
 There may be situations where a page is mapped into a VM_LOCKED VMA, but the
 page is not marked as PG_mlocked.  Such pages will make it all the way to
-shrink_page_list() where they will be detected when vmscan walks the reverse
-map in try_to_unmap().  If try_to_unmap() returns SWAP_MLOCK,
-shrink_page_list() will cull the page at that point.
+shrink_active_list() or shrink_page_list() where they will be detected when
+vmscan walks the reverse map in page_referenced() or try_to_unmap().  The page
+is culled to the unevictable list when it is released by the shrinker.
 
 To "cull" an unevictable page, vmscan simply puts the page back on the LRU list
 using putback_lru_page() - the inverse operation to isolate_lru_page() - after
 dropping the page lock.  Because the condition which makes the page unevictable
-may change once the page is unlocked, putback_lru_page() will recheck the
-unevictable state of a page that it places on the unevictable list.  If the
-page has become unevictable, putback_lru_page() removes it from the list and
-retries, including the page_unevictable() test.  Because such a race is a rare
-event and movement of pages onto the unevictable list should be rare, these
-extra evictabilty checks should not occur in the majority of calls to
-putback_lru_page().
+may change once the page is unlocked, __pagevec_lru_add_fn() will recheck the
+unevictable state of a page before placing it on the unevictable list.
 
 
 MLOCKED Pages
@@ -227,16 +224,25 @@ Nick posted his patch as an alternative to a patch posted by Christoph Lameter
 to achieve the same objective: hiding mlocked pages from vmscan.
 
 In Nick's patch, he used one of the struct page LRU list link fields as a count
-of VM_LOCKED VMAs that map the page.  This use of the link field for a count
-prevented the management of the pages on an LRU list, and thus mlocked pages
-were not migratable as isolate_lru_page() could not find them, and the LRU list
-link field was not available to the migration subsystem.
+of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
+earlier).  But this use of the link field for a count prevented the management
+of the pages on an LRU list, and thus mlocked pages were not migratable as
+isolate_lru_page() could not detect them, and the LRU list link field was not
+available to the migration subsystem.
 
-Nick resolved this by putting mlocked pages back on the lru list before
+Nick resolved this by putting mlocked pages back on the LRU list before
 attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs.  When
 Nick's patch was integrated with the Unevictable LRU work, the count was
-replaced by walking the reverse map to determine whether any VM_LOCKED VMAs
-mapped the page.  More on this below.
+replaced by walking the reverse map when munlocking, to determine whether any
+other VM_LOCKED VMAs still mapped the page.
+
+However, walking the reverse map for each page when munlocking was ugly and
+inefficient, and could lead to catastrophic contention on a file's rmap lock,
+when many processes which had it mlocked were trying to exit.  In 5.18, the
+idea of keeping mlock_count in the Unevictable LRU list link field was revived and
+put to work, without preventing the migration of mlocked pages.  This is why
+the "Unevictable LRU list" cannot be a linked list of pages now; but there was
+no use for that linked list anyway - though its size is maintained for meminfo.
 
 
 Basic Management
@@ -250,22 +256,18 @@ PageMlocked() functions.
 A PG_mlocked page will be placed on the unevictable list when it is added to
 the LRU.  Such pages can be "noticed" by memory management in several places:
 
- (1) in the mlock()/mlockall() system call handlers;
+ (1) in the mlock()/mlock2()/mlockall() system call handlers;
 
  (2) in the mmap() system call handler when mmapping a region with the
      MAP_LOCKED flag;
 
  (3) mmapping a region in a task that has called mlockall() with the MCL_FUTURE
-     flag
+     flag;
 
- (4) in the fault path, if mlocked pages are "culled" in the fault path,
-     and when a VM_LOCKED stack segment is expanded; or
+ (4) in the fault path and when a VM_LOCKED stack segment is expanded; or
 
  (5) as mentioned above, in vmscan:shrink_page_list() when attempting to
-     reclaim a page in a VM_LOCKED VMA via try_to_unmap()
-
-all of which result in the VM_LOCKED flag being set for the VMA if it doesn't
-already have it set.
+     reclaim a page in a VM_LOCKED VMA by page_referenced() or try_to_unmap().
 
 mlocked pages become unlocked and rescued from the unevictable list when:
 
@@ -280,51 +282,53 @@ mlocked pages become unlocked and rescued from the unevictable list when:
  (4) before a page is COW'd in a VM_LOCKED VMA.
 
 
-mlock()/mlockall() System Call Handling
----------------------------------------
+mlock()/mlock2()/mlockall() System Call Handling
+------------------------------------------------
 
-Both [do\_]mlock() and [do\_]mlockall() system call handlers call mlock_fixup()
+mlock(), mlock2() and mlockall() system call handlers proceed to mlock_fixup()
 for each VMA in the range specified by the call.  In the case of mlockall(),
 this is the entire active address space of the task.  Note that mlock_fixup()
 is used for both mlocking and munlocking a range of memory.  A call to mlock()
-an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED is
-treated as a no-op, and mlock_fixup() simply returns.
+an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED, is
+treated as a no-op and mlock_fixup() simply returns.
 
-If the VMA passes some filtering as described in "Filtering Special Vmas"
+If the VMA passes some filtering as described in "Filtering Special VMAs"
 below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
-off a subset of the VMA if the range does not cover the entire VMA.  Once the
-VMA has been merged or split or neither, mlock_fixup() will call
-populate_vma_page_range() to fault in the pages via get_user_pages() and to
-mark the pages as mlocked via mlock_vma_page().
+off a subset of the VMA if the range does not cover the entire VMA.  Any pages
+already present in the VMA are then marked as mlocked by mlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range().
+
+Before returning from the system call, do_mlock() or mlockall() will call
+__mm_populate() to fault in the remaining pages via get_user_pages() and to
+mark those pages as mlocked as they are faulted.
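+
+From userspace, the calls whose handling is described above might look like
+this (an illustrative sketch, assuming a libc that exposes mlock2() and
+MLOCK_ONFAULT; otherwise syscall(2) can be used)::
+
+	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
+			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+	mlock2(buf, len, MLOCK_ONFAULT);	/* pages are mlocked as they fault in */
+	/* ... use buf ... */
+	munlock(buf, len);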
 
 Note that the VMA being mlocked might be mapped with PROT_NONE.  In this case,
 get_user_pages() will be unable to fault in the pages.  That's okay.  If pages
-do end up getting faulted into this VM_LOCKED VMA, we'll handle them in the
-fault path or in vmscan.
-
-Also note that a page returned by get_user_pages() could be truncated or
-migrated out from under us, while we're trying to mlock it.  To detect this,
-populate_vma_page_range() checks page_mapping() after acquiring the page lock.
-If the page is still associated with its mapping, we'll go ahead and call
-mlock_vma_page().  If the mapping is gone, we just unlock the page and move on.
-In the worst case, this will result in a page mapped in a VM_LOCKED VMA
-remaining on a normal LRU list without being PageMlocked().  Again, vmscan will
-detect and cull such pages.
-
-mlock_vma_page() will call TestSetPageMlocked() for each page returned by
-get_user_pages().  We use TestSetPageMlocked() because the page might already
-be mlocked by another task/VMA and we don't want to do extra work.  We
-especially do not want to count an mlocked page more than once in the
-statistics.  If the page was already mlocked, mlock_vma_page() need do nothing
-more.
-
-If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the
-page from the LRU, as it is likely on the appropriate active or inactive list
-at that time.  If the isolate_lru_page() succeeds, mlock_vma_page() will put
-back the page - by calling putback_lru_page() - which will notice that the page
-is now mlocked and divert the page to the node's unevictable list.  If
-mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle
-it later if and when it attempts to reclaim the page.
+do end up getting faulted into this VM_LOCKED VMA, they will be handled in the
+fault path - which is also how mlock2()'s MLOCK_ONFAULT areas are handled.
+
+For each PTE (or PMD) being faulted into a VMA, the page add rmap function
+calls mlock_vma_page(), which calls mlock_page() when the VMA is VM_LOCKED
+(unless it is a PTE mapping of a part of a transparent huge page).  Or when
+it is a newly allocated anonymous page, lru_cache_add_inactive_or_unevictable()
+calls mlock_new_page() instead: similar to mlock_page(), but can make better
+judgments, since this page is held exclusively and known not to be on LRU yet.
+
+mlock_page() sets PageMlocked immediately, then places the page on the CPU's
+mlock pagevec, to batch up the rest of the work to be done under lru_lock by
+__mlock_page().  __mlock_page() sets PageUnevictable, initializes mlock_count
+and moves the page to unevictable state ("the unevictable LRU", but with
+mlock_count in place of LRU threading).  Or if the page was already PageLRU
+and PageUnevictable and PageMlocked, it simply increments the mlock_count.
+
+But in practice that may not work ideally: the page may not yet be on an LRU, or
+it may have been temporarily isolated from LRU.  In such cases the mlock_count
+field cannot be touched, but will be set to 0 later when __pagevec_lru_add_fn()
+returns the page to "LRU".  Races prohibit mlock_count from being set to 1 then:
+rather than risk stranding a page indefinitely as unevictable, always err with
+mlock_count on the low side, so that when munlocked the page will be rescued to
+an evictable LRU, then perhaps be mlocked again later if vmscan finds it in a
+VM_LOCKED VMA.
 
 
 Filtering Special VMAs
@@ -339,68 +343,48 @@ mlock_fixup() filters several classes of "special" VMAs:
    so there is no sense in attempting to visit them.
 
 2) VMAs mapping hugetlbfs page are already effectively pinned into memory.  We
-   neither need nor want to mlock() these pages.  However, to preserve the
-   prior behavior of mlock() - before the unevictable/mlock changes -
-   mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to
-   allocate the huge pages and populate the ptes.
+   neither need nor want to mlock() these pages.  But __mm_populate() includes
+   hugetlbfs ranges, allocating the huge pages and populating the PTEs.
 
 3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages,
-   such as the VDSO page, relay channel pages, etc. These pages
-   are inherently unevictable and are not managed on the LRU lists.
-   mlock_fixup() treats these VMAs the same as hugetlbfs VMAs.  It calls
-   make_pages_present() to populate the ptes.
+   such as the VDSO page, relay channel pages, etc.  These pages are inherently
+   unevictable and are not managed on the LRU lists.  __mm_populate() includes
+   these ranges, populating the PTEs if not already populated.
+
+4) VMAs with VM_MIXEDMAP set are not marked VM_LOCKED, but __mm_populate()
+   includes these ranges, populating the PTEs if not already populated.
 
 Note that for all of these special VMAs, mlock_fixup() does not set the
 VM_LOCKED flag.  Therefore, we won't have to deal with them later during
 munlock(), munmap() or task exit.  Neither does mlock_fixup() account these
 VMAs against the task's "locked_vm".
 
-.. _munlock_munlockall_handling:
 
 munlock()/munlockall() System Call Handling
 -------------------------------------------
 
-The munlock() and munlockall() system calls are handled by the same functions -
-do_mlock[all]() - as the mlock() and mlockall() system calls with the unlock vs
-lock operation indicated by an argument.  So, these system calls are also
-handled by mlock_fixup().  Again, if called for an already munlocked VMA,
-mlock_fixup() simply returns.  Because of the VMA filtering discussed above,
-VM_LOCKED will not be set in any "special" VMAs.  So, these VMAs will be
-ignored for munlock.
+The munlock() and munlockall() system calls are handled by the same
+mlock_fixup() function as the mlock(), mlock2() and mlockall() system calls.
+If called to munlock an already munlocked VMA, mlock_fixup() simply returns.
+Because of the VMA filtering discussed above, VM_LOCKED will not be set in
+any "special" VMAs.  So, those VMAs will be ignored for munlock.
 
 If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
-specified range.  The range is then munlocked via the function
-populate_vma_page_range() - the same function used to mlock a VMA range -
-passing a flag to indicate that munlock() is being performed.
-
-Because the VMA access protections could have been changed to PROT_NONE after
-faulting in and mlocking pages, get_user_pages() was unreliable for visiting
-these pages for munlocking.  Because we don't want to leave pages mlocked,
-get_user_pages() was enhanced to accept a flag to ignore the permissions when
-fetching the pages - all of which should be resident as a result of previous
-mlocking.
-
-For munlock(), populate_vma_page_range() unlocks individual pages by calling
-munlock_vma_page().  munlock_vma_page() unconditionally clears the PG_mlocked
-flag using TestClearPageMlocked().  As with mlock_vma_page(),
-munlock_vma_page() use the Test*PageMlocked() function to handle the case where
-the page might have already been unlocked by another task.  If the page was
-mlocked, munlock_vma_page() updates that zone statistics for the number of
-mlocked pages.  Note, however, that at this point we haven't checked whether
-the page is mapped by other VM_LOCKED VMAs.
-
-We can't call page_mlock(), the function that walks the reverse map to
-check for other VM_LOCKED VMAs, without first isolating the page from the LRU.
-page_mlock() is a variant of try_to_unmap() and thus requires that the page
-not be on an LRU list [more on these below].  However, the call to
-isolate_lru_page() could fail, in which case we can't call page_mlock().  So,
-we go ahead and clear PG_mlocked up front, as this might be the only chance we
-have.  If we can successfully isolate the page, we go ahead and call
-page_mlock(), which will restore the PG_mlocked flag and update the zone
-page statistics if it finds another VMA holding the page mlocked.  If we fail
-to isolate the page, we'll have left a potentially mlocked page on the LRU.
-This is fine, because we'll catch it later if and if vmscan tries to reclaim
-the page.  This should be relatively rare.
+specified range.  All pages in the VMA are then munlocked by munlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range() - the same
+function used when mlocking a VMA range, with new flags for the VMA indicating
+that munlock() is being performed.
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page().  __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it.  In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
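
The munlock side, sketched in the same toy model as the mlock sketch above
(again illustrative, not the kernel's __munlock_page()):

/* Deferred part of munlock_page(), done in batches under lru_lock. */
static void __munlock_page(struct page *page)
{
    if (!page->on_lru || !page->unevictable) {
        /* mlock_count is unusable here: assume 0, clear the flags, and
         * let the page be rescued to an evictable LRU later. */
        page->mlocked = false;
        page->unevictable = false;
        return;
    }
    if (--page->mlock_count > 0)
        return;                 /* still mlocked through another VMA */
    page->mlocked = false;
    page->unevictable = false;  /* back to the inactive LRU */
}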
 
 
 Migrating MLOCKED Pages
@@ -410,33 +394,38 @@ A page that is being migrated has been isolated from the LRU lists and is held
 locked across unmapping of the page, updating the page's address space entry
 and copying the contents and state, until the page table entry has been
 replaced with an entry that refers to the new page.  Linux supports migration
-of mlocked pages and other unevictable pages.  This involves simply moving the
-PG_mlocked and PG_unevictable states from the old page to the new page.
+of mlocked pages and other unevictable pages.  PG_mlocked is cleared from the
+old page when it is unmapped from the last VM_LOCKED VMA, and set when the new
+page is mapped in place of the migration entry in a VM_LOCKED VMA.  If the page
+was unevictable because mlocked, PG_unevictable follows PG_mlocked; but if the
+page was unevictable for other reasons, PG_unevictable is copied explicitly.
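
The flag handling can be summarized with a small sketch in the same toy model
(the helper name is made up; this is not the kernel's migration code):

/* Transfer unevictable state from the old page to its replacement. */
static void migrate_unevictable_state(struct page *newpage, struct page *old)
{
    /* PG_mlocked needs no copying: it was already cleared on the old page
     * when its last VM_LOCKED mapping was unmapped, and is set again on
     * the new page when it is mapped back into a VM_LOCKED VMA. */
    if (old->unevictable)
        newpage->unevictable = true;    /* e.g. SHM_LOCK'd: unevictable
                                         * for reasons other than mlock */
}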
 
 Note that page migration can race with mlocking or munlocking of the same page.
-This has been discussed from the mlock/munlock perspective in the respective
-sections above.  Both processes (migration and m[un]locking) hold the page
-locked.  This provides the first level of synchronization.  Page migration
-zeros out the page_mapping of the old page before unlocking it, so m[un]lock
-can skip these pages by testing the page mapping under page lock.
+There is mostly no problem since page migration requires unmapping all PTEs of
+the old page (including munlock where VM_LOCKED), then mapping in the new page
+(including mlock where VM_LOCKED).  The page table locks provide sufficient
+synchronization.
 
-To complete page migration, we place the new and old pages back onto the LRU
-after dropping the page lock.  The "unneeded" page - old page on success, new
-page on failure - will be freed when the reference count held by the migration
-process is released.  To ensure that we don't strand pages on the unevictable
-list because of a race between munlock and migration, page migration uses the
-putback_lru_page() function to add migrated pages back to the LRU.
+However, mlock_vma_pages_range() sets VM_LOCKED on a VMA before mlocking any
+pages already present in it.  If one of those pages were migrated before
+mlock_pte_range() reached it, it would get counted twice in mlock_count.
+To prevent that, mlock_vma_pages_range() temporarily marks the VMA as VM_IO,
+so that mlock_vma_page() will skip it.
+
+To complete page migration, we place the old and new pages back onto the LRU
+afterwards.  The "unneeded" page - old page on success, new page on failure -
+is freed when the reference count held by the migration process is released.
 
 
 Compacting MLOCKED Pages
 ------------------------
 
-The unevictable LRU can be scanned for compactable regions and the default
-behavior is to do so.  /proc/sys/vm/compact_unevictable_allowed controls
-this behavior (see Documentation/admin-guide/sysctl/vm.rst).  Once scanning of the
-unevictable LRU is enabled, the work of compaction is mostly handled by
-the page migration code and the same work flow as described in MIGRATING
-MLOCKED PAGES will apply.
+The memory map can be scanned for compactable regions and the default behavior
+is to let unevictable pages be moved.  /proc/sys/vm/compact_unevictable_allowed
+controls this behavior (see Documentation/admin-guide/sysctl/vm.rst).  The work
+of compaction is mostly handled by the page migration code and the same work
+flow as described in Migrating MLOCKED Pages will apply.
+
 
 MLOCKING Transparent Huge Pages
 -------------------------------
@@ -445,51 +434,44 @@ A transparent huge page is represented by a single entry on an LRU list.
 Therefore, we can only make unevictable an entire compound page, not
 individual subpages.
 
-If a user tries to mlock() part of a huge page, we want the rest of the
-page to be reclaimable.
+If a user tries to mlock() part of a huge page, and no user mlock()s the
+whole of the huge page, we want the rest of the page to be reclaimable.
 
 We cannot just split the page on partial mlock() as split_huge_page() can
-fail and new intermittent failure mode for the syscall is undesirable.
+fail, and a new intermittent failure mode for the syscall is undesirable.
 
-We handle this by keeping PTE-mapped huge pages on normal LRU lists: the
-PMD on border of VM_LOCKED VMA will be split into PTE table.
+We handle this by keeping PTE-mlocked huge pages on evictable LRU lists:
+the PMD on the border of a VM_LOCKED VMA will be split into a PTE table.
 
-This way the huge page is accessible for vmscan. Under memory pressure the
+This way the huge page is accessible for vmscan.  Under memory pressure the
 page will be split, subpages which belong to VM_LOCKED VMAs will be moved
-to unevictable LRU and the rest can be reclaimed.
+to the unevictable LRU and the rest can be reclaimed.
+
+/proc/meminfo's Unevictable and Mlocked amounts do not include those parts
+of a transparent huge page which are mapped only by PTEs in VM_LOCKED VMAs.
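
In the same toy model, that filter amounts to something like the sketch below;
the boolean arguments are illustrative stand-ins for the checks the kernel
makes, not its actual interface:

/* Only mlock a page when it is being mapped as a whole (a PMD mapping of a
 * THP, or an ordinary page).  A PTE mapping of part of a huge page is
 * skipped, so the huge page stays on an evictable LRU until memory
 * pressure splits it; after the split, only the subpages belonging to
 * VM_LOCKED VMAs move to the unevictable LRU. */
static void mlock_vma_page_sketch(struct page *page,
                                  bool compound_mapping, bool part_of_thp)
{
    if (compound_mapping || !part_of_thp)
        mlock_page(page);
}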
 
-See also comment in follow_trans_huge_pmd().
 
 mmap(MAP_LOCKED) System Call Handling
 -------------------------------------
 
-In addition the mlock()/mlockall() system calls, an application can request
-that a region of memory be mlocked supplying the MAP_LOCKED flag to the mmap()
-call. There is one important and subtle difference here, though. mmap() + mlock()
-will fail if the range cannot be faulted in (e.g. because mm_populate fails)
-and returns with ENOMEM while mmap(MAP_LOCKED) will not fail. The mmaped
-area will still have properties of the locked area - aka. pages will not get
-swapped out - but major page faults to fault memory in might still happen.
+In addition to the mlock(), mlock2() and mlockall() system calls, an application
+can request that a region of memory be mlocked by supplying the MAP_LOCKED flag
+to the mmap() call.  There is one important and subtle difference here, though.
+mmap() + mlock() will fail if the range cannot be faulted in (e.g. because
+mm_populate fails) and return ENOMEM, while mmap(MAP_LOCKED) will not fail.
+The mmaped area will still have properties of the locked area - pages will not
+get swapped out - but major page faults to fault memory in might still happen.
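
The difference can be seen from userspace with the two call patterns below
(ordinary Linux APIs; a deliberate population failure is hard to provoke, so
the example only shows where an error would be reported in each case):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4UL << 20;                     /* 4 MB */

    /* Variant 1: mmap() + mlock().  If the range cannot be faulted in,
     * mlock() fails (typically with ENOMEM) and reports it here. */
    void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (a == MAP_FAILED || mlock(a, len) != 0)
        fprintf(stderr, "mmap+mlock: %s\n", strerror(errno));

    /* Variant 2: mmap(MAP_LOCKED).  The mapping gets VM_LOCKED and the
     * kernel tries to populate it, but a population failure is not
     * reported: the call still returns a usable mapping, and major
     * faults may simply happen later. */
    void *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
    if (b == MAP_FAILED)
        fprintf(stderr, "mmap(MAP_LOCKED): %s\n", strerror(errno));

    return 0;
}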
 
-Furthermore, any mmap() call or brk() call that expands the heap by a
-task that has previously called mlockall() with the MCL_FUTURE flag will result
+Furthermore, any mmap() call or brk() call that expands the heap by a task
+that has previously called mlockall() with the MCL_FUTURE flag will result
 in the newly mapped memory being mlocked.  Before the unevictable/mlock
-changes, the kernel simply called make_pages_present() to allocate pages and
-populate the page table.
+changes, the kernel simply called make_pages_present() to allocate pages
+and populate the page table.
 
-To mlock a range of memory under the unevictable/mlock infrastructure, the
-mmap() handler and task address space expansion functions call
+To mlock a range of memory under the unevictable/mlock infrastructure,
+the mmap() handler and task address space expansion functions call
 populate_vma_page_range() specifying the vma and the address range to mlock.
 
-The callers of populate_vma_page_range() will have already added the memory range
-to be mlocked to the task's "locked_vm".  To account for filtered VMAs,
-populate_vma_page_range() returns the number of pages NOT mlocked.  All of the
-callers then subtract a non-negative return value from the task's locked_vm.  A
-negative return value represent an error - for example, from get_user_pages()
-attempting to fault in a VMA with PROT_NONE access.  In this case, we leave the
-memory range accounted as locked_vm, as the protections could be changed later
-and pages allocated into that region.
-
 
 munmap()/exit()/exec() System Call Handling
 -------------------------------------------
@@ -500,81 +482,53 @@ munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages.
 Before the unevictable/mlock changes, mlocking did not mark the pages in any
 way, so unmapping them required no processing.
 
-To munlock a range of memory under the unevictable/mlock infrastructure, the
-munmap() handler and task address space call tear down function
-munlock_vma_pages_all().  The name reflects the observation that one always
-specifies the entire VMA range when munlock()ing during unmap of a region.
-Because of the VMA filtering when mlocking() regions, only "normal" VMAs that
-actually contain mlocked pages will be passed to munlock_vma_pages_all().
-
-munlock_vma_pages_all() clears the VM_LOCKED VMA flag and, like mlock_fixup()
-for the munlock case, calls __munlock_vma_pages_range() to walk the page table
-for the VMA's memory range and munlock_vma_page() each resident page mapped by
-the VMA.  This effectively munlocks the page, only if this is the last
-VM_LOCKED VMA that maps the page.
-
-
-try_to_unmap()
---------------
-
-Pages can, of course, be mapped into multiple VMAs.  Some of these VMAs may
-have VM_LOCKED flag set.  It is possible for a page mapped into one or more
-VM_LOCKED VMAs not to have the PG_mlocked flag set and therefore reside on one
-of the active or inactive LRU lists.  This could happen if, for example, a task
-in the process of munlocking the page could not isolate the page from the LRU.
-As a result, vmscan/shrink_page_list() might encounter such a page as described
-in section "vmscan's handling of unevictable pages".  To handle this situation,
-try_to_unmap() checks for VM_LOCKED VMAs while it is walking a page's reverse
-map.
-
-try_to_unmap() is always called, by either vmscan for reclaim or for page
-migration, with the argument page locked and isolated from the LRU.  Separate
-functions handle anonymous and mapped file and KSM pages, as these types of
-pages have different reverse map lookup mechanisms, with different locking.
-In each case, whether rmap_walk_anon() or rmap_walk_file() or rmap_walk_ksm(),
-it will call try_to_unmap_one() for every VMA which might contain the page.
-
-When trying to reclaim, if try_to_unmap_one() finds the page in a VM_LOCKED
-VMA, it will then mlock the page via mlock_vma_page() instead of unmapping it,
-and return SWAP_MLOCK to indicate that the page is unevictable: and the scan
-stops there.
-
-mlock_vma_page() is called while holding the page table's lock (in addition
-to the page lock, and the rmap lock): to serialize against concurrent mlock or
-munlock or munmap system calls, mm teardown (munlock_vma_pages_all), reclaim,
-holepunching, and truncation of file pages and their anonymous COWed pages.
-
-
-page_mlock() Reverse Map Scan
----------------------------------
-
-When munlock_vma_page() [see section :ref:`munlock()/munlockall() System Call
-Handling <munlock_munlockall_handling>` above] tries to munlock a
-page, it needs to determine whether or not the page is mapped by any
-VM_LOCKED VMA without actually attempting to unmap all PTEs from the
-page.  For this purpose, the unevictable/mlock infrastructure
-introduced a variant of try_to_unmap() called page_mlock().
-
-page_mlock() walks the respective reverse maps looking for VM_LOCKED VMAs. When
-such a VMA is found the page is mlocked via mlock_vma_page(). This undoes the
-pre-clearing of the page's PG_mlocked done by munlock_vma_page.
-
-Note that page_mlock()'s reverse map walk must visit every VMA in a page's
-reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA.
-However, the scan can terminate when it encounters a VM_LOCKED VMA.
-Although page_mlock() might be called a great many times when munlocking a
-large region or tearing down a large address space that has been mlocked via
-mlockall(), overall this is a fairly rare event.
+For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page().  __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it.  In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
+
+
+Truncating MLOCKED Pages
+------------------------
+
+File truncation or hole punching forcibly unmaps the deleted pages from
+userspace; truncation even unmaps and deletes any private anonymous pages
+which had been Copied-On-Write from the file pages now being truncated.
+
+Mlocked pages can be munlocked and deleted in this way: like with munmap(),
+for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+However, there is a race with munlock(): mlock_vma_pages_range() starts
+munlocking by clearing VM_LOCKED from the VMA, before munlocking all the pages
+present.  If one of those pages is unmapped by truncation or hole punch before
+mlock_pte_range() reaches it, it is not recognized as mlocked by this VMA,
+and is not counted out of mlock_count.  In this rare case, a page may
+still appear as PageMlocked after it has been fully unmapped: and it is left to
+release_pages() (or __page_cache_release()) to clear it and update statistics
+before freeing (this event is counted in /proc/vmstat unevictable_pgs_cleared,
+which is usually 0).
 
 
 Page Reclaim in shrink_*_list()
 -------------------------------
 
-shrink_active_list() culls any obviously unevictable pages - i.e.
-!page_evictable(page) - diverting these to the unevictable list.
+vmscan's shrink_active_list() culls any obviously unevictable pages -
+i.e. !page_evictable(page) pages - diverting those to the unevictable list.
 However, shrink_active_list() only sees unevictable pages that made it onto the
-active/inactive lru lists.  Note that these pages do not have PageUnevictable
-set - otherwise they would be on the unevictable list and shrink_active_list
+active/inactive LRU lists.  Note that these pages do not have PageUnevictable
+set - otherwise they would be on the unevictable list and shrink_active_list()
 would never see them.
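
For reference, the evictability test that this culling relies on boils down to
two conditions.  In the toy model of the earlier sketches, with an added
mapping_unevictable field standing in for SHM_LOCK'd shmem or ramfs mappings
(an illustrative field, not the kernel's struct page), it looks like:

/* A page is evictable unless its mapping is marked unevictable
 * (e.g. SHM_LOCK'd shmem, ramfs) or the page itself is mlocked. */
static bool page_evictable_sketch(const struct page *page)
{
    return !page->mapping_unevictable && !page->mlocked;
}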
 
 Some examples of these unevictable pages on the LRU lists are:
@@ -586,20 +540,15 @@ Some examples of these unevictable pages on the LRU lists are:
      when an application accesses the page the first time after SHM_LOCK'ing
      the segment.
 
- (3) mlocked pages that could not be isolated from the LRU and moved to the
-     unevictable list in mlock_vma_page().
-
-shrink_inactive_list() also diverts any unevictable pages that it finds on the
-inactive lists to the appropriate node's unevictable list.
+ (3) pages still mapped into VM_LOCKED VMAs, which should be marked mlocked,
+     but events left mlock_count too low, so they were munlocked too early.
 
-shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd
-after shrink_active_list() had moved them to the inactive list, or pages mapped
-into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to
-recheck via page_mlock().  shrink_inactive_list() won't notice the latter,
-but will pass on to shrink_page_list().
+vmscan's shrink_inactive_list() and shrink_page_list() also divert obviously
+unevictable pages found on the inactive lists to the appropriate memory cgroup
+and node unevictable list.
 
-shrink_page_list() again culls obviously unevictable pages that it could
-encounter for similar reason to shrink_inactive_list().  Pages mapped into
-VM_LOCKED VMAs but without PG_mlocked set will make it all the way to
-try_to_unmap().  shrink_page_list() will divert them to the unevictable list
-when try_to_unmap() returns SWAP_MLOCK, as discussed above.
+rmap's page_referenced_one(), called via vmscan's shrink_active_list() or
+shrink_page_list(), and rmap's try_to_unmap_one() called via shrink_page_list(),
+check for (3) pages still mapped into VM_LOCKED VMAs, and call mlock_vma_page()
+to correct them.  Such pages are culled to the unevictable list when released
+by the shrinker.
index cf89643..fd768d4 100644 (file)
@@ -4640,6 +4640,7 @@ F:        drivers/input/touchscreen/chipone_icn8505.c
 
 CHROME HARDWARE PLATFORM SUPPORT
 M:     Benson Leung <bleung@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
 F:     drivers/platform/chrome/
@@ -4648,6 +4649,7 @@ CHROMEOS EC CODEC DRIVER
 M:     Cheng-Yi Chiang <cychiang@chromium.org>
 M:     Tzung-Bi Shih <tzungbi@google.com>
 R:     Guenter Roeck <groeck@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
 F:     sound/soc/codecs/cros_ec_codec.*
@@ -4655,6 +4657,7 @@ F:        sound/soc/codecs/cros_ec_codec.*
 CHROMEOS EC SUBDRIVERS
 M:     Benson Leung <bleung@chromium.org>
 R:     Guenter Roeck <groeck@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     drivers/power/supply/cros_usbpd-charger.c
 N:     cros_ec
@@ -4662,11 +4665,13 @@ N:      cros-ec
 
 CHROMEOS EC USB TYPE-C DRIVER
 M:     Prashant Malani <pmalani@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     drivers/platform/chrome/cros_ec_typec.c
 
 CHROMEOS EC USB PD NOTIFY DRIVER
 M:     Prashant Malani <pmalani@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     drivers/platform/chrome/cros_usbpd_notify.c
 F:     include/linux/platform_data/cros_usbpd_notify.h
@@ -5157,6 +5162,20 @@ S:       Supported
 F:     drivers/cpuidle/cpuidle-psci.h
 F:     drivers/cpuidle/cpuidle-psci-domain.c
 
+CPUIDLE DRIVER - DT IDLE PM DOMAIN
+M:     Ulf Hansson <ulf.hansson@linaro.org>
+L:     linux-pm@vger.kernel.org
+S:     Supported
+F:     drivers/cpuidle/dt_idle_genpd.c
+F:     drivers/cpuidle/dt_idle_genpd.h
+
+CPUIDLE DRIVER - RISC-V SBI
+M:     Anup Patel <anup@brainfault.org>
+L:     linux-pm@vger.kernel.org
+L:     linux-riscv@lists.infradead.org
+S:     Maintained
+F:     drivers/cpuidle/cpuidle-riscv-sbi.c
+
 CRAMFS FILESYSTEM
 M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Maintained
@@ -6038,6 +6057,7 @@ F:        drivers/scsi/dpt/
 DRBD DRIVER
 M:     Philipp Reisner <philipp.reisner@linbit.com>
 M:     Lars Ellenberg <lars.ellenberg@linbit.com>
+M:     Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
 L:     drbd-dev@lists.linbit.com
 S:     Supported
 W:     http://www.drbd.org
@@ -9517,6 +9537,12 @@ M:       Stanislaw Gruszka <stf_xl@wp.pl>
 S:     Maintained
 F:     drivers/usb/atm/ueagle-atm.c
 
+IMAGIS TOUCHSCREEN DRIVER
+M:     Markuss Broks <markuss.broks@gmail.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
+F:     drivers/input/touchscreen/imagis.c
+
 IMGTEC ASCII LCD DRIVER
 M:     Paul Burton <paulburton@kernel.org>
 S:     Maintained
@@ -10648,9 +10674,9 @@ F:      tools/testing/selftests/
 
 KERNEL SMB3 SERVER (KSMBD)
 M:     Namjae Jeon <linkinjeon@kernel.org>
-M:     Sergey Senozhatsky <senozhatsky@chromium.org>
 M:     Steve French <sfrench@samba.org>
 M:     Hyunchul Lee <hyc.lee@gmail.com>
+R:     Sergey Senozhatsky <senozhatsky@chromium.org>
 L:     linux-cifs@vger.kernel.org
 S:     Maintained
 T:     git git://git.samba.org/ksmbd.git
@@ -14622,6 +14648,12 @@ L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     drivers/char/hw_random/optee-rng.c
 
+OP-TEE RTC DRIVER
+M:     Clément Léger <clement.leger@bootlin.com>
+L:     linux-rtc@vger.kernel.org
+S:     Maintained
+F:     drivers/rtc/rtc-optee.c
+
 OPA-VNIC DRIVER
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
@@ -20539,14 +20571,15 @@ F:    Documentation/admin-guide/media/zr364xx*
 F:     drivers/media/usb/zr364xx/
 
 USER-MODE LINUX (UML)
-M:     Jeff Dike <jdike@addtoit.com>
 M:     Richard Weinberger <richard@nod.at>
 M:     Anton Ivanov <anton.ivanov@cambridgegreys.com>
+M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-um@lists.infradead.org
 S:     Maintained
 W:     http://user-mode-linux.sourceforge.net
 Q:     https://patchwork.ozlabs.org/project/linux-um/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux.git next
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux.git fixes
 F:     Documentation/virt/uml/
 F:     arch/um/
 F:     arch/x86/um/
index c28c5d9..8c7de9a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
-PATCHLEVEL = 17
+PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Superb Owl
 
 # *DOCUMENTATION*
@@ -424,19 +424,26 @@ HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
 HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
 ifneq ($(LLVM),)
-HOSTCC = clang
-HOSTCXX        = clang++
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+HOSTCC = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+HOSTCXX        = $(LLVM_PREFIX)clang++$(LLVM_SUFFIX)
 else
 HOSTCC = gcc
 HOSTCXX        = g++
 endif
 
-export KBUILD_USERCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
-                             -O2 -fomit-frame-pointer -std=gnu11 \
-                             -Wdeclaration-after-statement
-export KBUILD_USERLDFLAGS :=
+KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+                        -O2 -fomit-frame-pointer -std=gnu11 \
+                        -Wdeclaration-after-statement
+KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+KBUILD_USERLDFLAGS := $(USERLDFLAGS)
 
-KBUILD_HOSTCFLAGS   := $(KBUILD_USERCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
+KBUILD_HOSTCFLAGS   := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
 KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
 KBUILD_HOSTLDFLAGS  := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
 KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
@@ -444,14 +451,14 @@ KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
 # Make variables (CC, etc...)
 CPP            = $(CC) -E
 ifneq ($(LLVM),)
-CC             = clang
-LD             = ld.lld
-AR             = llvm-ar
-NM             = llvm-nm
-OBJCOPY                = llvm-objcopy
-OBJDUMP                = llvm-objdump
-READELF                = llvm-readelf
-STRIP          = llvm-strip
+CC             = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+LD             = $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
+AR             = $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
+NM             = $(LLVM_PREFIX)llvm-nm$(LLVM_SUFFIX)
+OBJCOPY                = $(LLVM_PREFIX)llvm-objcopy$(LLVM_SUFFIX)
+OBJDUMP                = $(LLVM_PREFIX)llvm-objdump$(LLVM_SUFFIX)
+READELF                = $(LLVM_PREFIX)llvm-readelf$(LLVM_SUFFIX)
+STRIP          = $(LLVM_PREFIX)llvm-strip$(LLVM_SUFFIX)
 else
 CC             = $(CROSS_COMPILE)gcc
 LD             = $(CROSS_COMPILE)ld
@@ -531,6 +538,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
 export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
 export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
@@ -785,10 +793,6 @@ ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += -Qunused-arguments
 # The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
 KBUILD_CFLAGS += -Wno-gnu
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += -mno-global-merge
 else
 
 # gcc inanely warns about local variables called 'main'
@@ -1237,8 +1241,8 @@ define filechk_version.h
        echo \#define LINUX_VERSION_SUBLEVEL $(SUBLEVEL)
 endef
 
-$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
-$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
+$(version_h): PATCHLEVEL := $(or $(PATCHLEVEL), 0)
+$(version_h): SUBLEVEL := $(or $(SUBLEVEL), 0)
 $(version_h): FORCE
        $(call filechk,version.h)
 
@@ -1621,7 +1625,7 @@ help:
        @$(MAKE) -f $(srctree)/Documentation/Makefile dochelp
        @echo  ''
        @echo  'Architecture specific targets ($(SRCARCH)):'
-       @$(if $(archhelp),$(archhelp),\
+       @$(or $(archhelp),\
                echo '  No architecture specific help defined for $(SRCARCH)')
        @echo  ''
        @$(if $(boards), \
@@ -1838,7 +1842,7 @@ $(clean-dirs):
 
 clean: $(clean-dirs)
        $(call cmd,rmfiles)
-       @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+       @find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
                \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
                -o -name '*.ko.*' \
                -o -name '*.dtb' -o -name '*.dtbo' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
index 3df3749..c9f525a 100644 (file)
@@ -45,10 +45,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
-
 #endif /* _ALPHA_USER_H */
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 187a187..41bcbb4 100644 (file)
@@ -92,6 +92,8 @@ ifeq ($(CONFIG_USE_OF),y)
 OBJS   += $(libfdt_objs) fdt_check_mem_start.o
 endif
 
+OBJS   += lib1funcs.o ashldi3.o bswapsdi2.o
+
 targets       := vmlinux vmlinux.lds piggy_data piggy.o \
                 head.o $(OBJS)
 
@@ -126,8 +128,6 @@ endif
 # Next argument is a linker script
 LDFLAGS_vmlinux += -T
 
-OBJS   += lib1funcs.o ashldi3.o bswapsdi2.o
-
 # We need to prevent any GOTOFF relocs being used with references
 # to symbols in the .bss section since we cannot relocate them
 # independently from the rest at run time.  This can be achieved by
index 827e887..13e1bdb 100644 (file)
                                reg = <0xb4100000 0x1000>;
                                interrupts = <0 105 0x4>;
                                status = "disabled";
-                               dmas = <&dwdma0 12 0 1>,
-                                       <&dwdma0 13 1 0>;
-                               dma-names = "tx", "rx";
+                               dmas = <&dwdma0 13 0 1>,
+                                       <&dwdma0 12 1 0>;
+                               dma-names = "rx", "tx";
                        };
 
                        thermal@e07008c4 {
index c87b881..9135533 100644 (file)
                                #size-cells = <0>;
                                interrupts = <0 31 0x4>;
                                status = "disabled";
-                               dmas = <&dwdma0 4 0 0>,
-                                       <&dwdma0 5 0 0>;
-                               dma-names = "tx", "rx";
+                               dmas = <&dwdma0 5 0 0>,
+                                       <&dwdma0 4 0 0>;
+                               dma-names = "rx", "tx";
                        };
 
                        rtc@e0580000 {
index c799a3c..167d44b 100644 (file)
@@ -77,10 +77,6 @@ struct user{
   struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
                                /* the FP registers. */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
index 039feb7..1e8a50a 100644 (file)
@@ -1004,7 +1004,8 @@ static void __init reserve_crashkernel(void)
        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
-       if (ret)
+       /* invalid value specified or crashkernel=0 */
+       if (ret || !crash_size)
                return;
 
        if (crash_base <= 0) {
index b5efecb..d0fa203 100644 (file)
@@ -54,17 +54,17 @@ int notrace unwind_frame(struct stackframe *frame)
                return -EINVAL;
 
        frame->sp = frame->fp;
-       frame->fp = *(unsigned long *)(fp);
-       frame->pc = *(unsigned long *)(fp + 4);
+       frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+       frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
 #else
        /* check current frame pointer is within bounds */
        if (fp < low + 12 || fp > high - 4)
                return -EINVAL;
 
        /* restore the registers from the stack frame */
-       frame->fp = *(unsigned long *)(fp - 12);
-       frame->sp = *(unsigned long *)(fp - 8);
-       frame->pc = *(unsigned long *)(fp - 4);
+       frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
+       frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
+       frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
 #endif
 #ifdef CONFIG_KRETPROBES
        if (is_kretprobe_trampoline(frame->pc))
index 0659ab4..11677fc 100644 (file)
@@ -59,8 +59,13 @@ static void __init omap_optee_init_check(void)
 u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
                                                         u32 arg3, u32 arg4)
 {
+       static u32 buf[NR_CPUS][5];
+       u32 *param;
+       int cpu;
        u32 ret;
-       u32 param[5];
+
+       cpu = get_cpu();
+       param = buf[cpu];
 
        param[0] = nargs;
        param[1] = arg1;
@@ -76,6 +81,8 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
        outer_clean_range(__pa(param), __pa(param + 5));
        ret = omap_smc2(idx, flag, __pa(param));
 
+       put_cpu();
+
        return ret;
 }
 
@@ -119,8 +126,8 @@ phys_addr_t omap_secure_ram_mempool_base(void)
 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
 u32 omap3_save_secure_ram(void __iomem *addr, int size)
 {
+       static u32 param[5];
        u32 ret;
-       u32 param[5];
 
        if (size != OMAP3_SAVE_SECURE_RAM_SZ)
                return OMAP3_SAVE_SECURE_RAM_SZ;
@@ -153,8 +160,8 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
 u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
                           u32 arg1, u32 arg2, u32 arg3, u32 arg4)
 {
+       static u32 param[5];
        u32 ret;
-       u32 param[5];
 
        param[0] = nargs+1; /* RX-51 needs number of arguments + 1 */
        param[1] = arg1;
index 285e1f0..0d7d408 100644 (file)
@@ -236,11 +236,11 @@ static int __init jive_mtdset(char *options)
        unsigned long set;
 
        if (options == NULL || options[0] == '\0')
-               return 0;
+               return 1;
 
        if (kstrtoul(options, 10, &set)) {
                printk(KERN_ERR "failed to parse mtdset=%s\n", options);
-               return 0;
+               return 1;
        }
 
        switch (set) {
@@ -256,7 +256,7 @@ static int __init jive_mtdset(char *options)
                       "using default.", set);
        }
 
-       return 0;
+       return 1;
 }
 
 /* parse the mtdset= option given to the kernel command line */
index 4b61541..82ffac6 100644 (file)
@@ -381,6 +381,7 @@ out:
  */
 postcore_initcall(atomic_pool_init);
 
+#ifdef CONFIG_CMA_AREAS
 struct dma_contig_early_reserve {
        phys_addr_t base;
        unsigned long size;
@@ -435,6 +436,7 @@ void __init dma_contiguous_remap(void)
                iotable_init(&map, 1);
        }
 }
+#endif
 
 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
 {
index 9ff6836..d7ffccb 100644 (file)
@@ -88,6 +88,10 @@ extern phys_addr_t arm_lowmem_limit;
 
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+#ifdef CONFIG_CMA_AREAS
 void dma_contiguous_remap(void);
+#else
+static inline void dma_contiguous_remap(void) { }
+#endif
 
 unsigned long __clear_cr(unsigned long mask);
index 4a5c50f..81f13bd 100644 (file)
@@ -29,8 +29,7 @@ kapi: $(kapi-hdrs-y) $(gen-y)
 uapi:  $(uapi-hdrs-y)
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \
-          $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+$(shell mkdir -p $(kapi) $(uapi))
 
 quiet_cmd_gen_mach = GEN     $@
       cmd_gen_mach = $(AWK) -f $(real-prereqs) > $@
index 6a60930..68103a8 100644 (file)
@@ -1,4 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
-                       amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
-                       husky.dtb
+dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb
index 8e341be..c290d1c 100644 (file)
@@ -9,6 +9,7 @@
 /dts-v1/;
 
 /include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
 
 / {
        model = "AMD Seattle (Rev.B0) Development Board (Overdrive)";
        status = "ok";
 };
 
-&gpio2 {
-       status = "ok";
-};
-
-&gpio3 {
-       status = "ok";
-};
-
 &gpio4 {
        status = "ok";
 };
        };
 };
 
-&ipmi_kcs {
-       status = "ok";
-};
-
 &smb0 {
        /include/ "amd-seattle-xgbe-b.dtsi"
 };
index 92cef05..e0926f6 100644 (file)
@@ -9,6 +9,7 @@
 /dts-v1/;
 
 /include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
 
 / {
        model = "AMD Seattle (Rev.B1) Development Board (Overdrive)";
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive.dts b/arch/arm64/boot/dts/amd/amd-overdrive.dts
deleted file mode 100644 (file)
index 41b3a6c..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD Seattle Overdrive Development Board
- *
- * Copyright (C) 2014 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
-       model = "AMD Seattle Development Board (Overdrive)";
-       compatible = "amd,seattle-overdrive", "amd,seattle";
-
-       chosen {
-               stdout-path = &serial0;
-       };
-};
-
-&ccp0 {
-       status = "ok";
-};
-
-&gpio0 {
-       status = "ok";
-};
-
-&gpio1 {
-       status = "ok";
-};
-
-&i2c0 {
-       status = "ok";
-};
-
-&pcie0 {
-       status = "ok";
-};
-
-&spi0 {
-       status = "ok";
-};
-
-&spi1 {
-       status = "ok";
-       sdcard0: sdcard@0 {
-               compatible = "mmc-spi-slot";
-               reg = <0>;
-               spi-max-frequency = <20000000>;
-               voltage-ranges = <3200 3400>;
-               gpios = <&gpio0 7 0>;
-               interrupt-parent = <&gpio0>;
-               interrupts = <7 3>;
-               pl022,hierarchy = <0>;
-               pl022,interface = <0>;
-               pl022,com-mode = <0x0>;
-               pl022,rx-level-trig = <0>;
-               pl022,tx-level-trig = <0>;
-       };
-};
-
-&v2m0 {
-       arm,msi-base-spi = <64>;
-       arm,msi-num-spis = <256>;
-};
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi
new file mode 100644 (file)
index 0000000..93688a0
--- /dev/null
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+       cpus {
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+                       cluster1 {
+                               core0 {
+                                       cpu = <&CPU2>;
+                               };
+                               core1 {
+                                       cpu = <&CPU3>;
+                               };
+                       };
+                       cluster2 {
+                               core0 {
+                                       cpu = <&CPU4>;
+                               };
+                               core1 {
+                                       cpu = <&CPU5>;
+                               };
+                       };
+                       cluster3 {
+                               core0 {
+                                       cpu = <&CPU6>;
+                               };
+                               core1 {
+                                       cpu = <&CPU7>;
+                               };
+                       };
+               };
+
+               CPU0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x0>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_0>;
+
+               };
+
+               CPU1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x1>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_0>;
+               };
+
+               CPU2: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x100>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_1>;
+               };
+
+               CPU3: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x101>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_1>;
+               };
+
+               CPU4: cpu@200 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x200>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_2>;
+               };
+
+               CPU5: cpu@201 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x201>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_2>;
+               };
+
+               CPU6: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x300>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_3>;
+               };
+
+               CPU7: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x301>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_3>;
+               };
+       };
+
+       L2_0: l2-cache0 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_1: l2-cache1 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_2: l2-cache2 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_3: l2-cache3 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L3: l3-cache {
+               cache-level = <3>;
+               cache-size = <0x800000>;
+               cache-line-size = <64>;
+               cache-sets = <8192>;
+               cache-unified;
+       };
+
+       pmu {
+               compatible = "arm,cortex-a57-pmu";
+               interrupts = <0x0 0x7 0x4>,
+                            <0x0 0x8 0x4>,
+                            <0x0 0x9 0x4>,
+                            <0x0 0xa 0x4>,
+                            <0x0 0xb 0x4>,
+                            <0x0 0xc 0x4>,
+                            <0x0 0xd 0x4>,
+                            <0x0 0xe 0x4>;
+               interrupt-affinity = <&CPU0>,
+                                    <&CPU1>,
+                                    <&CPU2>,
+                                    <&CPU3>,
+                                    <&CPU4>,
+                                    <&CPU5>,
+                                    <&CPU6>,
+                                    <&CPU7>;
+       };
+};
index b664e7a..6900205 100644 (file)
                             <1 10 0xff04>;
        };
 
-       pmu {
-               compatible = "arm,armv8-pmuv3";
-               interrupts = <0 7 4>,
-                            <0 8 4>,
-                            <0 9 4>,
-                            <0 10 4>,
-                            <0 11 4>,
-                            <0 12 4>,
-                            <0 13 4>,
-                            <0 14 4>;
-       };
-
        smb0: smb {
                compatible = "simple-bus";
                #address-cells = <2>;
@@ -70,6 +58,7 @@
                        reg = <0 0xe0300000 0 0xf0000>;
                        interrupts = <0 355 4>;
                        clocks = <&sataclk_333mhz>;
+                       iommus = <&sata0_smmu 0x0 0x1f>;
                        dma-coherent;
                };
 
                        reg = <0 0xe0d00000 0 0xf0000>;
                        interrupts = <0 354 4>;
                        clocks = <&sataclk_333mhz>;
+                       iommus = <&sata1_smmu 0x0e>,
+                                <&sata1_smmu 0x0f>,
+                                <&sata1_smmu 0x1e>;
+                       dma-coherent;
+               };
+
+               sata0_smmu: iommu@e0200000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0200000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 332 4>, <0 332 4>;
+                       #iommu-cells = <2>;
+                       dma-coherent;
+               };
+
+               sata1_smmu: iommu@e0c00000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0c00000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 331 4>, <0 331 4>;
+                       #iommu-cells = <1>;
                        dma-coherent;
                };
 
                        reg = <0 0xe0100000 0 0x10000>;
                        interrupts = <0 3 4>;
                        dma-coherent;
+                       iommus = <&sata1_smmu 0x00>,
+                                <&sata1_smmu 0x02>,
+                                <&sata1_smmu 0x40>,
+                                <&sata1_smmu 0x42>;
                };
 
                pcie0: pcie@f0000000 {
                        msi-parent = <&v2m0>;
                        reg = <0 0xf0000000 0 0x10000000>;
 
-                       interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+                       interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
                        interrupt-map =
-                               <0x1000 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
-                               <0x1000 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
-                               <0x1000 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
-                               <0x1000 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>;
+                               <0x1100 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
+                               <0x1100 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
+                               <0x1100 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
+                               <0x1100 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>,
+
+                               <0x1200 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x124 0x1>,
+                               <0x1200 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x125 0x1>,
+                               <0x1200 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x126 0x1>,
+                               <0x1200 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x127 0x1>,
+
+                               <0x1300 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x128 0x1>,
+                               <0x1300 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x129 0x1>,
+                               <0x1300 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x12a 0x1>,
+                               <0x1300 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x12b 0x1>;
 
                        dma-coherent;
                        dma-ranges = <0x43000000 0x0 0x0 0x0 0x0 0x100 0x0>;
                                <0x01000000 0x00 0x00000000 0x00 0xefff0000 0x00 0x00010000>,
                                /* 32-bit MMIO (size=2G) */
                                <0x02000000 0x00 0x40000000 0x00 0x40000000 0x00 0x80000000>,
-                               /* 64-bit MMIO (size= 124G) */
+                               /* 64-bit MMIO (size= 508G) */
                                <0x03000000 0x01 0x00000000 0x01 0x00000000 0x7f 0x00000000>;
+                       iommu-map = <0x0 &pcie_smmu 0x0 0x10000>;
+               };
+
+               pcie_smmu: iommu@e0a00000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0a00000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 333 4>, <0 333 4>;
+                       #iommu-cells = <1>;
+                       dma-coherent;
                };
 
                /* Perf CCN504 PMU */
index d974983..9259e54 100644 (file)
@@ -55,7 +55,7 @@
                clocks = <&xgmacclk0_dma_250mhz>, <&xgmacclk0_ptp_250mhz>;
                clock-names = "dma_clk", "ptp_clk";
                phy-mode = "xgmii";
-               #stream-id-cells = <16>;
+               iommus = <&xgmac0_smmu 0x00 0x17>; /* 0-7, 16-23 */
                dma-coherent;
        };
 
                clocks = <&xgmacclk1_dma_250mhz>, <&xgmacclk1_ptp_250mhz>;
                clock-names = "dma_clk", "ptp_clk";
                phy-mode = "xgmii";
-               #stream-id-cells = <16>;
+               iommus = <&xgmac1_smmu 0x00 0x17>; /* 0-7, 16-23 */
                dma-coherent;
        };
 
-       xgmac0_smmu: smmu@e0600000 {
+       xgmac0_smmu: iommu@e0600000 {
                 compatible = "arm,mmu-401";
                 reg = <0 0xe0600000 0 0x10000>;
                 #global-interrupts = <1>;
                               */
                              <0 336 4>,
                              <0 336 4>;
-
-                mmu-masters = <&xgmac0
-                         0  1  2  3  4  5  6  7
-                        16 17 18 19 20 21 22 23
-                >;
+               #iommu-cells = <2>;
+               dma-coherent;
         };
 
-        xgmac1_smmu: smmu@e0800000 {
+        xgmac1_smmu: iommu@e0800000 {
                 compatible = "arm,mmu-401";
                 reg = <0 0xe0800000 0 0x10000>;
                 #global-interrupts = <1>;
                               */
                              <0 335 4>,
                              <0 335 4>;
-
-                mmu-masters = <&xgmac1
-                         0  1  2  3  4  5  6  7
-                        16 17 18 19 20 21 22 23
-                >;
+               #iommu-cells = <2>;
+               dma-coherent;
         };
diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts
deleted file mode 100644 (file)
index 7acde34..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board
- * Note: Based-on AMD Seattle Rev.B0
- *
- * Copyright (C) 2015 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
-       model = "Linaro 96Boards Enterprise Edition Server (Husky) Board";
-       compatible = "amd,seattle-overdrive", "amd,seattle";
-
-       chosen {
-               stdout-path = &serial0;
-       };
-
-       psci {
-               compatible   = "arm,psci-0.2";
-               method       = "smc";
-       };
-};
-
-&ccp0 {
-       status = "ok";
-       amd,zlib-support = <1>;
-};
-
-/**
- * NOTE: In Rev.B, gpio0 is reserved.
- */
-&gpio1 {
-       status = "ok";
-};
-
-&gpio2 {
-       status = "ok";
-};
-
-&gpio3 {
-       status = "ok";
-};
-
-&gpio4 {
-       status = "ok";
-};
-
-&i2c0 {
-       status = "ok";
-};
-
-&i2c1 {
-       status = "ok";
-};
-
-&pcie0 {
-       status = "ok";
-};
-
-&spi0 {
-       status = "ok";
-};
-
-&spi1 {
-       status = "ok";
-       sdcard0: sdcard@0 {
-               compatible = "mmc-spi-slot";
-               reg = <0>;
-               spi-max-frequency = <20000000>;
-               voltage-ranges = <3200 3400>;
-               pl022,hierarchy = <0>;
-               pl022,interface = <0>;
-               pl022,com-mode = <0x0>;
-               pl022,rx-level-trig = <0>;
-               pl022,tx-level-trig = <0>;
-       };
-};
-
-&smb0 {
-       /include/ "amd-seattle-xgbe-b.dtsi"
-};
index 01b01e3..35d1939 100644 (file)
                        clock-names = "i2c";
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(1)>;
-                       dmas = <&edma0 1 39>,
-                              <&edma0 1 38>;
-                       dma-names = "tx", "rx";
+                       dmas = <&edma0 1 38>,
+                              <&edma0 1 39>;
+                       dma-names = "rx", "tx";
                        status = "disabled";
                };
 
index 687fea6..4e7bd04 100644 (file)
                        interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(2)>;
-                       dmas = <&edma0 1 39>,
-                              <&edma0 1 38>;
-                       dma-names = "tx", "rx";
+                       dmas = <&edma0 1 38>,
+                              <&edma0 1 39>;
+                       dma-names = "rx", "tx";
                        status = "disabled";
                };
 
index 2298909..161653d 100644 (file)
@@ -67,9 +67,5 @@ struct user {
        unsigned long magic;            /* To uniquely identify a core file */
        char u_comm[32];                /* User command that was responsible */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif
index 0ba4866..ec03d3a 100644 (file)
@@ -50,10 +50,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
-
 #endif /* _ASM_IA64_USER_H */
index 14f40ec..d009f92 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 509d555..61413bf 100644 (file)
@@ -79,9 +79,5 @@ struct user{
   unsigned long magic;         /* To uniquely identify a core file */
   char u_comm[32];             /* User command that was responsible */
 };
-#define NBPG 4096
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index cff570a..2b42c37 100644 (file)
@@ -29,7 +29,7 @@ $(obj)/simpleImage.$(DTB).ub: $(obj)/simpleImage.$(DTB) FORCE
        $(call if_changed,uimage)
 
 $(obj)/simpleImage.$(DTB).unstrip: vmlinux FORCE
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 
 $(obj)/simpleImage.$(DTB).strip: vmlinux FORCE
        $(call if_changed,strip)
index ef00dd3..b84e2cb 100644 (file)
@@ -12,7 +12,7 @@ $(obj)/linked_dtb.o: $(obj)/system.dtb
 # Generate system.dtb from $(DTB).dtb
 ifneq ($(DTB),system)
 $(obj)/system.dtb: $(obj)/$(DTB).dtb
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 endif
 endif
 
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 0a03529..3e4f5ba 100644 (file)
@@ -28,7 +28,7 @@ enum crc_type {
 };
 
 #ifndef TOOLCHAIN_SUPPORTS_CRC
-#define _ASM_MACRO_CRC32(OP, SZ, TYPE)                                   \
+#define _ASM_SET_CRC(OP, SZ, TYPE)                                       \
 _ASM_MACRO_3R(OP, rt, rs, rt2,                                           \
        ".ifnc  \\rt, \\rt2\n\t"                                          \
        ".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
@@ -37,30 +37,36 @@ _ASM_MACRO_3R(OP, rt, rs, rt2,                                                \
                          ((SZ) <<  6) | ((TYPE) << 8))                   \
        _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) |      \
                          ((SZ) << 14) | ((TYPE) << 3)))
-_ASM_MACRO_CRC32(crc32b,  0, 0);
-_ASM_MACRO_CRC32(crc32h,  1, 0);
-_ASM_MACRO_CRC32(crc32w,  2, 0);
-_ASM_MACRO_CRC32(crc32d,  3, 0);
-_ASM_MACRO_CRC32(crc32cb, 0, 1);
-_ASM_MACRO_CRC32(crc32ch, 1, 1);
-_ASM_MACRO_CRC32(crc32cw, 2, 1);
-_ASM_MACRO_CRC32(crc32cd, 3, 1);
-#define _ASM_SET_CRC ""
+#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t"
 #else /* !TOOLCHAIN_SUPPORTS_CRC */
-#define _ASM_SET_CRC ".set\tcrc\n\t"
+#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
+#define _ASM_UNSET_CRC(op, SZ, TYPE)
 #endif
 
-#define _CRC32(crc, value, size, type)         \
-do {                                           \
-       __asm__ __volatile__(                   \
-               ".set   push\n\t"               \
-               _ASM_SET_CRC                    \
-               #type #size "   %0, %1, %0\n\t" \
-               ".set   pop"                    \
-               : "+r" (crc)                    \
-               : "r" (value));                 \
+#define __CRC32(crc, value, op, SZ, TYPE)              \
+do {                                                   \
+       __asm__ __volatile__(                           \
+               ".set   push\n\t"                       \
+               _ASM_SET_CRC(op, SZ, TYPE)              \
+               #op "   %0, %1, %0\n\t"                 \
+               _ASM_UNSET_CRC(op, SZ, TYPE)            \
+               ".set   pop"                            \
+               : "+r" (crc)                            \
+               : "r" (value));                         \
 } while (0)
 
+#define _CRC32_crc32b(crc, value)      __CRC32(crc, value, crc32b, 0, 0)
+#define _CRC32_crc32h(crc, value)      __CRC32(crc, value, crc32h, 1, 0)
+#define _CRC32_crc32w(crc, value)      __CRC32(crc, value, crc32w, 2, 0)
+#define _CRC32_crc32d(crc, value)      __CRC32(crc, value, crc32d, 3, 0)
+#define _CRC32_crc32cb(crc, value)     __CRC32(crc, value, crc32cb, 0, 1)
+#define _CRC32_crc32ch(crc, value)     __CRC32(crc, value, crc32ch, 1, 1)
+#define _CRC32_crc32cw(crc, value)     __CRC32(crc, value, crc32cw, 2, 1)
+#define _CRC32_crc32cd(crc, value)     __CRC32(crc, value, crc32cd, 3, 1)
+
+#define _CRC32(crc, value, size, op) \
+       _CRC32_##op##size(crc, value)
+
 #define CRC32(crc, value, size) \
        _CRC32(crc, value, size, crc32)
 
index 34d179c..dd9d4b0 100644 (file)
 #define DEV3TC         0x01003C
 #define BTCS           0x010040
 #define BTCOMPARE      0x010044
-#define GPIOBASE       0x050000
-/* Offsets relative to GPIOBASE */
-#define GPIOFUNC       0x00
-#define GPIOCFG                0x04
-#define GPIOD          0x08
-#define GPIOILEVEL     0x0C
-#define GPIOISTAT      0x10
-#define GPIONMIEN      0x14
-#define IMASK6         0x38
 #define LO_WPX         (1 << 0)
 #define LO_ALE         (1 << 1)
 #define LO_CLE         (1 << 2)
index 10bf90d..e6b21de 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syshdr := $(srctree)/scripts/syscallhdr.sh
 sysnr := $(srctree)/$(src)/syscallnr.sh
index 64726c6..5204fc6 100644 (file)
@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev;
        clk->cl.con_id = NULL;
        clk->cl.clk = clk;
index 3d5683e..200fe9f 100644 (file)
@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev_name(dev);
        clk->cl.con_id = con;
        clk->cl.clk = clk;
index 917fac1..084f6ca 100644 (file)
@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev;
        clk->cl.con_id = con;
        clk->cl.clk = clk;
@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev;
        clk->cl.con_id = con;
        clk->cl.clk = clk;
@@ -356,24 +360,28 @@ static void clkdev_add_pci(void)
        struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
        /* main pci clock */
-       clk->cl.dev_id = "17000000.pci";
-       clk->cl.con_id = NULL;
-       clk->cl.clk = clk;
-       clk->rate = CLOCK_33M;
-       clk->rates = valid_pci_rates;
-       clk->enable = pci_enable;
-       clk->disable = pmu_disable;
-       clk->module = 0;
-       clk->bits = PMU_PCI;
-       clkdev_add(&clk->cl);
+       if (clk) {
+               clk->cl.dev_id = "17000000.pci";
+               clk->cl.con_id = NULL;
+               clk->cl.clk = clk;
+               clk->rate = CLOCK_33M;
+               clk->rates = valid_pci_rates;
+               clk->enable = pci_enable;
+               clk->disable = pmu_disable;
+               clk->module = 0;
+               clk->bits = PMU_PCI;
+               clkdev_add(&clk->cl);
+       }
 
        /* use internal/external bus clock */
-       clk_ext->cl.dev_id = "17000000.pci";
-       clk_ext->cl.con_id = "external";
-       clk_ext->cl.clk = clk_ext;
-       clk_ext->enable = pci_ext_enable;
-       clk_ext->disable = pci_ext_disable;
-       clkdev_add(&clk_ext->cl);
+       if (clk_ext) {
+               clk_ext->cl.dev_id = "17000000.pci";
+               clk_ext->cl.con_id = "external";
+               clk_ext->cl.clk = clk_ext;
+               clk_ext->enable = pci_ext_enable;
+               clk_ext->disable = pci_ext_disable;
+               clkdev_add(&clk_ext->cl);
+       }
 }
 
 /* xway socs can generate clocks on gpio pins */
@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void)
                char *name;
 
                name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+               if (!name)
+                       continue;
                sprintf(name, "clkout%d", i);
 
                clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+               if (!clk) {
+                       kfree(name);
+                       continue;
+               }
                clk->cl.dev_id = "1f103000.cgu";
                clk->cl.con_id = name;
                clk->cl.clk = clk;
index 94f02ad..29c21b9 100644 (file)
 #include <asm/mach-rc32434/rb.h>
 #include <asm/mach-rc32434/gpio.h>
 
+#define GPIOBASE       0x050000
+/* Offsets relative to GPIOBASE */
+#define GPIOFUNC       0x00
+#define GPIOCFG                0x04
+#define GPIOD          0x08
+#define GPIOILEVEL     0x0C
+#define GPIOISTAT      0x10
+#define GPIONMIEN      0x14
+#define IMASK6         0x38
+
 struct rb532_gpio_chip {
        struct gpio_chip chip;
        void __iomem     *regbase;
index dfc52f6..38d12f4 100644 (file)
@@ -363,6 +363,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
                printk(KERN_INFO "GIO: slot %d : %s (id %x)\n",
                       slotno, name, id);
                gio_dev = kzalloc(sizeof *gio_dev, GFP_KERNEL);
+               if (!gio_dev)
+                       return;
                gio_dev->name = name;
                gio_dev->slotno = slotno;
                gio_dev->id.id = id;
index d63f18d..8440c16 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 99443b8..7fae7e5 100644 (file)
@@ -44,9 +44,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
 #endif /* _ASM_POWERPC_USER_H */
index 5476f62..9d7bd81 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index ea8ec8a..00fd9c5 100644 (file)
@@ -16,6 +16,7 @@ config RISCV
        select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
        select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
        select ARCH_HAS_BINFMT_FLAT
+       select ARCH_HAS_CURRENT_STACK_POINTER
        select ARCH_HAS_DEBUG_VM_PGTABLE
        select ARCH_HAS_DEBUG_VIRTUAL if MMU
        select ARCH_HAS_DEBUG_WX
@@ -47,6 +48,7 @@ config RISCV
        select CLONE_BACKWARDS
        select CLINT_TIMER if !MMU
        select COMMON_CLK
+       select CPU_PM if CPU_IDLE
        select EDAC_SUPPORT
        select GENERIC_ARCH_TOPOLOGY if SMP
        select GENERIC_ATOMIC64 if !64BIT
@@ -533,4 +535,10 @@ source "kernel/power/Kconfig"
 
 endmenu
 
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 source "arch/riscv/kvm/Kconfig"
index c112ab2..34592d0 100644 (file)
@@ -36,6 +36,9 @@ config SOC_VIRT
        select GOLDFISH
        select RTC_DRV_GOLDFISH if RTC_CLASS
        select SIFIVE_PLIC
+       select PM_GENERIC_DOMAINS if PM
+       select PM_GENERIC_DOMAINS_OF if PM && OF
+       select RISCV_SBI_CPUIDLE if CPU_IDLE
        help
          This enables support for QEMU Virt Machine.
 
index 984872f..b9e30df 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 7ba99b4..8d23401 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index be9b12c..24fd83b 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 031c0c2..25341f3 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 7cd10de..30e3017 100644 (file)
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
 CONFIG_SOC_MICROCHIP_POLARFIRE=y
 CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
@@ -64,8 +67,6 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
index 3f42ed8..2438fa3 100644 (file)
@@ -21,7 +21,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index af64b95..9a133e6 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index e1c9864..5269fbb 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
index e0e5c7c..7e5efdc 100644 (file)
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
 CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_ARCH_RV32I=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
@@ -62,8 +65,6 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
index 618d7c5..8c2549b 100644 (file)
 #error "Unexpected __SIZEOF_SHORT__"
 #endif
 
+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+       REG_L t0, _xip_fixup
+       add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+       la t0, __data_loc
+       REG_L t1, _xip_phys_offset
+       sub \reg, \reg, t1
+       add \reg, \reg, t0
+.endm
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h
new file mode 100644 (file)
index 0000000..71fdc60
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+       /*
+        * Add mb() here to ensure that all
+        * IO/MEM accesses are completed prior
+        * to entering WFI.
+        */
+       mb();
+       wait_for_interrupt();
+}
+
+#endif
index 1de233d..21774d8 100644 (file)
@@ -33,6 +33,8 @@ static __always_inline struct task_struct *get_current(void)
 
 #define current get_current()
 
+register unsigned long current_stack_pointer __asm__("sp");
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_CURRENT_H */
index 4254ff2..1075bea 100644 (file)
@@ -2,8 +2,8 @@
 /* Copyright (C) 2017 Andes Technology Corporation */
 #ifdef CONFIG_MODULE_SECTIONS
 SECTIONS {
-       .plt (NOLOAD) : { BYTE(0) }
-       .got (NOLOAD) : { BYTE(0) }
-       .got.plt (NOLOAD) : { BYTE(0) }
+       .plt : { BYTE(0) }
+       .got : { BYTE(0) }
+       .got.plt : { BYTE(0) }
 }
 #endif
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
new file mode 100644 (file)
index 0000000..8be391c
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+       /* Saved and restored by low-level functions */
+       struct pt_regs regs;
+       /* Saved and restored by high-level functions */
+       unsigned long scratch;
+       unsigned long tvec;
+       unsigned long ie;
+#ifdef CONFIG_MMU
+       unsigned long satp;
+#endif
+};
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+               int (*finish)(unsigned long arg,
+                             unsigned long entry,
+                             unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+#endif
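For orientation: __cpu_suspend_enter() and __cpu_resume_enter() are the low-level assembly entry points added below, while cpu_suspend() is the portable wrapper a platform idle or suspend driver calls with a finisher that only returns on failure. A rough sketch of a caller, where the finisher and the firmware call are hypothetical placeholders rather than anything defined by this patch:

#include <asm/suspend.h>

/* Hypothetical finisher: ask platform firmware to suspend the hart and
 * resume at 'entry' with 'context' as its argument; returns only on error. */
static int example_suspend_finisher(unsigned long arg,
                                    unsigned long entry,
                                    unsigned long context)
{
        return example_firmware_hart_suspend(arg, entry, context);
}

static int example_enter_deep_idle(unsigned long state)
{
        /* cpu_suspend() saves CSRs and callee-saved registers, calls the
         * finisher, and returns 0 here once the hart resumes. */
        return cpu_suspend(state, example_suspend_finisher);
}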
index 60da0dc..74d888c 100644 (file)
 #include <asm/page.h>
 #include <linux/const.h>
 
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
 /* thread information allocation */
 #ifdef CONFIG_64BIT
-#define THREAD_SIZE_ORDER      (2)
+#define THREAD_SIZE_ORDER      (2 + KASAN_STACK_ORDER)
 #else
-#define THREAD_SIZE_ORDER      (1)
+#define THREAD_SIZE_ORDER      (1 + KASAN_STACK_ORDER)
 #endif
 #define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
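Worked out for 4 KiB pages, the bump above doubles the kernel stack only when KASAN is enabled: 64-bit goes from order 2 (THREAD_SIZE = 4096 << 2 = 16 KiB) to order 3 (32 KiB), and 32-bit from order 1 (8 KiB) to order 2 (16 KiB).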
 
index e0133d1..87adbe4 100644 (file)
@@ -48,6 +48,8 @@ obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_MODULE_SECTIONS)  += module-sections.o
 
+obj-$(CONFIG_CPU_PM)           += suspend_entry.o suspend.o
+
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += mcount-dyn.o
 
index df0519a..df94443 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
 #include <asm/cpu_ops_sbi.h>
+#include <asm/suspend.h>
 
 void asm_offsets(void);
 
@@ -113,6 +114,8 @@ void asm_offsets(void)
        OFFSET(PT_BADADDR, pt_regs, badaddr);
        OFFSET(PT_CAUSE, pt_regs, cause);
 
+       OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
+
        OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
        OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
        OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
index d2a9361..ccb6177 100644 (file)
@@ -69,11 +69,11 @@ int riscv_of_parent_hartid(struct device_node *node)
                .uprop = #UPROP,                                \
                .isa_ext_id = EXTID,                            \
        }
-/**
+/*
  * Here are the ordering rules of extension naming defined by RISC-V
  * specification :
  * 1. All extensions should be separated from other multi-letter extensions
- *    from other multi-letter extensions by an underscore.
+ *    by an underscore.
  * 2. The first letter following the 'Z' conventionally indicates the most
  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
  *    If multiple 'Z' extensions are named, they should be ordered first
@@ -110,7 +110,7 @@ static void print_isa_ext(struct seq_file *f)
        }
 }
 
-/**
+/*
  * These are the only valid base (single letter) ISA extensions as per the spec.
  * It also specifies the canonical order in which it appears in the spec.
  * Some of the extension may just be a place holder for now (B, K, P, J).
index 2e16f67..4f5a6f8 100644 (file)
@@ -21,7 +21,7 @@ const struct cpu_operations cpu_ops_sbi;
  * be invoked from multiple threads in parallel. Define a per cpu data
  * to handle that.
  */
-DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
 
 static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
                              unsigned long priv)
index ec07f99..893b8bb 100644 (file)
 #include <asm/image.h>
 #include "efi-header.S"
 
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
-       REG_L t0, _xip_fixup
-       add \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-       la t0, __data_loc
-       REG_L t1, _xip_phys_offset
-       sub \reg, \reg, t1
-       add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
 __HEAD
 ENTRY(_start)
        /*
@@ -89,7 +69,8 @@ pe_head_start:
 
 .align 2
 #ifdef CONFIG_MMU
-relocate:
+       .global relocate_enable_mmu
+relocate_enable_mmu:
        /* Relocate return address */
        la a1, kernel_map
        XIP_FIXUP_OFFSET a1
@@ -184,7 +165,7 @@ secondary_start_sbi:
        /* Enable virtual memory and relocate to virtual address */
        la a0, swapper_pg_dir
        XIP_FIXUP_OFFSET a0
-       call relocate
+       call relocate_enable_mmu
 #endif
        call setup_trap_vector
        tail smp_callin
@@ -328,7 +309,7 @@ clear_bss_done:
 #ifdef CONFIG_MMU
        la a0, early_pg_dir
        XIP_FIXUP_OFFSET a0
-       call relocate
+       call relocate_enable_mmu
 #endif /* CONFIG_MMU */
 
        call setup_trap_vector
index 4a48287..c29cef9 100644 (file)
@@ -69,7 +69,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
        return 0;
 }
 
-static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
        ptrdiff_t offset = (void *)v - (void *)location;
@@ -301,7 +301,7 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
        [R_RISCV_64]                    = apply_r_riscv_64_rela,
        [R_RISCV_BRANCH]                = apply_r_riscv_branch_rela,
        [R_RISCV_JAL]                   = apply_r_riscv_jal_rela,
-       [R_RISCV_RVC_BRANCH]            = apply_r_riscv_rcv_branch_rela,
+       [R_RISCV_RVC_BRANCH]            = apply_r_riscv_rvc_branch_rela,
        [R_RISCV_RVC_JUMP]              = apply_r_riscv_rvc_jump_rela,
        [R_RISCV_PCREL_HI20]            = apply_r_riscv_pcrel_hi20_rela,
        [R_RISCV_PCREL_LO12_I]          = apply_r_riscv_pcrel_lo12_i_rela,
index 55faa49..3348a61 100644 (file)
@@ -68,7 +68,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 
 static bool fill_callchain(void *entry, unsigned long pc)
 {
-       return perf_callchain_store(entry, pc);
+       return perf_callchain_store(entry, pc) == 0;
 }
 
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
index 03ac3aa..504b496 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/string.h>
 #include <asm/switch_to.h>
 #include <asm/thread_info.h>
+#include <asm/cpuidle.h>
 
 register unsigned long gp_in_global __asm__("gp");
 
@@ -37,7 +38,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
 
 void arch_cpu_idle(void)
 {
-       wait_for_interrupt();
+       cpu_do_idle();
        raw_local_irq_enable();
 }
 
index 14d2b53..08d11a5 100644 (file)
@@ -14,8 +14,6 @@
 
 #include <asm/stacktrace.h>
 
-register unsigned long sp_in_global __asm__("sp");
-
 #ifdef CONFIG_FRAME_POINTER
 
 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
@@ -30,7 +28,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                fp = (unsigned long)__builtin_frame_address(0);
-               sp = sp_in_global;
+               sp = current_stack_pointer;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
@@ -78,7 +76,7 @@ void notrace walk_stackframe(struct task_struct *task,
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
-               sp = sp_in_global;
+               sp = current_stack_pointer;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
new file mode 100644 (file)
index 0000000..9ba24fb
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/ftrace.h>
+#include <asm/csr.h>
+#include <asm/suspend.h>
+
+static void suspend_save_csrs(struct suspend_context *context)
+{
+       context->scratch = csr_read(CSR_SCRATCH);
+       context->tvec = csr_read(CSR_TVEC);
+       context->ie = csr_read(CSR_IE);
+
+       /*
+        * No need to save/restore IP CSR (i.e. MIP or SIP) because:
+        *
+        * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
+        *    external devices (such as interrupt controller, timer, etc).
+        * 2. For MMU (S-mode) kernel, the bits in SIP are set by
+        *    M-mode firmware and external devices (such as interrupt
+        *    controller, etc).
+        */
+
+#ifdef CONFIG_MMU
+       context->satp = csr_read(CSR_SATP);
+#endif
+}
+
+static void suspend_restore_csrs(struct suspend_context *context)
+{
+       csr_write(CSR_SCRATCH, context->scratch);
+       csr_write(CSR_TVEC, context->tvec);
+       csr_write(CSR_IE, context->ie);
+
+#ifdef CONFIG_MMU
+       csr_write(CSR_SATP, context->satp);
+#endif
+}
+
+int cpu_suspend(unsigned long arg,
+               int (*finish)(unsigned long arg,
+                             unsigned long entry,
+                             unsigned long context))
+{
+       int rc = 0;
+       struct suspend_context context = { 0 };
+
+       /* Finisher should be non-NULL */
+       if (!finish)
+               return -EINVAL;
+
+       /* Save additional CSRs */
+       suspend_save_csrs(&context);
+
+       /*
+        * Function graph tracer state gets incosistent when the kernel
+        * Function graph tracer state gets inconsistent when the kernel
+        * graph tracing during their execution.
+        */
+       pause_graph_tracing();
+
+       /* Save context on stack */
+       if (__cpu_suspend_enter(&context)) {
+               /* Call the finisher */
+               rc = finish(arg, __pa_symbol(__cpu_resume_enter),
+                           (ulong)&context);
+
+               /*
+                * Should never reach here, unless the suspend finisher
+                * fails. Successful cpu_suspend() should return from
+                * __cpu_resume_enter()
+                */
+               if (!rc)
+                       rc = -EOPNOTSUPP;
+       }
+
+       /* Enable function graph tracer */
+       unpause_graph_tracing();
+
+       /* Restore additional CSRs */
+       suspend_restore_csrs(&context);
+
+       return rc;
+}
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
new file mode 100644 (file)
index 0000000..4b07b80
--- /dev/null
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+       .text
+       .altmacro
+       .option norelax
+
+ENTRY(__cpu_suspend_enter)
+       /* Save registers (except A0 and T0-T6) */
+       REG_S   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+       REG_S   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+       REG_S   gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+       REG_S   tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+       REG_S   s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+       REG_S   s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+       REG_S   a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+       REG_S   a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+       REG_S   a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+       REG_S   a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+       REG_S   a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+       REG_S   a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+       REG_S   a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+       REG_S   s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+       REG_S   s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+       REG_S   s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+       REG_S   s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+       REG_S   s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+       REG_S   s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+       REG_S   s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+       REG_S   s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+       REG_S   s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+       REG_S   s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+       /* Save CSRs */
+       csrr    t0, CSR_EPC
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+       csrr    t0, CSR_STATUS
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+       csrr    t0, CSR_TVAL
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+       csrr    t0, CSR_CAUSE
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+
+       /* Return non-zero value */
+       li      a0, 1
+
+       /* Return to C code */
+       ret
+END(__cpu_suspend_enter)
+
+ENTRY(__cpu_resume_enter)
+       /* Load the global pointer */
+       .option push
+       .option norelax
+               la gp, __global_pointer$
+       .option pop
+
+#ifdef CONFIG_MMU
+       /* Save A0 and A1 */
+       add     t0, a0, zero
+       add     t1, a1, zero
+
+       /* Enable MMU */
+       la      a0, swapper_pg_dir
+       XIP_FIXUP_OFFSET a0
+       call    relocate_enable_mmu
+
+       /* Restore A0 and A1 */
+       add     a0, t0, zero
+       add     a1, t1, zero
+#endif
+
+       /* Make A0 point to suspend context */
+       add     a0, a1, zero
+
+       /* Restore CSRs */
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+       csrw    CSR_EPC, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+       csrw    CSR_STATUS, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+       csrw    CSR_TVAL, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+       csrw    CSR_CAUSE, t0
+
+       /* Restore registers (except A0 and T0-T6) */
+       REG_L   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+       REG_L   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+       REG_L   gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+       REG_L   tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+       REG_L   s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+       REG_L   s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+       REG_L   a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+       REG_L   a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+       REG_L   a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+       REG_L   a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+       REG_L   a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+       REG_L   a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+       REG_L   a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+       REG_L   s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+       REG_L   s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+       REG_L   s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+       REG_L   s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+       REG_L   s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+       REG_L   s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+       REG_L   s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+       REG_L   s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+       REG_L   s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+       REG_L   s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+       /* Return zero value */
+       add     a0, zero, zero
+
+       /* Return to C code */
+       ret
+END(__cpu_resume_enter)
index 9b80e8b..77b5a03 100644 (file)
@@ -58,6 +58,7 @@ config S390
        select ALTERNATE_USER_ADDRESS_SPACE
        select ARCH_32BIT_USTAT_F_TINODE
        select ARCH_BINFMT_ELF_STATE
+       select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
        select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
        select ARCH_ENABLE_MEMORY_HOTREMOVE
        select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
index 955d620..bb3837d 100644 (file)
  * a 2-byte nop if the size of the area is not divisible by 6.
  */
 .macro alt_pad_fill bytes
-       .fill   ( \bytes ) / 6, 6, 0xc0040000
-       .fill   ( \bytes ) % 6 / 4, 4, 0x47000000
-       .fill   ( \bytes ) % 6 % 4 / 2, 2, 0x0700
+       .rept   ( \bytes ) / 6
+       brcl    0,0
+       .endr
+       .rept   ( \bytes ) % 6 / 4
+       nop
+       .endr
+       .rept   ( \bytes ) % 6 % 4 / 2
+       nopr
+       .endr
 .endm
 
 /*
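Making the padding arithmetic above concrete: for a 14-byte area, \bytes / 6 = 2 emits two 6-byte brcl 0,0, 14 % 6 = 2 leaves no room for a 4-byte nop, and 14 % 6 % 4 / 2 = 1 adds a single 2-byte nopr, for exactly 14 bytes; the C-string variant in alternative.h below emits the same sequence.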
index d3880ca..3f2856e 100644 (file)
@@ -71,11 +71,18 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
        ".if " oldinstr_pad_len(num) " > 6\n"                           \
        "\tjg " e_oldinstr_pad_end "f\n"                                \
        "6620:\n"                                                       \
-       "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+       "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n"      \
+       "\tnopr\n"                                                      \
        ".else\n"                                                       \
-       "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"        \
-       "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"   \
-       "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"  \
+       "\t.rept " oldinstr_pad_len(num) " / 6\n"                       \
+       "\tbrcl\t0,0\n"                                                 \
+       "\t.endr\n"                                                     \
+       "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n"                  \
+       "\tnop\n"                                                       \
+       "\t.endr\n"                                                     \
+       "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n"             \
+       "\tnopr\n"                                                      \
+       ".endr\n"                                                       \
        ".endif\n"
 
 #define OLDINSTR(oldinstr, num)                                                \
index ae75da5..b515cfa 100644 (file)
@@ -60,11 +60,11 @@ static inline bool ap_instructions_available(void)
        unsigned long reg1 = 0;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid into gr0 */
-               "       lghi    1,0\n"         /* 0 into gr1 */
-               "       lghi    2,0\n"         /* 0 into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(TAPQ) */
-               "0:     la      %[reg1],1\n"   /* 1 into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid into gr0 */
+               "       lghi    1,0\n"                  /* 0 into gr1 */
+               "       lghi    2,0\n"                  /* 0 into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(TAPQ) */
+               "0:     la      %[reg1],1\n"            /* 1 into reg1 */
                "1:\n"
                EX_TABLE(0b, 1b)
                : [reg1] "+&d" (reg1)
@@ -86,11 +86,11 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
        unsigned long reg2;
 
        asm volatile(
-               "       lgr     0,%[qid]\n"    /* qid into gr0 */
-               "       lghi    2,0\n"         /* 0 into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(TAPQ) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
-               "       lgr     %[reg2],2\n"   /* gr2 into reg2 */
+               "       lgr     0,%[qid]\n"             /* qid into gr0 */
+               "       lghi    2,0\n"                  /* 0 into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(TAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
+               "       lgr     %[reg2],2\n"            /* gr2 into reg2 */
                : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2)
                : [qid] "d" (qid)
                : "cc", "0", "1", "2");
@@ -128,9 +128,9 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
        struct ap_queue_status reg1;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"  /* qid arg into gr0 */
-               "       .long   0xb2af0000\n" /* PQAP(RAPQ) */
-               "       lgr     %[reg1],1\n"  /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid arg into gr0 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(RAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "=&d" (reg1)
                : [reg0] "d" (reg0)
                : "cc", "0", "1");
@@ -149,9 +149,9 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
        struct ap_queue_status reg1;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid arg into gr0 */
-               "       .long   0xb2af0000\n"  /* PQAP(ZAPQ) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid arg into gr0 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(ZAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "=&d" (reg1)
                : [reg0] "d" (reg0)
                : "cc", "0", "1");
@@ -190,10 +190,10 @@ static inline int ap_qci(struct ap_config_info *config)
        struct ap_config_info *reg2 = config;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* QCI fc into gr0 */
-               "       lgr     2,%[reg2]\n"   /* ptr to config into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(QCI) */
-               "0:     la      %[reg1],0\n"   /* good case, QCI fc available */
+               "       lgr     0,%[reg0]\n"            /* QCI fc into gr0 */
+               "       lgr     2,%[reg2]\n"            /* ptr to config into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(QCI) */
+               "0:     la      %[reg1],0\n"            /* good case, QCI fc available */
                "1:\n"
                EX_TABLE(0b, 1b)
                : [reg1] "+&d" (reg1)
@@ -246,11 +246,11 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
        reg1.qirqctrl = qirqctrl;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid param into gr0 */
-               "       lgr     1,%[reg1]\n"   /* irq ctrl into gr1 */
-               "       lgr     2,%[reg2]\n"   /* ni addr into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(AQIC) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid param into gr0 */
+               "       lgr     1,%[reg1]\n"            /* irq ctrl into gr1 */
+               "       lgr     2,%[reg2]\n"            /* ni addr into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(AQIC) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "+&d" (reg1)
                : [reg0] "d" (reg0), [reg2] "d" (reg2)
                : "cc", "0", "1", "2");
@@ -297,11 +297,11 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
        reg1.value = apinfo->val;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid param into gr0 */
-               "       lgr     1,%[reg1]\n"   /* qact in info into gr1 */
-               "       .long   0xb2af0000\n"  /* PQAP(QACT) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
-               "       lgr     %[reg2],2\n"   /* qact out info into reg2 */
+               "       lgr     0,%[reg0]\n"            /* qid param into gr0 */
+               "       lgr     1,%[reg1]\n"            /* qact in info into gr1 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(QACT) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
+               "       lgr     %[reg2],2\n"            /* qact out info into reg2 */
                : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2)
                : [reg0] "d" (reg0)
                : "cc", "0", "1", "2");
index c800199..82388da 100644 (file)
@@ -74,8 +74,17 @@ static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
        __ctl_load(reg, cr, cr);
 }
 
-void smp_ctl_set_bit(int cr, int bit);
-void smp_ctl_clear_bit(int cr, int bit);
+void smp_ctl_set_clear_bit(int cr, int bit, bool set);
+
+static inline void ctl_set_bit(int cr, int bit)
+{
+       smp_ctl_set_clear_bit(cr, bit, true);
+}
+
+static inline void ctl_clear_bit(int cr, int bit)
+{
+       smp_ctl_set_clear_bit(cr, bit, false);
+}
 
 union ctlreg0 {
        unsigned long val;
@@ -130,8 +139,5 @@ union ctlreg15 {
        };
 };
 
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_CTL_REG_H */
index 84ec631..eee8d96 100644 (file)
@@ -319,11 +319,18 @@ extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
 extern int memcpy_real(void *, unsigned long, size_t);
 extern void memcpy_absolute(void *, void *, size_t);
 
-#define mem_assign_absolute(dest, val) do {                    \
-       __typeof__(dest) __tmp = (val);                         \
-                                                               \
-       BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));             \
-       memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));        \
+#define put_abs_lowcore(member, x) do {                                        \
+       unsigned long __abs_address = offsetof(struct lowcore, member); \
+       __typeof__(((struct lowcore *)0)->member) __tmp = (x);          \
+                                                                       \
+       memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp));    \
+} while (0)
+
+#define get_abs_lowcore(x, member) do {                                        \
+       unsigned long __abs_address = offsetof(struct lowcore, member); \
+       __typeof__(((struct lowcore *)0)->member) *__ptr = &(x);        \
+                                                                       \
+       memcpy_absolute(__ptr, __va(__abs_address), sizeof(*__ptr));    \
 } while (0)
 
 extern int s390_isolate_bp(void);
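Spelled out for one member, to make the offsetof()/__va() indirection explicit, put_abs_lowcore(ipib, val) expands to roughly:

do {
        unsigned long __abs_address = offsetof(struct lowcore, ipib);
        __typeof__(((struct lowcore *)0)->ipib) __tmp = (val);

        memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp));
} while (0);

i.e. the value is copied into the absolute (prefix 0) lowcore rather than through the CPU's prefixed lowcore mapping.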
index 888a2f1..24a5444 100644 (file)
@@ -78,7 +78,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
        typecheck(int, lp->lock);
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
                "       sth     %1,%0\n"
                : "=R" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
index ad2c996..fde7e6b 100644 (file)
        __diag_pop();                                                                   \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
-#endif /* _ASM_X86_SYSCALL_WRAPPER_H */
+#endif /* _ASM_S390_SYSCALL_WRAPPER_H */
index 5ebf534..0bf06f1 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/sched.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
+#include <linux/llist.h>
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
@@ -36,10 +38,21 @@ struct unwind_state {
        struct pt_regs *regs;
        unsigned long sp, ip;
        int graph_idx;
+       struct llist_node *kr_cur;
        bool reliable;
        bool error;
 };
 
+/* Recover the return address modified by kretprobe and ftrace_graph. */
+static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
+                                                   unsigned long ip)
+{
+       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
+       if (is_kretprobe_trampoline(ip))
+               ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur);
+       return ip;
+}
+
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs, unsigned long first_frame);
 bool unwind_next_frame(struct unwind_state *state);
index 0ca572c..8e8aaf4 100644 (file)
@@ -67,9 +67,5 @@ struct user {
   unsigned long magic;         /* To uniquely identify a core file */
   char u_comm[32];             /* User command that was responsible */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif /* _S390_USER_H */
index a601a51..59b69c8 100644 (file)
@@ -121,22 +121,22 @@ _LPP_OFFSET       = __LC_LPP
        .endm
 
        .macro BPOFF
-       ALTERNATIVE "", ".long 0xb2e8c000", 82
+       ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82
        .endm
 
        .macro BPON
-       ALTERNATIVE "", ".long 0xb2e8d000", 82
+       ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82
        .endm
 
        .macro BPENTER tif_ptr,tif_mask
-       ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
+       ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
                    "", 82
        .endm
 
        .macro BPEXIT tif_ptr,tif_mask
        TSTMSK  \tif_ptr,\tif_mask
-       ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
-                   "jnz .+8; .long 0xb2e8d000", 82
+       ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
+                   "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
        .endm
 
        /*
index 28ae7df..1cc85b8 100644 (file)
@@ -1646,8 +1646,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
 
        csum = (__force unsigned int)
               csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
-       mem_assign_absolute(S390_lowcore.ipib, ipib);
-       mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
+       put_abs_lowcore(ipib, ipib);
+       put_abs_lowcore(ipib_checksum, csum);
        dump_run(trigger);
 }
 
index e32c14f..0032bdb 100644 (file)
@@ -284,11 +284,11 @@ NOKPROBE_SYMBOL(pop_kprobe);
 
 void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
-       ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
-       ri->fp = NULL;
+       ri->ret_addr = (kprobe_opcode_t *)regs->gprs[14];
+       ri->fp = (void *)regs->gprs[15];
 
        /* Replace the return addr with trampoline addr */
-       regs->gprs[14] = (unsigned long) &__kretprobe_trampoline;
+       regs->gprs[14] = (unsigned long)&__kretprobe_trampoline;
 }
 NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
@@ -385,7 +385,7 @@ NOKPROBE_SYMBOL(arch_kretprobe_fixup_return);
  */
 void trampoline_probe_handler(struct pt_regs *regs)
 {
-       kretprobe_trampoline_handler(regs, NULL);
+       kretprobe_trampoline_handler(regs, (void *)regs->gprs[15]);
 }
 NOKPROBE_SYMBOL(trampoline_probe_handler);
 
index 088d57a..b2ef014 100644 (file)
@@ -226,7 +226,7 @@ void arch_crash_save_vmcoreinfo(void)
        vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
        vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-       mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+       put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note());
 }
 
 void machine_shutdown(void)
index 6b5b64e..1acc2e0 100644 (file)
@@ -63,7 +63,7 @@ void __init os_info_init(void)
        os_info.version_minor = OS_INFO_VERSION_MINOR;
        os_info.magic = OS_INFO_MAGIC;
        os_info.csum = os_info_csum(&os_info);
-       mem_assign_absolute(S390_lowcore.os_info, __pa(ptr));
+       put_abs_lowcore(os_info, __pa(ptr));
 }
 
 #ifdef CONFIG_CRASH_DUMP
index 84e23fc..d860ac3 100644 (file)
@@ -481,11 +481,11 @@ static void __init setup_lowcore_dat_off(void)
        lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
 
        /* Setup absolute zero lowcore */
-       mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
-       mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
-       mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
-       mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
-       mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
+       put_abs_lowcore(restart_stack, lc->restart_stack);
+       put_abs_lowcore(restart_fn, lc->restart_fn);
+       put_abs_lowcore(restart_data, lc->restart_data);
+       put_abs_lowcore(restart_source, lc->restart_source);
+       put_abs_lowcore(restart_psw, lc->restart_psw);
 
        lc->spinlock_lockval = arch_spin_lockval(0);
        lc->spinlock_index = 0;
@@ -501,6 +501,7 @@ static void __init setup_lowcore_dat_off(void)
 static void __init setup_lowcore_dat_on(void)
 {
        struct lowcore *lc = lowcore_ptr[0];
+       int cr;
 
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
@@ -509,10 +510,10 @@ static void __init setup_lowcore_dat_on(void)
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
        __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
-       mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
-       mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
-       memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
-                       sizeof(S390_lowcore.cregs_save_area));
+       put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS);
+       put_abs_lowcore(program_new_psw, lc->program_new_psw);
+       for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++)
+               put_abs_lowcore(cregs_save_area[cr], lc->cregs_save_area[cr]);
 }
 
 static struct resource code_resource = {
index 127da18..30c91d5 100644 (file)
@@ -213,7 +213,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        if (nmi_alloc_mcesa(&lc->mcesad))
                goto out;
        lowcore_ptr[cpu] = lc;
-       pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
+       pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
        return 0;
 
 out:
@@ -326,10 +326,17 @@ static void pcpu_delegate(struct pcpu *pcpu,
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
-       mem_assign_absolute(lc->restart_stack, stack);
-       mem_assign_absolute(lc->restart_fn, (unsigned long) func);
-       mem_assign_absolute(lc->restart_data, (unsigned long) data);
-       mem_assign_absolute(lc->restart_source, source_cpu);
+       if (lc) {
+               lc->restart_stack = stack;
+               lc->restart_fn = (unsigned long)func;
+               lc->restart_data = (unsigned long)data;
+               lc->restart_source = source_cpu;
+       } else {
+               put_abs_lowcore(restart_stack, stack);
+               put_abs_lowcore(restart_fn, (unsigned long)func);
+               put_abs_lowcore(restart_data, (unsigned long)data);
+               put_abs_lowcore(restart_source, source_cpu);
+       }
        __bpon();
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
@@ -570,39 +577,27 @@ static void smp_ctl_bit_callback(void *info)
 }
 
 static DEFINE_SPINLOCK(ctl_lock);
-static unsigned long ctlreg;
 
-/*
- * Set a bit in a control register of all cpus
- */
-void smp_ctl_set_bit(int cr, int bit)
+void smp_ctl_set_clear_bit(int cr, int bit, bool set)
 {
-       struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
-
-       spin_lock(&ctl_lock);
-       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
-       __set_bit(bit, &ctlreg);
-       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
-       spin_unlock(&ctl_lock);
-       on_each_cpu(smp_ctl_bit_callback, &parms, 1);
-}
-EXPORT_SYMBOL(smp_ctl_set_bit);
-
-/*
- * Clear a bit in a control register of all cpus
- */
-void smp_ctl_clear_bit(int cr, int bit)
-{
-       struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
+       struct ec_creg_mask_parms parms = { .cr = cr, };
+       u64 ctlreg;
 
+       if (set) {
+               parms.orval = 1UL << bit;
+               parms.andval = -1UL;
+       } else {
+               parms.orval = 0;
+               parms.andval = ~(1UL << bit);
+       }
        spin_lock(&ctl_lock);
-       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
-       __clear_bit(bit, &ctlreg);
-       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       get_abs_lowcore(ctlreg, cregs_save_area[cr]);
+       ctlreg = (ctlreg & parms.andval) | parms.orval;
+       put_abs_lowcore(cregs_save_area[cr], ctlreg);
        spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
-EXPORT_SYMBOL(smp_ctl_clear_bit);
+EXPORT_SYMBOL(smp_ctl_set_clear_bit);
 
 #ifdef CONFIG_CRASH_DUMP
 
index b98f250..fb85e79 100644 (file)
@@ -21,8 +21,7 @@ uapi: $(uapi-hdrs-y)
 
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 filechk_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
 
index 674c650..1d2aa44 100644 (file)
@@ -141,10 +141,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
        do_trap(regs, SIGFPE, si_code, "floating point exception");
 }
 
-static void translation_exception(struct pt_regs *regs)
+static void translation_specification_exception(struct pt_regs *regs)
 {
        /* May never happen. */
-       panic("Translation exception");
+       panic("Translation-Specification Exception");
 }
 
 static void illegal_op(struct pt_regs *regs)
@@ -368,7 +368,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
        [0x0f]          = hfp_divide_exception,
        [0x10]          = do_dat_exception,
        [0x11]          = do_dat_exception,
-       [0x12]          = translation_exception,
+       [0x12]          = translation_specification_exception,
        [0x13]          = special_op_exception,
        [0x14]          = default_trap_handler,
        [0x15]          = operand_exception,
index 707fd99..0ece156 100644 (file)
@@ -64,8 +64,8 @@ bool unwind_next_frame(struct unwind_state *state)
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
                reliable = false;
                regs = NULL;
-               if (!__kernel_text_address(ip)) {
-                       /* skip bogus %r14 */
+               /* skip bogus %r14 or if it is the same as regs->psw.addr */
+               if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
                        state->regs = NULL;
                        return unwind_next_frame(state);
                }
@@ -103,13 +103,11 @@ bool unwind_next_frame(struct unwind_state *state)
        if (sp & 0x7)
                goto out_err;
 
-       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);
-
        /* Update unwind state */
        state->sp = sp;
-       state->ip = ip;
        state->regs = regs;
        state->reliable = reliable;
+       state->ip = unwind_recover_ret_addr(state, ip);
        return true;
 
 out_err:
@@ -161,12 +159,10 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
        }
 
-       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
-
        /* Update unwind state */
        state->sp = sp;
-       state->ip = ip;
        state->reliable = true;
+       state->ip = unwind_recover_ret_addr(state, ip);
 
        if (!first_frame)
                return;
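Both the mid-unwind and the start-of-unwind paths now funnel the candidate return address through unwind_recover_ret_addr() instead of calling ftrace_graph_ret_addr() directly, so ftrace-graph and kretprobe trampoline addresses are recovered in one place. A rough sketch of such a helper, assuming the unwind state carries a kr_cur cursor for kretprobe_find_ret_addr(); the real header version may differ:

    /* Sketch: combined recovery of ftrace-graph and kretprobe return addresses. */
    static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
    						    unsigned long ip)
    {
    	ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
    	if (is_kretprobe_trampoline(ip))
    		ip = kretprobe_find_ret_addr(state->task, (void *)state->sp,
    					     &state->kr_cur);
    	return ip;
    }
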
index ab73d99..156d1c2 100644 (file)
@@ -3462,7 +3462,7 @@ void exit_sie(struct kvm_vcpu *vcpu)
 /* Kick a guest cpu out of SIE to process a request synchronously */
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-       kvm_make_request(req, vcpu);
+       __kvm_make_request(req, vcpu);
        kvm_s390_vcpu_request(vcpu);
 }
 
index 692dc84..5e7ea8b 100644 (file)
@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock)
        int owner;
 
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
                "       l       %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
        int expected = old;
 
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
                "       cs      %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
index c01f028..9bb0673 100644 (file)
@@ -47,7 +47,7 @@ static void print_backtrace(char *bt)
 static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                                unsigned long sp)
 {
-       int frame_count, prev_is_func2, seen_func2_func1;
+       int frame_count, prev_is_func2, seen_func2_func1, seen_kretprobe_trampoline;
        const int max_frames = 128;
        struct unwind_state state;
        size_t bt_pos = 0;
@@ -63,6 +63,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
        frame_count = 0;
        prev_is_func2 = 0;
        seen_func2_func1 = 0;
+       seen_kretprobe_trampoline = 0;
        unwind_for_each_frame(&state, task, regs, sp) {
                unsigned long addr = unwind_get_return_address(&state);
                char sym[KSYM_SYMBOL_LEN];
@@ -88,6 +89,8 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
                        seen_func2_func1 = 1;
                prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
+               if (str_has_prefix(sym, "__kretprobe_trampoline+0x0/"))
+                       seen_kretprobe_trampoline = 1;
        }
 
        /* Check the results. */
@@ -103,6 +106,10 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                kunit_err(current_test, "Maximum number of frames exceeded\n");
                ret = -EINVAL;
        }
+       if (seen_kretprobe_trampoline) {
+               kunit_err(current_test, "__kretprobe_trampoline+0x0 in unwinding results\n");
+               ret = -EINVAL;
+       }
        if (ret || force_bt)
                print_backtrace(bt);
        kfree(bt);
@@ -132,36 +139,50 @@ static struct unwindme *unwindme;
 #define UWM_PGM                        0x40    /* Unwind from program check handler */
 #define UWM_KPROBE_ON_FTRACE   0x80    /* Unwind from kprobe handler called via ftrace. */
 #define UWM_FTRACE             0x100   /* Unwind from ftrace handler. */
-#define UWM_KRETPROBE          0x200   /* Unwind kretprobe handlers. */
+#define UWM_KRETPROBE          0x200   /* Unwind through kretprobed function. */
+#define UWM_KRETPROBE_HANDLER  0x400   /* Unwind from kretprobe handler. */
 
-static __always_inline unsigned long get_psw_addr(void)
+static __always_inline struct pt_regs fake_pt_regs(void)
 {
-       unsigned long psw_addr;
+       struct pt_regs regs;
+
+       memset(&regs, 0, sizeof(regs));
+       regs.gprs[15] = current_stack_pointer();
 
        asm volatile(
                "basr   %[psw_addr],0\n"
-               : [psw_addr] "=d" (psw_addr));
-       return psw_addr;
+               : [psw_addr] "=d" (regs.psw.addr));
+       return regs;
 }
 
 static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct unwindme *u = unwindme;
 
+       if (!(u->flags & UWM_KRETPROBE_HANDLER))
+               return 0;
+
        u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
                             (u->flags & UWM_SP) ? u->sp : 0);
 
        return 0;
 }
 
-static noinline notrace void test_unwind_kretprobed_func(void)
+static noinline notrace int test_unwind_kretprobed_func(struct unwindme *u)
 {
-       asm volatile("  nop\n");
+       struct pt_regs regs;
+
+       if (!(u->flags & UWM_KRETPROBE))
+               return 0;
+
+       regs = fake_pt_regs();
+       return test_unwind(NULL, (u->flags & UWM_REGS) ? &regs : NULL,
+                          (u->flags & UWM_SP) ? u->sp : 0);
 }
 
-static noinline void test_unwind_kretprobed_func_caller(void)
+static noinline int test_unwind_kretprobed_func_caller(struct unwindme *u)
 {
-       test_unwind_kretprobed_func();
+       return test_unwind_kretprobed_func(u);
 }
 
 static int test_unwind_kretprobe(struct unwindme *u)
@@ -187,10 +208,12 @@ static int test_unwind_kretprobe(struct unwindme *u)
                return -EINVAL;
        }
 
-       test_unwind_kretprobed_func_caller();
+       ret = test_unwind_kretprobed_func_caller(u);
        unregister_kretprobe(&my_kretprobe);
        unwindme = NULL;
-       return u->ret;
+       if (u->flags & UWM_KRETPROBE_HANDLER)
+               ret = u->ret;
+       return ret;
 }
 
 static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
@@ -304,16 +327,13 @@ static noinline int unwindme_func4(struct unwindme *u)
                return 0;
        } else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) {
                return test_unwind_kprobe(u);
-       } else if (u->flags & (UWM_KRETPROBE)) {
+       } else if (u->flags & (UWM_KRETPROBE | UWM_KRETPROBE_HANDLER)) {
                return test_unwind_kretprobe(u);
        } else if (u->flags & UWM_FTRACE) {
                return test_unwind_ftrace(u);
        } else {
-               struct pt_regs regs;
+               struct pt_regs regs = fake_pt_regs();
 
-               memset(&regs, 0, sizeof(regs));
-               regs.psw.addr = get_psw_addr();
-               regs.gprs[15] = current_stack_pointer();
                return test_unwind(NULL,
                                   (u->flags & UWM_REGS) ? &regs : NULL,
                                   (u->flags & UWM_SP) ? u->sp : 0);
@@ -452,6 +472,10 @@ static const struct test_params param_list[] = {
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP),
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS),
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_REGS),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP | UWM_REGS),
 };
 
 /*
index 792f8e0..e563cb6 100644 (file)
@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
+                       zpci_zdev_get(zdev);
                        break;
                }
        }
@@ -399,7 +400,7 @@ EXPORT_SYMBOL(pci_iounmap);
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
 {
-       struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+       struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
 
        return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
 }
@@ -407,7 +408,7 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
 {
-       struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+       struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
 
        return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
 }
index e359d26..e96c986 100644 (file)
@@ -19,7 +19,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
 void zpci_release_device(struct kref *kref);
 static inline void zpci_zdev_put(struct zpci_dev *zdev)
 {
-       kref_put(&zdev->kref, zpci_release_device);
+       if (zdev)
+               kref_put(&zdev->kref, zpci_release_device);
 }
 
 static inline void zpci_zdev_get(struct zpci_dev *zdev)
@@ -32,8 +33,8 @@ void zpci_free_domain(int domain);
 int zpci_setup_bus_resources(struct zpci_dev *zdev,
                             struct list_head *resources);
 
-static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
-                                              unsigned int devfn)
+static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
+                                            unsigned int devfn)
 {
        struct zpci_bus *zbus = bus->sysdata;
 
index 63f3e05..1057d7a 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
+#include "pci_bus.h"
+
 bool zpci_unique_uid;
 
 void update_uid_checking(bool new)
@@ -404,8 +406,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
                return;
 
        zdev = get_zdev_by_fid(entry->fid);
-       if (!zdev)
-               zpci_create_device(entry->fid, entry->fh, entry->config_state);
+       if (zdev) {
+               zpci_zdev_put(zdev);
+               return;
+       }
+       zpci_create_device(entry->fid, entry->fh, entry->config_state);
 }
 
 int clp_scan_pci_devices(void)
index 2e3e5b2..ea9db5c 100644 (file)
@@ -269,7 +269,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
               pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 
        if (!pdev)
-               return;
+               goto no_pdev;
 
        switch (ccdf->pec) {
        case 0x003a: /* Service Action or Error Recovery Successful */
@@ -286,6 +286,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
                break;
        }
        pci_dev_put(pdev);
+no_pdev:
+       zpci_zdev_put(zdev);
 }
 
 void zpci_event_error(void *data)
@@ -314,6 +316,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
        struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+       bool existing_zdev = !!zdev;
        enum zpci_state state;
 
        zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
@@ -378,6 +381,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        default:
                break;
        }
+       if (existing_zdev)
+               zpci_zdev_put(zdev);
 }
 
 void zpci_event_availability(void *data)
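get_zdev_by_fid() now takes a reference on the zpci_dev it returns, and zpci_zdev_put() tolerates NULL, so every lookup has to be paired with a put once the device is no longer needed. The resulting caller contract, sketched from the hunks above:

    struct zpci_dev *zdev = get_zdev_by_fid(fid);	/* takes a reference if found */

    if (zdev) {
    	/* ... use zdev ... */
    	zpci_zdev_put(zdev);				/* drop the reference when done */
    }
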
index 7dfd3f6..12ea0f3 100644 (file)
@@ -52,10 +52,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
-
 #endif /* __ASM_SH_USER_H */
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index d63f18d..8440c16 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 6ead1e2..8ca67a6 100644 (file)
@@ -224,7 +224,7 @@ void mconsole_go(struct mc_request *req)
 
 void mconsole_stop(struct mc_request *req)
 {
-       deactivate_fd(req->originating_fd, MCONSOLE_IRQ);
+       block_signals();
        os_set_fd_block(req->originating_fd, 1);
        mconsole_reply(req, "stopped", 0, 0);
        for (;;) {
@@ -247,6 +247,7 @@ void mconsole_stop(struct mc_request *req)
        }
        os_set_fd_block(req->originating_fd, 0);
        mconsole_reply(req, "", 0, 0);
+       unblock_signals();
 }
 
 static DEFINE_SPINLOCK(mc_devices_lock);
index 5b5b64c..3c62ae8 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <errno.h>
 #include <termios.h>
 #include <unistd.h>
@@ -167,14 +168,29 @@ static void port_pre_exec(void *arg)
 int port_connection(int fd, int *socket, int *pid_out)
 {
        int new, err;
-       char *argv[] = { "/usr/sbin/in.telnetd", "-L",
+       char *env;
+       char *argv[] = { "in.telnetd", "-L",
                         OS_LIB_PATH "/uml/port-helper", NULL };
        struct port_pre_exec_data data;
 
+       if ((env = getenv("UML_PORT_HELPER")))
+               argv[2] = env;
+
        new = accept(fd, NULL, 0);
        if (new < 0)
                return -errno;
 
+       err = os_access(argv[2], X_OK);
+       if (err < 0) {
+               printk(UM_KERN_ERR "port_connection : error accessing port-helper "
+                      "executable at %s: %s\n", argv[2], strerror(-err));
+               if (env == NULL)
+                       printk(UM_KERN_ERR "Set UML_PORT_HELPER environment "
+                               "variable to path to uml-utilities port-helper "
+                               "binary\n");
+               goto out_close;
+       }
+
        err = os_pipe(socket, 0, 0);
        if (err < 0)
                goto out_close;
index 69d2d00..b03269f 100644 (file)
@@ -1526,13 +1526,19 @@ static void do_io(struct io_thread_req *req, struct io_desc *desc)
                        }
                        break;
                case REQ_OP_DISCARD:
-               case REQ_OP_WRITE_ZEROES:
                        n = os_falloc_punch(req->fds[bit], off, len);
                        if (n) {
                                req->error = map_error(-n);
                                return;
                        }
                        break;
+               case REQ_OP_WRITE_ZEROES:
+                       n = os_falloc_zeroes(req->fds[bit], off, len);
+                       if (n) {
+                               req->error = map_error(-n);
+                               return;
+                       }
+                       break;
                default:
                        WARN_ON_ONCE(1);
                        req->error = BLK_STS_NOTSUPP;
index 4fc1a5d..1d6f6a6 100644 (file)
@@ -67,6 +67,7 @@ static LIST_HEAD(vector_devices);
 static int driver_registered;
 
 static void vector_eth_configure(int n, struct arglist *def);
+static int vector_mmsg_rx(struct vector_private *vp, int budget);
 
 /* Argument accessors to set variables (and/or set default values)
  * mtu, buffer sizing, default headroom, etc
@@ -77,7 +78,6 @@ static void vector_eth_configure(int n, struct arglist *def);
 #define DEFAULT_VECTOR_SIZE 64
 #define TX_SMALL_PACKET 128
 #define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
-#define MAX_ITERATIONS 64
 
 static const struct {
        const char string[ETH_GSTRING_LEN];
@@ -458,7 +458,6 @@ static int vector_send(struct vector_queue *qi)
                                        vp->estats.tx_queue_running_average =
                                                (vp->estats.tx_queue_running_average + result) >> 1;
                                }
-                               netif_trans_update(qi->dev);
                                netif_wake_queue(qi->dev);
                                /* if TX is busy, break out of the send loop,
                                 *  poll write IRQ will reschedule xmit for us
@@ -470,8 +469,6 @@ static int vector_send(struct vector_queue *qi)
                        }
                }
                spin_unlock(&qi->head_lock);
-       } else {
-               tasklet_schedule(&vp->tx_poll);
        }
        return queue_depth;
 }
@@ -608,7 +605,7 @@ out_fail:
 
 /*
  * We do not use the RX queue as a proper wraparound queue for now
- * This is not necessary because the consumption via netif_rx()
+ * This is not necessary because the consumption via napi_gro_receive()
  * happens in-line. While we can try using the return code of
  * netif_rx() for flow control there are no drivers doing this today.
  * For this RX specific use we ignore the tail/head locks and
@@ -896,7 +893,7 @@ static int vector_legacy_rx(struct vector_private *vp)
                        skb->protocol = eth_type_trans(skb, skb->dev);
                        vp->dev->stats.rx_bytes += skb->len;
                        vp->dev->stats.rx_packets++;
-                       netif_rx(skb);
+                       napi_gro_receive(&vp->napi, skb);
                } else {
                        dev_kfree_skb_irq(skb);
                }
@@ -955,7 +952,7 @@ drop:
  * mmsg vector matched to an skb vector which we prepared earlier.
  */
 
-static int vector_mmsg_rx(struct vector_private *vp)
+static int vector_mmsg_rx(struct vector_private *vp, int budget)
 {
        int packet_count, i;
        struct vector_queue *qi = vp->rx_queue;
@@ -972,6 +969,9 @@ static int vector_mmsg_rx(struct vector_private *vp)
 
        /* Fire the Lazy Gun - get as many packets as we can in one go. */
 
+       if (budget > qi->max_depth)
+               budget = qi->max_depth;
+
        packet_count = uml_vector_recvmmsg(
                vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
 
@@ -1021,7 +1021,7 @@ static int vector_mmsg_rx(struct vector_private *vp)
                         */
                        vp->dev->stats.rx_bytes += skb->len;
                        vp->dev->stats.rx_packets++;
-                       netif_rx(skb);
+                       napi_gro_receive(&vp->napi, skb);
                } else {
                        /* Overlay header too short to do anything - discard.
                         * We can actually keep this skb and reuse it,
@@ -1044,23 +1044,6 @@ static int vector_mmsg_rx(struct vector_private *vp)
        return packet_count;
 }
 
-static void vector_rx(struct vector_private *vp)
-{
-       int err;
-       int iter = 0;
-
-       if ((vp->options & VECTOR_RX) > 0)
-               while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-                       iter++;
-       else
-               while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-                       iter++;
-       if ((err != 0) && net_ratelimit())
-               netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
-       if (iter == MAX_ITERATIONS)
-               netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
-}
-
 static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vector_private *vp = netdev_priv(dev);
@@ -1085,25 +1068,15 @@ static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_sent_queue(vp->dev, skb->len);
        queue_depth = vector_enqueue(vp->tx_queue, skb);
 
-       /* if the device queue is full, stop the upper layers and
-        * flush it.
-        */
-
-       if (queue_depth >= vp->tx_queue->max_depth - 1) {
-               vp->estats.tx_kicks++;
-               netif_stop_queue(dev);
-               vector_send(vp->tx_queue);
-               return NETDEV_TX_OK;
-       }
-       if (netdev_xmit_more()) {
+       if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
                mod_timer(&vp->tl, vp->coalesce);
                return NETDEV_TX_OK;
+       } else {
+               queue_depth = vector_send(vp->tx_queue);
+               if (queue_depth > 0)
+                       napi_schedule(&vp->napi);
        }
-       if (skb->len < TX_SMALL_PACKET) {
-               vp->estats.tx_kicks++;
-               vector_send(vp->tx_queue);
-       } else
-               tasklet_schedule(&vp->tx_poll);
+
        return NETDEV_TX_OK;
 }
 
@@ -1114,7 +1087,7 @@ static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
 
        if (!netif_running(dev))
                return IRQ_NONE;
-       vector_rx(vp);
+       napi_schedule(&vp->napi);
        return IRQ_HANDLED;
 
 }
@@ -1133,8 +1106,7 @@ static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
         * tweaking the IRQ mask less costly
         */
 
-       if (vp->in_write_poll)
-               tasklet_schedule(&vp->tx_poll);
+       napi_schedule(&vp->napi);
        return IRQ_HANDLED;
 
 }
@@ -1161,7 +1133,8 @@ static int vector_net_close(struct net_device *dev)
                um_free_irq(vp->tx_irq, dev);
                vp->tx_irq = 0;
        }
-       tasklet_kill(&vp->tx_poll);
+       napi_disable(&vp->napi);
+       netif_napi_del(&vp->napi);
        if (vp->fds->rx_fd > 0) {
                if (vp->bpf)
                        uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
@@ -1193,15 +1166,32 @@ static int vector_net_close(struct net_device *dev)
        return 0;
 }
 
-/* TX tasklet */
-
-static void vector_tx_poll(struct tasklet_struct *t)
+static int vector_poll(struct napi_struct *napi, int budget)
 {
-       struct vector_private *vp = from_tasklet(vp, t, tx_poll);
+       struct vector_private *vp = container_of(napi, struct vector_private, napi);
+       int work_done = 0;
+       int err;
+       bool tx_enqueued = false;
 
-       vp->estats.tx_kicks++;
-       vector_send(vp->tx_queue);
+       if ((vp->options & VECTOR_TX) != 0)
+               tx_enqueued = (vector_send(vp->tx_queue) > 0);
+       if ((vp->options & VECTOR_RX) > 0)
+               err = vector_mmsg_rx(vp, budget);
+       else {
+               err = vector_legacy_rx(vp);
+               if (err > 0)
+                       err = 1;
+       }
+       if (err > 0)
+               work_done += err;
+
+       if (tx_enqueued || err > 0)
+               napi_schedule(napi);
+       if (work_done < budget)
+               napi_complete_done(napi, work_done);
+       return work_done;
 }
+
 static void vector_reset_tx(struct work_struct *work)
 {
        struct vector_private *vp =
@@ -1265,6 +1255,9 @@ static int vector_net_open(struct net_device *dev)
                        goto out_close;
        }
 
+       netif_napi_add(vp->dev, &vp->napi, vector_poll, get_depth(vp->parsed));
+       napi_enable(&vp->napi);
+
        /* READ IRQ */
        err = um_request_irq(
                irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
@@ -1306,15 +1299,15 @@ static int vector_net_open(struct net_device *dev)
                uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
 
        netif_start_queue(dev);
+       vector_reset_stats(vp);
 
        /* clear buffer - it can happen that the host side of the interface
         * is full when we get here. In this case, new data is never queued,
         * SIGIOs never arrive, and the net never works.
         */
 
-       vector_rx(vp);
+       napi_schedule(&vp->napi);
 
-       vector_reset_stats(vp);
        vdevice = find_device(vp->unit);
        vdevice->opened = 1;
 
@@ -1543,15 +1536,16 @@ static const struct net_device_ops vector_netdev_ops = {
 #endif
 };
 
-
 static void vector_timer_expire(struct timer_list *t)
 {
        struct vector_private *vp = from_timer(vp, t, tl);
 
        vp->estats.tx_kicks++;
-       vector_send(vp->tx_queue);
+       napi_schedule(&vp->napi);
 }
 
+
+
 static void vector_eth_configure(
                int n,
                struct arglist *def
@@ -1634,7 +1628,6 @@ static void vector_eth_configure(
        });
 
        dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
-       tasklet_setup(&vp->tx_poll, vector_tx_poll);
        INIT_WORK(&vp->reset_tx, vector_reset_tx);
 
        timer_setup(&vp->tl, vector_timer_expire, 0);
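The tasklet plus netif_rx() scheme is replaced by one NAPI instance per device: the interrupt handlers only call napi_schedule(), and the poll callback drains RX (and kicks TX) within the given budget before re-enabling interrupts via napi_complete_done(). A generic, self-contained sketch of that pattern; example_priv, example_rx and the other example_* names are placeholders, not part of this driver:

    /* Sketch of the generic NAPI conversion pattern; assumes <linux/netdevice.h>. */
    static int example_poll(struct napi_struct *napi, int budget)
    {
    	struct example_priv *priv = container_of(napi, struct example_priv, napi);
    	int work_done = example_rx(priv, budget);	/* receive at most 'budget' packets */

    	if (work_done < budget)
    		napi_complete_done(napi, work_done);	/* done for now, re-enable interrupts */
    	return work_done;
    }

    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
    	struct net_device *dev = dev_id;
    	struct example_priv *priv = netdev_priv(dev);

    	napi_schedule(&priv->napi);			/* defer the real work to example_poll() */
    	return IRQ_HANDLED;
    }

    static int example_open(struct net_device *dev)
    {
    	struct example_priv *priv = netdev_priv(dev);

    	/* 5.18-era four-argument form; later kernels drop the weight argument */
    	netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
    	napi_enable(&priv->napi);
    	return 0;
    }
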
index 8fff93a..2a1fa8e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+
 #include "vector_user.h"
 
 /* Queue structure specially adapted for multiple enqueue/dequeue
@@ -72,6 +73,7 @@ struct vector_private {
        struct list_head list;
        spinlock_t lock;
        struct net_device *dev;
+       struct napi_struct              napi    ____cacheline_aligned;
 
        int unit;
 
@@ -115,7 +117,6 @@ struct vector_private {
 
        spinlock_t stats_lock;
 
-       struct tasklet_struct tx_poll;
        bool rexmit_scheduled;
        bool opened;
        bool in_write_poll;
index e4ffeb9..c650e42 100644 (file)
@@ -771,7 +771,7 @@ int uml_vector_detach_bpf(int fd, void *bpf)
                printk(KERN_ERR BPF_DETACH_FAIL, prog->len, prog->filter, fd, -errno);
        return err;
 }
-void *uml_vector_default_bpf(void *mac)
+void *uml_vector_default_bpf(const void *mac)
 {
        struct sock_filter *bpf;
        uint32_t *mac1 = (uint32_t *)(mac + 2);
index d29d5fd..3a73d17 100644 (file)
@@ -97,7 +97,7 @@ extern int uml_vector_recvmmsg(
        unsigned int vlen,
        unsigned int flags
 );
-extern void *uml_vector_default_bpf(void *mac);
+extern void *uml_vector_default_bpf(const void *mac);
 extern void *uml_vector_user_bpf(char *filename);
 extern int uml_vector_attach_bpf(int fd, void *bpf);
 extern int uml_vector_detach_bpf(int fd, void *bpf);
index b08bd29..f1f3f52 100644 (file)
@@ -24,7 +24,6 @@ generic-y += softirq_stack.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
-generic-y += word-at-a-time.h
 generic-y += kprobes.h
 generic-y += mm_hooks.h
 generic-y += vga.h
index f512704..22b39de 100644 (file)
@@ -4,8 +4,10 @@
 
 #ifdef CONFIG_64BIT
 #undef CONFIG_X86_32
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_sse_pf64))
 #else
 #define CONFIG_X86_32 1
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_8regs))
 #endif
 
 #include <asm/cpufeature.h>
@@ -16,7 +18,7 @@
 #undef XOR_SELECT_TEMPLATE
 /* pick an arbitrary one - measuring isn't possible with inf-cpu */
 #define XOR_SELECT_TEMPLATE(x) \
-       (time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL)
+       (time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x)
 #endif
 
 #endif
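The XOR_SELECT_TEMPLATE() hook matters because the xor calibration code consults it before benchmarking: under infinite-CPU time-travel mode timing is meaningless, so the header now returns a fixed default matching the word size, and outside that mode it passes the x86 selection (the x argument) through instead of forcing NULL. Roughly how the consumer side uses the hook, paraphrased rather than quoted from crypto/xor.c:

    /* Sketch of the consumer: if the arch hook names a template, skip calibration. */
    	fastest = XOR_SELECT_TEMPLATE(NULL);
    	if (fastest) {
    		pr_info("xor: automatically using best checksumming function   %s\n",
    			fastest->name);
    	} else {
    		/* benchmark every candidate template and keep the fastest one */
    	}
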
index 0021405..fafde1d 100644 (file)
@@ -168,6 +168,7 @@ extern unsigned os_major(unsigned long long dev);
 extern unsigned os_minor(unsigned long long dev);
 extern unsigned long long os_makedev(unsigned major, unsigned minor);
 extern int os_falloc_punch(int fd, unsigned long long offset, int count);
+extern int os_falloc_zeroes(int fd, unsigned long long offset, int count);
 extern int os_eventfd(unsigned int initval, int flags);
 extern int os_sendmsg_fds(int fd, const void *buf, unsigned int len,
                          const int *fds, unsigned int fds_num);
index ca69d72..484141b 100644 (file)
@@ -25,8 +25,8 @@ void uml_dtb_init(void)
                return;
        }
 
-       unflatten_device_tree();
        early_init_fdt_scan_reserved_mem();
+       unflatten_device_tree();
 }
 
 static int __init uml_dtb_setup(char *line, int *add)
index e4421db..fc4450d 100644 (file)
@@ -625,6 +625,15 @@ int os_falloc_punch(int fd, unsigned long long offset, int len)
        return n;
 }
 
+int os_falloc_zeroes(int fd, unsigned long long offset, int len)
+{
+       int n = fallocate(fd, FALLOC_FL_ZERO_RANGE|FALLOC_FL_KEEP_SIZE, offset, len);
+
+       if (n < 0)
+               return -errno;
+       return n;
+}
+
 int os_eventfd(unsigned int initval, int flags)
 {
        int fd = eventfd(initval, flags);
index 32e88ba..b459745 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <stdlib.h>
+#include <string.h>
 #include <unistd.h>
 #include <errno.h>
 #include <sched.h>
@@ -99,6 +100,10 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
                CATCH_EINTR(waitpid(pid, NULL, __WALL));
        }
 
+       if (ret < 0)
+               printk(UM_KERN_ERR "run_helper : failed to exec %s on host: %s\n",
+                      argv[0], strerror(-ret));
+
 out_free2:
        kfree(data.buf);
 out_close:
index 6c5041c..4d5591d 100644 (file)
 
 static timer_t event_high_res_timer = 0;
 
-static inline long long timeval_to_ns(const struct timeval *tv)
-{
-       return ((long long) tv->tv_sec * UM_NSEC_PER_SEC) +
-               tv->tv_usec * UM_NSEC_PER_USEC;
-}
-
 static inline long long timespec_to_ns(const struct timespec *ts)
 {
        return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec;
index ff45a27..b0142e0 100644 (file)
@@ -122,7 +122,6 @@ config X86
        select ARCH_WANT_GENERAL_HUGETLB
        select ARCH_WANT_HUGE_PMD_SHARE
        select ARCH_WANT_LD_ORPHAN_WARN
-       select ARCH_WANTS_RT_DELAYED_SIGNALS
        select ARCH_WANTS_THP_SWAP              if X86_64
        select ARCH_HAS_PARANOID_L1D_FLUSH
        select BUILDTIME_TABLE_SORT
index 71124cf..98a4852 100644 (file)
@@ -1,5 +1,7 @@
+CONFIG_WERROR=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_USELIB=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -11,23 +13,30 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_CGROUP_DEBUG=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-# CONFIG_64BIT is not set
 CONFIG_SMP=y
-CONFIG_X86_GENERIC=y
-CONFIG_HPET_TIMER=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_NR_CPUS=8
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_REBOOTFIXUPS=y
 CONFIG_MICROCODE_AMD=y
 CONFIG_X86_MSR=y
 CONFIG_X86_CPUID=y
-CONFIG_HIGHPTE=y
 CONFIG_X86_CHECK_BIOS_CORRUPTION=y
 # CONFIG_MTRR_SANITIZER is not set
 CONFIG_EFI=y
@@ -43,12 +52,15 @@ CONFIG_ACPI_BGRT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_X86_ACPI_CPUFREQ=y
-CONFIG_EFI_VARS=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -103,12 +115,16 @@ CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_CGROUP_NET_PRIO=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MSI=y
@@ -119,13 +135,16 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DEBUG_DEVRES=y
 CONFIG_CONNECTOR=y
+CONFIG_EFI_VARS=y
+CONFIG_EFI_CAPSULE_LOADER=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_SPI_ATTRS=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_ATA_PIIX=y
@@ -143,6 +162,7 @@ CONFIG_MACINTOSH_DRIVERS=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
+CONFIG_VIRTIO_NET=y
 CONFIG_BNX2=y
 CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
@@ -170,6 +190,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
 CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_NVRAM=y
 CONFIG_HPET=y
@@ -181,12 +202,7 @@ CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_FB_EFI=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_HRTIMER=y
@@ -219,6 +235,8 @@ CONFIG_USB_STORAGE=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
 CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_EEEPC_LAPTOP=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -240,6 +258,7 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
@@ -251,14 +270,15 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_PRINTK_TIME=y
+CONFIG_FRAME_WARN=1024
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
-CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
 CONFIG_EARLY_PRINTK_DBGP=y
 CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_KALLSYMS_ALL=y
+CONFIG_UNWINDER_FRAME_POINTER=y
+# CONFIG_64BIT is not set
index 92b1169..6978450 100644 (file)
@@ -1,3 +1,4 @@
+CONFIG_WERROR=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -11,14 +12,25 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_CGROUP_DEBUG=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_SMP=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
 CONFIG_MICROCODE_AMD=y
 CONFIG_X86_MSR=y
@@ -41,12 +53,14 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_X86_ACPI_CPUFREQ=y
 CONFIG_IA32_EMULATION=y
-CONFIG_EFI_VARS=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -101,12 +115,16 @@ CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_CGROUP_NET_PRIO=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_HOTPLUG_PCI=y
@@ -116,13 +134,15 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DEBUG_DEVRES=y
 CONFIG_CONNECTOR=y
+CONFIG_EFI_VARS=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_SPI_ATTRS=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_ATA_PIIX=y
@@ -138,6 +158,7 @@ CONFIG_MACINTOSH_DRIVERS=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
+CONFIG_VIRTIO_NET=y
 CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_E100=y
@@ -162,6 +183,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
 CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 # CONFIG_HW_RANDOM_INTEL is not set
 # CONFIG_HW_RANDOM_AMD is not set
@@ -175,12 +197,7 @@ CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_FB_EFI=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_HRTIMER=y
@@ -213,6 +230,8 @@ CONFIG_USB_STORAGE=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
 CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_EEEPC_LAPTOP=y
 CONFIG_AMD_IOMMU=y
 CONFIG_INTEL_IOMMU=y
@@ -237,6 +256,7 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
@@ -257,4 +277,3 @@ CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
 CONFIG_EARLY_PRINTK_DBGP=y
 CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_KALLSYMS_ALL=y
index 7f3886e..eca5d6e 100644 (file)
@@ -3,8 +3,7 @@ out := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
-         $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+$(shell mkdir -p $(out) $(uapi))
 
 syscall32 := $(src)/syscall_32.tbl
 syscall64 := $(src)/syscall_64.tbl
index 4138939..d23e80a 100644 (file)
@@ -249,6 +249,7 @@ enum x86_intercept_stage;
 #define PFERR_SGX_BIT 15
 #define PFERR_GUEST_FINAL_BIT 32
 #define PFERR_GUEST_PAGE_BIT 33
+#define PFERR_IMPLICIT_ACCESS_BIT 48
 
 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
 #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -259,6 +260,7 @@ enum x86_intercept_stage;
 #define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
 #define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
 #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT)
 
 #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |       \
                                 PFERR_WRITE_MASK |             \
@@ -430,7 +432,7 @@ struct kvm_mmu {
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                           gpa_t gva_or_gpa, u32 access,
+                           gpa_t gva_or_gpa, u64 access,
                            struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
@@ -512,6 +514,7 @@ struct kvm_pmu {
        u64 global_ctrl_mask;
        u64 global_ovf_ctrl_mask;
        u64 reserved_bits;
+       u64 raw_event_mask;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
@@ -1040,14 +1043,16 @@ struct kvm_x86_msr_filter {
        struct msr_bitmap_range ranges[16];
 };
 
-#define APICV_INHIBIT_REASON_DISABLE    0
-#define APICV_INHIBIT_REASON_HYPERV     1
-#define APICV_INHIBIT_REASON_NESTED     2
-#define APICV_INHIBIT_REASON_IRQWIN     3
-#define APICV_INHIBIT_REASON_PIT_REINJ  4
-#define APICV_INHIBIT_REASON_X2APIC    5
-#define APICV_INHIBIT_REASON_BLOCKIRQ  6
-#define APICV_INHIBIT_REASON_ABSENT    7
+enum kvm_apicv_inhibit {
+       APICV_INHIBIT_REASON_DISABLE,
+       APICV_INHIBIT_REASON_HYPERV,
+       APICV_INHIBIT_REASON_NESTED,
+       APICV_INHIBIT_REASON_IRQWIN,
+       APICV_INHIBIT_REASON_PIT_REINJ,
+       APICV_INHIBIT_REASON_X2APIC,
+       APICV_INHIBIT_REASON_BLOCKIRQ,
+       APICV_INHIBIT_REASON_ABSENT,
+};
 
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
@@ -1401,7 +1406,7 @@ struct kvm_x86_ops {
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
-       bool (*check_apicv_inhibit_reasons)(ulong bit);
+       bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
@@ -1585,7 +1590,7 @@ void kvm_mmu_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
@@ -1795,11 +1800,22 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 
 bool kvm_apicv_activated(struct kvm *kvm);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
-void kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                             unsigned long bit);
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                     enum kvm_apicv_inhibit reason, bool set);
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                   enum kvm_apicv_inhibit reason, bool set);
+
+static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
+                                        enum kvm_apicv_inhibit reason)
+{
+       kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
+}
 
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                               unsigned long bit);
+static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
+                                          enum kvm_apicv_inhibit reason)
+{
+       kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
+}
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
index 7eb2df5..f70a510 100644 (file)
@@ -221,8 +221,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_NESTED_CTL_SEV_ES_ENABLE   BIT(2)
 
 
+#define SVM_TSC_RATIO_RSVD     0xffffff0000000000ULL
+#define SVM_TSC_RATIO_MIN      0x0000000000000001ULL
+#define SVM_TSC_RATIO_MAX      0x000000ffffffffffULL
+#define SVM_TSC_RATIO_DEFAULT  0x0100000000ULL
+
+
 /* AVIC */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFFULL)
 #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
 
@@ -230,9 +236,11 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK       (0xFFFFFFFFFFULL << 12)
 #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK         (1ULL << 62)
 #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK              (1ULL << 63)
-#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK               (0xFF)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK               (0xFFULL)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK                 GENMASK_ULL(11, 0)
 
-#define AVIC_DOORBELL_PHYSICAL_ID_MASK                 (0xFF)
+#define VMCB_AVIC_APIC_BAR_MASK                                0xFFFFFFFFFF000ULL
 
 #define AVIC_UNACCEL_ACCESS_WRITE_MASK         1
 #define AVIC_UNACCEL_ACCESS_OFFSET_MASK                0xFF0
index d72c3d6..8963915 100644 (file)
@@ -124,9 +124,5 @@ struct user{
   char u_comm[32];             /* User command that was responsible */
   int u_debugreg[8];
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif /* _ASM_X86_USER_32_H */
index db90992..1dd10f0 100644 (file)
@@ -130,9 +130,5 @@ struct user {
   unsigned long error_code; /* CPU error code or 0 */
   unsigned long fault_address; /* CR3 or 0 */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif /* _ASM_X86_USER_64_H */
index 19821f0..c049561 100644 (file)
@@ -415,9 +415,6 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
                xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
                *vpkru = xpkru->pkru;
        }
-
-       /* Ensure that XCOMP_BV is set up for XSAVES */
-       xstate_init_xcomp_bv(&kstate->regs.xsave, kstate->xfeatures);
        return 0;
 }
 EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
index 7c7824a..39e1c86 100644 (file)
@@ -81,10 +81,10 @@ static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
        { [ 0 ... XFEATURE_MAX - 1] = -1};
 static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
        { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_comp_offsets[XFEATURE_MAX] __ro_after_init =
-       { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] __ro_after_init =
-       { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init;
+
+#define XSTATE_FLAG_SUPERVISOR BIT(0)
+#define XSTATE_FLAG_ALIGNED64  BIT(1)
 
 /*
  * Return whether the system supports a given xfeature.
@@ -124,17 +124,41 @@ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
 }
 EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
 
+static bool xfeature_is_aligned64(int xfeature_nr)
+{
+       return xstate_flags[xfeature_nr] & XSTATE_FLAG_ALIGNED64;
+}
+
 static bool xfeature_is_supervisor(int xfeature_nr)
 {
+       return xstate_flags[xfeature_nr] & XSTATE_FLAG_SUPERVISOR;
+}
+
+static unsigned int xfeature_get_offset(u64 xcomp_bv, int xfeature)
+{
+       unsigned int offs, i;
+
        /*
-        * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
-        * returns ECX[0] set to (1) for a supervisor state, and cleared (0)
-        * for a user state.
+        * Non-compacted format and legacy features use the cached fixed
+        * offsets.
         */
-       u32 eax, ebx, ecx, edx;
+       if (!cpu_feature_enabled(X86_FEATURE_XSAVES) || xfeature <= XFEATURE_SSE)
+               return xstate_offsets[xfeature];
 
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
-       return ecx & 1;
+       /*
+        * Compacted format offsets depend on the actual content of the
+        * compacted xsave area which is determined by the xcomp_bv header
+        * field.
+        */
+       offs = FXSAVE_SIZE + XSAVE_HDR_SIZE;
+       for_each_extended_xfeature(i, xcomp_bv) {
+               if (xfeature_is_aligned64(i))
+                       offs = ALIGN(offs, 64);
+               if (i == xfeature)
+                       break;
+               offs += xstate_sizes[i];
+       }
+       return offs;
 }
 
 /*
@@ -182,7 +206,7 @@ static bool xfeature_enabled(enum xfeature xfeature)
  * Record the offsets and sizes of various xstates contained
  * in the XSAVE state memory layout.
  */
-static void __init setup_xstate_features(void)
+static void __init setup_xstate_cache(void)
 {
        u32 eax, ebx, ecx, edx, i;
        /* start at the beginning of the "extended state" */
@@ -205,6 +229,7 @@ static void __init setup_xstate_features(void)
                cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
 
                xstate_sizes[i] = eax;
+               xstate_flags[i] = ecx;
 
                /*
                 * If an xfeature is supervisor state, the offset in EBX is
@@ -264,94 +289,6 @@ static void __init print_xstate_features(void)
 } while (0)
 
 /*
- * We could cache this like xstate_size[], but we only use
- * it here, so it would be a waste of space.
- */
-static int xfeature_is_aligned(int xfeature_nr)
-{
-       u32 eax, ebx, ecx, edx;
-
-       CHECK_XFEATURE(xfeature_nr);
-
-       if (!xfeature_enabled(xfeature_nr)) {
-               WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
-                         xfeature_nr);
-               return 0;
-       }
-
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
-       /*
-        * The value returned by ECX[1] indicates the alignment
-        * of state component 'i' when the compacted format
-        * of the extended region of an XSAVE area is used:
-        */
-       return !!(ecx & 2);
-}
-
-/*
- * This function sets up offsets and sizes of all extended states in
- * xsave area. This supports both standard format and compacted format
- * of the xsave area.
- */
-static void __init setup_xstate_comp_offsets(void)
-{
-       unsigned int next_offset;
-       int i;
-
-       /*
-        * The FP xstates and SSE xstates are legacy states. They are always
-        * in the fixed offsets in the xsave area in either compacted form
-        * or standard form.
-        */
-       xstate_comp_offsets[XFEATURE_FP] = 0;
-       xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
-                                                    xmm_space);
-
-       if (!cpu_feature_enabled(X86_FEATURE_XSAVES)) {
-               for_each_extended_xfeature(i, fpu_kernel_cfg.max_features)
-                       xstate_comp_offsets[i] = xstate_offsets[i];
-               return;
-       }
-
-       next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-
-       for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
-               if (xfeature_is_aligned(i))
-                       next_offset = ALIGN(next_offset, 64);
-
-               xstate_comp_offsets[i] = next_offset;
-               next_offset += xstate_sizes[i];
-       }
-}
-
-/*
- * Setup offsets of a supervisor-state-only XSAVES buffer:
- *
- * The offsets stored in xstate_comp_offsets[] only work for one specific
- * value of the Requested Feature BitMap (RFBM).  In cases where a different
- * RFBM value is used, a different set of offsets is required.  This set of
- * offsets is for when RFBM=xfeatures_mask_supervisor().
- */
-static void __init setup_supervisor_only_offsets(void)
-{
-       unsigned int next_offset;
-       int i;
-
-       next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-
-       for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
-               if (!xfeature_is_supervisor(i))
-                       continue;
-
-               if (xfeature_is_aligned(i))
-                       next_offset = ALIGN(next_offset, 64);
-
-               xstate_supervisor_only_offsets[i] = next_offset;
-               next_offset += xstate_sizes[i];
-       }
-}
-
-/*
  * Print out xstate component offsets and sizes
  */
 static void __init print_xstate_offset_size(void)
@@ -360,7 +297,8 @@ static void __init print_xstate_offset_size(void)
 
        for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
                pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
-                        i, xstate_comp_offsets[i], i, xstate_sizes[i]);
+                       i, xfeature_get_offset(fpu_kernel_cfg.max_features, i),
+                       i, xstate_sizes[i]);
        }
 }
 
@@ -419,7 +357,6 @@ static void __init setup_init_fpu_buf(void)
        if (!boot_cpu_has(X86_FEATURE_XSAVE))
                return;
 
-       setup_xstate_features();
        print_xstate_features();
 
        xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features);
@@ -448,25 +385,6 @@ static void __init setup_init_fpu_buf(void)
        fxsave(&init_fpstate.regs.fxsave);
 }
 
-static int xfeature_uncompacted_offset(int xfeature_nr)
-{
-       u32 eax, ebx, ecx, edx;
-
-       /*
-        * Only XSAVES supports supervisor states and it uses compacted
-        * format. Checking a supervisor state's uncompacted offset is
-        * an error.
-        */
-       if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
-               WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
-               return -1;
-       }
-
-       CHECK_XFEATURE(xfeature_nr);
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
-       return ebx;
-}
-
 int xfeature_size(int xfeature_nr)
 {
        u32 eax, ebx, ecx, edx;
@@ -644,29 +562,15 @@ static bool __init check_xstate_against_struct(int nr)
 
 static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted)
 {
-       unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-       int i;
+       unsigned int topmost = fls64(xfeatures) -  1;
+       unsigned int offset = xstate_offsets[topmost];
 
-       for_each_extended_xfeature(i, xfeatures) {
-               /* Align from the end of the previous feature */
-               if (xfeature_is_aligned(i))
-                       size = ALIGN(size, 64);
-               /*
-                * In compacted format the enabled features are packed,
-                * i.e. disabled features do not occupy space.
-                *
-                * In non-compacted format the offsets are fixed and
-                * disabled states still occupy space in the memory buffer.
-                */
-               if (!compacted)
-                       size = xfeature_uncompacted_offset(i);
-               /*
-                * Add the feature size even for non-compacted format
-                * to make the end result correct
-                */
-               size += xfeature_size(i);
-       }
-       return size;
+       if (topmost <= XFEATURE_SSE)
+               return sizeof(struct xregs_state);
+
+       if (compacted)
+               offset = xfeature_get_offset(xfeatures, topmost);
+       return offset + xstate_sizes[topmost];
 }
 
 /*
@@ -935,6 +839,10 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
 
        /* Enable xstate instructions to be able to continue with initialization: */
        fpu__init_cpu_xstate();
+
+       /* Cache size, offset and flags for initialization */
+       setup_xstate_cache();
+
        err = init_xstate_size();
        if (err)
                goto out_disable;
@@ -950,8 +858,6 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
                                  fpu_user_cfg.max_features);
 
        setup_init_fpu_buf();
-       setup_xstate_comp_offsets();
-       setup_supervisor_only_offsets();
 
        /*
         * Paranoia check whether something in the setup modified the
@@ -1006,13 +912,19 @@ void fpu__resume_cpu(void)
  */
 static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
 {
-       if (!xfeature_enabled(xfeature_nr)) {
-               WARN_ON_FPU(1);
+       u64 xcomp_bv = xsave->header.xcomp_bv;
+
+       if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
                return NULL;
+
+       if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
+               if (WARN_ON_ONCE(!(xcomp_bv & BIT_ULL(xfeature_nr))))
+                       return NULL;
        }
 
-       return (void *)xsave + xstate_comp_offsets[xfeature_nr];
+       return (void *)xsave + xfeature_get_offset(xcomp_bv, xfeature_nr);
 }
+
 /*
  * Given the xsave area and a state inside, this function returns the
  * address of the state.
@@ -1043,8 +955,9 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
         * We should not ever be requesting features that we
         * have not enabled.
         */
-       WARN_ONCE(!(fpu_kernel_cfg.max_features & BIT_ULL(xfeature_nr)),
-                 "get of unsupported state");
+       if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
+               return NULL;
+
        /*
         * This assumes the last 'xsave*' instruction to
         * have requested that 'xfeature_nr' be saved.
@@ -1625,6 +1538,9 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
 
        /* Calculate the resulting kernel state size */
        mask = permitted | requested;
+       /* Take supervisor states into account on the host */
+       if (!guest)
+               mask |= xfeatures_mask_supervisor();
        ksize = xstate_calculate_size(mask, compacted);
 
        /* Calculate the resulting user state size */
@@ -1639,7 +1555,7 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
 
        perm = guest ? &fpu->guest_perm : &fpu->perm;
        /* Pairs with the READ_ONCE() in xstate_get_group_perm() */
-       WRITE_ONCE(perm->__state_perm, requested);
+       WRITE_ONCE(perm->__state_perm, mask);
        /* Protected by sighand lock */
        perm->__state_size = ksize;
        perm->__user_state_size = usize;
index 79e0b8d..a22deb5 100644 (file)
@@ -517,7 +517,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
                } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
                        ipi_bitmap <<= min - apic_id;
                        min = apic_id;
-               } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+               } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                        max = apic_id < max ? max : apic_id;
                } else {
                        ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
index a00cd97..b24ca7f 100644 (file)
@@ -735,6 +735,7 @@ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                        if (function > READ_ONCE(max_cpuid_80000000))
                                return entry;
                }
+               break;
 
        default:
                break;
index f9c00c8..89b11e7 100644 (file)
@@ -3540,8 +3540,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
 {
        u64 tsc_aux = 0;
 
-       if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+       if (!ctxt->ops->guest_has_rdpid(ctxt))
                return emulate_ud(ctxt);
+
+       ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
        ctxt->dst.val = tsc_aux;
        return X86EMUL_CONTINUE;
 }
@@ -3642,7 +3644,7 @@ static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
 
        msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
                | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
-       r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
+       r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
 
        if (r == X86EMUL_IO_NEEDED)
                return r;
@@ -3659,7 +3661,7 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
        u64 msr_data;
        int r;
 
-       r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
+       r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
 
        if (r == X86EMUL_IO_NEEDED)
                return r;
index a32f54a..123b677 100644 (file)
@@ -122,9 +122,13 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
        else
                hv->synic_auto_eoi_used--;
 
-       __kvm_request_apicv_update(vcpu->kvm,
-                                  !hv->synic_auto_eoi_used,
-                                  APICV_INHIBIT_REASON_HYPERV);
+       /*
+        * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
+        * the hypervisor to manually inject IRQs.
+        */
+       __kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
+                                        APICV_INHIBIT_REASON_HYPERV,
+                                        !!hv->synic_auto_eoi_used);
 
        up_write(&vcpu->kvm->arch.apicv_update_lock);
 }
@@ -239,7 +243,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
        struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        int ret;
 
-       if (!synic->active && !host)
+       if (!synic->active && (!host || data))
                return 1;
 
        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -285,6 +289,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
        case HV_X64_MSR_EOM: {
                int i;
 
+               if (!synic->active)
+                       break;
+
                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
@@ -449,6 +456,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
        struct kvm_lapic_irq irq;
        int ret, vector;
 
+       if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
+               return -EINVAL;
+
        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;
 
@@ -661,7 +671,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
-       if (!synic->active && !host)
+       if (!synic->active && (!host || config))
                return 1;
 
        if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
@@ -690,7 +700,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
        struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
        struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
-       if (!synic->active && !host)
+       if (!synic->active && (!host || count))
                return 1;
 
        trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
index 0b65a76..1c83076 100644 (file)
@@ -305,15 +305,13 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
         * So, deactivate APICv when PIT is in reinject mode.
         */
        if (reinject) {
-               kvm_request_apicv_update(kvm, false,
-                                        APICV_INHIBIT_REASON_PIT_REINJ);
+               kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                /* The initial state is preserved while ps->reinject == 0. */
                kvm_pit_reset_reinject(pit);
                kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        } else {
-               kvm_request_apicv_update(kvm, true,
-                                        APICV_INHIBIT_REASON_PIT_REINJ);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        }
index 840ddb4..8dff25d 100644 (file)
@@ -210,6 +210,8 @@ struct x86_emulate_ops {
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
        u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
        void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+       int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
+       int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
        int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
@@ -226,6 +228,7 @@ struct x86_emulate_ops {
        bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
        bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
        bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
+       bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
 
        void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
 
index 80a2020..66b0eb0 100644 (file)
@@ -1024,6 +1024,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
        *r = -1;
 
        if (irq->shorthand == APIC_DEST_SELF) {
+               if (KVM_BUG_ON(!src, kvm)) {
+                       *r = 0;
+                       return true;
+               }
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }
index bf8dbc4..e6cae6f 100644 (file)
@@ -214,27 +214,27 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
  */
 static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
-                                 unsigned pfec)
+                                 u64 access)
 {
-       int cpl = static_call(kvm_x86_get_cpl)(vcpu);
+       /* strip nested paging fault error codes */
+       unsigned int pfec = access;
        unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
 
        /*
-        * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
+        * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
+        * For implicit supervisor accesses, SMAP cannot be overridden.
         *
-        * If CPL = 3, SMAP applies to all supervisor-mode data accesses
-        * (these are implicit supervisor accesses) regardless of the value
-        * of EFLAGS.AC.
+        * SMAP applies only to supervisor accesses; for user accesses,
+        * not_smap can be either set or clear, which has no bearing on the
+        * result.
         *
-        * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
-        * the result in X86_EFLAGS_AC. We then insert it in place of
-        * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
-        * but it will be one in index if SMAP checks are being overridden.
-        * It is important to keep this branchless.
+        * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
+        * this bit will always be zero in pfec, but it will be one in index
+        * if SMAP checks are being disabled.
         */
-       unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
-       int index = (pfec >> 1) +
-                   (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+       u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
+       bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
+       int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;
 
@@ -317,12 +317,12 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
        atomic64_add(count, &kvm->stat.pages[level - 1]);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
                           struct x86_exception *exception);
 
 static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
                                      struct kvm_mmu *mmu,
-                                     gpa_t gpa, u32 access,
+                                     gpa_t gpa, u64 access,
                                      struct x86_exception *exception)
 {
        if (mmu != &vcpu->arch.nested_mmu)
index 51671cb..8f19ea7 100644 (file)
@@ -2696,8 +2696,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
        if (*sptep == spte) {
                ret = RET_PF_SPURIOUS;
        } else {
-               trace_kvm_mmu_set_spte(level, gfn, sptep);
                flush |= mmu_spte_update(sptep, spte);
+               trace_kvm_mmu_set_spte(level, gfn, sptep);
        }
 
        if (wrprot) {
@@ -3703,7 +3703,7 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                                 gpa_t vaddr, u32 access,
+                                 gpa_t vaddr, u64 access,
                                  struct x86_exception *exception)
 {
        if (exception)
@@ -4591,11 +4591,11 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
                         *   - X86_CR4_SMAP is set in CR4
                         *   - A user page is accessed
                         *   - The access is not a fetch
-                        *   - Page fault in kernel mode
-                        *   - if CPL = 3 or X86_EFLAGS_AC is clear
+                        *   - The access is supervisor mode
+                        *   - If implicit supervisor access or X86_EFLAGS_AC is clear
                         *
-                        * Here, we cover the first three conditions.
-                        * The fourth is computed dynamically in permission_fault();
+                        * Here, we cover the first four conditions.
+                        * The fifth is computed dynamically in permission_fault();
                         * PFERR_RSVD_MASK bit will be set in PFEC if the access is
                         * *not* subject to SMAP restrictions.
                         */
@@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
        kvm_mmu_zap_all_fast(kvm);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+       int r;
 
+       INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+       INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+       INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
        spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-       kvm_mmu_init_tdp_mmu(kvm);
+       r = kvm_mmu_init_tdp_mmu(kvm);
+       if (r < 0)
+               return r;
 
        node->track_write = kvm_mmu_pte_write;
        node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
        kvm_page_track_register_notifier(kvm, node);
+       return 0;
 }
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
@@ -5842,8 +5849,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
        if (is_tdp_mmu_enabled(kvm)) {
                for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-                       flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
-                                                         gfn_end, flush);
+                       flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+                                                     gfn_end, true, flush);
        }
 
        if (flush)
index 252c778..01fee5f 100644 (file)
@@ -34,9 +34,8 @@
        #define PT_HAVE_ACCESSED_DIRTY(mmu) true
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
-       #define CMPXCHG cmpxchg
+       #define CMPXCHG "cmpxchgq"
        #else
-       #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
 #elif PTTYPE == 32
@@ -52,7 +51,7 @@
        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
        #define PT_HAVE_ACCESSED_DIRTY(mmu) true
-       #define CMPXCHG cmpxchg
+       #define CMPXCHG "cmpxchgl"
 #elif PTTYPE == PTTYPE_EPT
        #define pt_element_t u64
        #define guest_walker guest_walkerEPT
@@ -65,7 +64,9 @@
        #define PT_GUEST_DIRTY_SHIFT 9
        #define PT_GUEST_ACCESSED_SHIFT 8
        #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
-       #define CMPXCHG cmpxchg64
+       #ifdef CONFIG_X86_64
+       #define CMPXCHG "cmpxchgq"
+       #endif
        #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
 #else
        #error Invalid PTTYPE value
@@ -147,43 +148,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               pt_element_t __user *ptep_user, unsigned index,
                               pt_element_t orig_pte, pt_element_t new_pte)
 {
-       int npages;
-       pt_element_t ret;
-       pt_element_t *table;
-       struct page *page;
-
-       npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
-       if (likely(npages == 1)) {
-               table = kmap_atomic(page);
-               ret = CMPXCHG(&table[index], orig_pte, new_pte);
-               kunmap_atomic(table);
-
-               kvm_release_page_dirty(page);
-       } else {
-               struct vm_area_struct *vma;
-               unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
-               unsigned long pfn;
-               unsigned long paddr;
-
-               mmap_read_lock(current->mm);
-               vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
-               if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
-                       mmap_read_unlock(current->mm);
-                       return -EFAULT;
-               }
-               pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-               paddr = pfn << PAGE_SHIFT;
-               table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
-               if (!table) {
-                       mmap_read_unlock(current->mm);
-                       return -EFAULT;
-               }
-               ret = CMPXCHG(&table[index], orig_pte, new_pte);
-               memunmap(table);
-               mmap_read_unlock(current->mm);
-       }
+       signed char r;
 
-       return (ret != orig_pte);
+       if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
+               return -EFAULT;
+
+#ifdef CMPXCHG
+       asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
+                    "setnz %b[r]\n"
+                    "2:"
+                    _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+                    : [ptr] "+m" (*ptep_user),
+                      [old] "+a" (orig_pte),
+                      [r] "=q" (r)
+                    : [new] "r" (new_pte)
+                    : "memory");
+#else
+       asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
+                    "setnz %b[r]\n"
+                    "2:"
+                    _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+                    : [ptr] "+m" (*ptep_user),
+                      [old] "+A" (orig_pte),
+                      [r] "=q" (r)
+                    : [new_lo] "b" ((u32)new_pte),
+                      [new_hi] "c" ((u32)(new_pte >> 32))
+                    : "memory");
+#endif
+
+       user_access_end();
+       return r;
 }
 
 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
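
The rewritten cmpxchg_gpte() keeps the old return convention: 0 when the guest PTE was swapped, nonzero when the value changed underneath the walker, negative on fault. A reduced userspace sketch of that convention, using a plain compare-and-exchange builtin instead of LOCK CMPXCHG on a user pointer with exception-table fixup; the PTE values are made up.

#include <stdio.h>
#include <stdint.h>

static int cmpxchg_gpte_demo(uint64_t *ptep, uint64_t orig, uint64_t new_pte)
{
	/* 0: swapped (the PTE still matched the walk), nonzero: lost the race */
	return !__atomic_compare_exchange_n(ptep, &orig, new_pte, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	uint64_t pte = 0x1000020;	/* made-up guest PTE value */

	printf("first update: %d\n", cmpxchg_gpte_demo(&pte, 0x1000020, 0x1000060));
	printf("stale update: %d\n", cmpxchg_gpte_demo(&pte, 0x1000020, 0x1000060));
	return 0;
}
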
@@ -339,7 +333,7 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
  */
 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                                   gpa_t addr, u32 access)
+                                   gpa_t addr, u64 access)
 {
        int ret;
        pt_element_t pte;
@@ -347,7 +341,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
        gfn_t table_gfn;
        u64 pt_access, pte_access;
        unsigned index, accessed_dirty, pte_pkey;
-       unsigned nested_access;
+       u64 nested_access;
        gpa_t pte_gpa;
        bool have_ad;
        int offset;
@@ -540,7 +534,7 @@ error:
 }
 
 static int FNAME(walk_addr)(struct guest_walker *walker,
-                           struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
+                           struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
 {
        return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
                                        access);
@@ -988,7 +982,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 
 /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                              gpa_t addr, u32 access,
+                              gpa_t addr, u64 access,
                               struct x86_exception *exception)
 {
        struct guest_walker walker;
index e7e7876..d71d177 100644 (file)
@@ -14,21 +14,24 @@ static bool __read_mostly tdp_mmu_enabled = true;
 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
 
 /* Initializes the TDP MMU for the VM, if enabled. */
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
+       struct workqueue_struct *wq;
+
        if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
-               return false;
+               return 0;
+
+       wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+       if (!wq)
+               return -ENOMEM;
 
        /* This should not be changed for the lifetime of the VM. */
        kvm->arch.tdp_mmu_enabled = true;
-
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
-       kvm->arch.tdp_mmu_zap_wq =
-               alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
-
-       return true;
+       kvm->arch.tdp_mmu_zap_wq = wq;
+       return 1;
 }
 
 /* Arbitrarily returns true so that this may be used in if statements. */
@@ -906,10 +909,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 /*
- * Tears down the mappings for the range of gfns, [start, end), and frees the
- * non-root pages mapping GFNs strictly within that range. Returns true if
- * SPTEs have been cleared and a TLB flush is needed before releasing the
- * MMU lock.
+ * Zap leaf SPTEs for the range of gfns, [start, end). Returns true if SPTEs
+ * have been cleared and a TLB flush is needed before releasing the MMU lock.
  *
  * If can_yield is true, will release the MMU lock and reschedule if the
  * scheduler needs the CPU or there is contention on the MMU lock. If this
@@ -917,42 +918,25 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
  * the caller must ensure it does not supply too large a GFN range, or the
  * operation can cause a soft lockup.
  */
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield, bool flush)
+static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
+                             gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
-       bool zap_all = (start == 0 && end >= tdp_mmu_max_gfn_host());
        struct tdp_iter iter;
 
-       /*
-        * No need to try to step down in the iterator when zapping all SPTEs,
-        * zapping the top-level non-leaf SPTEs will recurse on their children.
-        */
-       int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
-
        end = min(end, tdp_mmu_max_gfn_host());
 
        lockdep_assert_held_write(&kvm->mmu_lock);
 
        rcu_read_lock();
 
-       for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+       for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
                        flush = false;
                        continue;
                }
 
-               if (!is_shadow_present_pte(iter.old_spte))
-                       continue;
-
-               /*
-                * If this is a non-last-level SPTE that covers a larger range
-                * than should be zapped, continue, and zap the mappings at a
-                * lower level, except when zapping all SPTEs.
-                */
-               if (!zap_all &&
-                   (iter.gfn < start ||
-                    iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
+               if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
 
@@ -960,17 +944,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                flush = true;
        }
 
-       /*
-        * Need to flush before releasing RCU.  TODO: do it only if intermediate
-        * page tables were zapped; there is no need to flush under RCU protection
-        * if no 'struct kvm_mmu_page' is freed.
-        */
-       if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, start, end - start);
-
        rcu_read_unlock();
 
-       return false;
+       /*
+        * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
+        * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
+        */
+       return flush;
 }
 
 /*
@@ -979,13 +959,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
-                                gfn_t end, bool can_yield, bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+                          bool can_yield, bool flush)
 {
        struct kvm_mmu_page *root;
 
        for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
-               flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
+               flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
 
        return flush;
 }
@@ -1233,8 +1213,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
                                 bool flush)
 {
-       return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
-                                          range->end, range->may_block, flush);
+       return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
+                                    range->end, range->may_block, flush);
 }
 
 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
index 5e5ef25..c163f7c 100644 (file)
@@ -15,14 +15,8 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared);
 
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
                                 gfn_t end, bool can_yield, bool flush);
-static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
-                                            gfn_t start, gfn_t end, bool flush)
-{
-       return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
-}
-
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
@@ -72,7 +66,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
                                        u64 *spte);
 
 #ifdef CONFIG_X86_64
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
 
@@ -93,7 +87,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
        return sp && is_tdp_mmu_page(sp) && sp->root_count;
 }
 #else
-static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
+static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
 static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
index b1a0299..eca39f5 100644 (file)
@@ -96,8 +96,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  u64 config, bool exclude_user,
-                                 bool exclude_kernel, bool intr,
-                                 bool in_tx, bool in_tx_cp)
+                                 bool exclude_kernel, bool intr)
 {
        struct perf_event *event;
        struct perf_event_attr attr = {
@@ -116,16 +115,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 
        attr.sample_period = get_sample_period(pmc, pmc->counter);
 
-       if (in_tx)
-               attr.config |= HSW_IN_TX;
-       if (in_tx_cp) {
+       if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
+           guest_cpuid_is_intel(pmc->vcpu)) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
-               attr.config |= HSW_IN_TX_CHECKPOINTED;
        }
 
        event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -185,6 +182,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        u32 type = PERF_TYPE_RAW;
        struct kvm *kvm = pmc->vcpu->kvm;
        struct kvm_pmu_event_filter *filter;
+       struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
        bool allow_event = true;
 
        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -221,7 +219,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        }
 
        if (type == PERF_TYPE_RAW)
-               config = eventsel & AMD64_RAW_EVENT_MASK;
+               config = eventsel & pmu->raw_event_mask;
 
        if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
                return;
@@ -232,9 +230,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        pmc_reprogram_counter(pmc, type, config,
                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-                             eventsel & ARCH_PERFMON_EVENTSEL_INT,
-                             (eventsel & HSW_IN_TX),
-                             (eventsel & HSW_IN_TX_CHECKPOINTED));
+                             eventsel & ARCH_PERFMON_EVENTSEL_INT);
 }
 EXPORT_SYMBOL_GPL(reprogram_gp_counter);
 
@@ -270,7 +266,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
                              kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
                              !(en_field & 0x2), /* exclude user */
                              !(en_field & 0x1), /* exclude kernel */
-                             pmi, false, false);
+                             pmi);
 }
 EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
index b37b353..a1cf9c3 100644 (file)
@@ -726,7 +726,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 {
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_irq_routing_table *irq_rt;
-       int idx, ret = -EINVAL;
+       int idx, ret = 0;
 
        if (!kvm_arch_has_assigned_device(kvm) ||
            !irq_remapping_cap(IRQ_POSTING_CAP))
@@ -737,7 +737,13 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 
        idx = srcu_read_lock(&kvm->irq_srcu);
        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-       WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
+
+       if (guest_irq >= irq_rt->nr_rt_entries ||
+               hlist_empty(&irq_rt->map[guest_irq])) {
+               pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+                            guest_irq, irq_rt->nr_rt_entries);
+               goto out;
+       }
 
        hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
                struct vcpu_data vcpu_info;
@@ -822,7 +828,7 @@ out:
        return ret;
 }
 
-bool avic_check_apicv_inhibit_reasons(ulong bit)
+bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
@@ -833,7 +839,7 @@ bool avic_check_apicv_inhibit_reasons(ulong bit)
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
-       return supported & BIT(bit);
+       return supported & BIT(reason);
 }
 
 
index d4de524..24eb935 100644 (file)
@@ -262,12 +262,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
-               if (data == pmc->eventsel)
-                       return 0;
-               if (!(data & pmu->reserved_bits)) {
+               data &= ~pmu->reserved_bits;
+               if (data != pmc->eventsel)
                        reprogram_gp_counter(pmc, data);
-                       return 0;
-               }
+               return 0;
        }
 
        return 1;
@@ -284,6 +282,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xfffffff000280000ull;
+       pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
        pmu->version = 1;
        /* not applicable to AMD; but clean them to prevent any fall out */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
index 0884c34..bd4c64b 100644 (file)
@@ -62,20 +62,8 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
 
-#define SVM_FEATURE_LBRV           (1 <<  1)
-#define SVM_FEATURE_SVML           (1 <<  2)
-#define SVM_FEATURE_TSC_RATE       (1 <<  4)
-#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
-#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
-#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
-#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
-
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
-#define TSC_RATIO_RSVD          0xffffff0000000000ULL
-#define TSC_RATIO_MIN          0x0000000000000001ULL
-#define TSC_RATIO_MAX          0x000000ffffffffffULL
-
 static bool erratum_383_found __read_mostly;
 
 u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
@@ -87,7 +75,6 @@ u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 static uint64_t osvw_len = 4, osvw_status;
 
 static DEFINE_PER_CPU(u64, current_tsc_ratio);
-#define TSC_RATIO_DEFAULT      0x0100000000ULL
 
 static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
@@ -480,7 +467,7 @@ static void svm_hardware_disable(void)
 {
        /* Make sure we clean up behind us */
        if (tsc_scaling)
-               wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
+               wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
 
        cpu_svm_disable();
 
@@ -526,8 +513,8 @@ static int svm_hardware_enable(void)
                 * Set the default value, even if we don't use TSC scaling
                 * to avoid having stale value in the msr
                 */
-               wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
-               __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
+               wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
+               __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
        }
 
 
@@ -2723,7 +2710,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                        break;
                }
 
-               if (data & TSC_RATIO_RSVD)
+               if (data & SVM_TSC_RATIO_RSVD)
                        return 1;
 
                svm->tsc_ratio_msr = data;
@@ -2918,7 +2905,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu)
         * In this case AVIC was temporarily disabled for
         * requesting the IRQ window and we have to re-enable it.
         */
-       kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN);
+       kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
 
        ++vcpu->stat.irq_window_exits;
        return 1;
@@ -3516,7 +3503,7 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
                 * via AVIC. In such case, we need to temporarily disable AVIC,
                 * and fallback to injecting IRQ via V_IRQ.
                 */
-               kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN);
+               kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
                svm_set_vintr(svm);
        }
 }
@@ -3948,6 +3935,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_cpuid_entry2 *best;
+       struct kvm *kvm = vcpu->kvm;
 
        vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
                                    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3974,16 +3962,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                 * is exposed to the guest, disable AVIC.
                 */
                if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
-                       kvm_request_apicv_update(vcpu->kvm, false,
-                                                APICV_INHIBIT_REASON_X2APIC);
+                       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);
 
                /*
                 * Currently, AVIC does not work with nested virtualization.
                 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
                 */
                if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                       kvm_request_apicv_update(vcpu->kvm, false,
-                                                APICV_INHIBIT_REASON_NESTED);
+                       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
        }
        init_vmcb_after_set_cpuid(vcpu);
 }
@@ -4766,10 +4752,10 @@ static __init int svm_hardware_setup(void)
                } else {
                        pr_info("TSC scaling supported\n");
                        kvm_has_tsc_control = true;
-                       kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
-                       kvm_tsc_scaling_ratio_frac_bits = 32;
                }
        }
+       kvm_max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
+       kvm_tsc_scaling_ratio_frac_bits = 32;
 
        tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 
index e37bb35..f77a7d2 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/svm.h>
 #include <asm/sev-common.h>
 
+#include "kvm_cache_regs.h"
+
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
 #define        IOPM_SIZE PAGE_SIZE * 3
@@ -569,17 +571,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
 
 /* avic.c */
 
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   GENMASK_ULL(11, 0)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK       (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK         (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK              (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
-
 int avic_ga_log_notifier(u32 ga_tag);
 void avic_vm_destroy(struct kvm *kvm);
 int avic_vm_init(struct kvm *kvm);
@@ -592,7 +583,7 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
-bool avic_check_apicv_inhibit_reasons(ulong bit);
+bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
 void avic_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
 void avic_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
 bool avic_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
index 98aa981..8cdc62c 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <linux/kvm_host.h>
-#include "kvm_cache_regs.h"
 
 #include <asm/mshyperv.h>
 
index 193f5ba..e3a24b8 100644 (file)
@@ -1339,23 +1339,25 @@ TRACE_EVENT(kvm_hv_stimer_cleanup,
                  __entry->vcpu_id, __entry->timer_index)
 );
 
-TRACE_EVENT(kvm_apicv_update_request,
-           TP_PROTO(bool activate, unsigned long bit),
-           TP_ARGS(activate, bit),
+TRACE_EVENT(kvm_apicv_inhibit_changed,
+           TP_PROTO(int reason, bool set, unsigned long inhibits),
+           TP_ARGS(reason, set, inhibits),
 
        TP_STRUCT__entry(
-               __field(bool, activate)
-               __field(unsigned long, bit)
+               __field(int, reason)
+               __field(bool, set)
+               __field(unsigned long, inhibits)
        ),
 
        TP_fast_assign(
-               __entry->activate = activate;
-               __entry->bit = bit;
+               __entry->reason = reason;
+               __entry->set = set;
+               __entry->inhibits = inhibits;
        ),
 
-       TP_printk("%s bit=%lu",
-                 __entry->activate ? "activate" : "deactivate",
-                 __entry->bit)
+       TP_printk("%s reason=%u, inhibits=0x%lx",
+                 __entry->set ? "set" : "cleared",
+                 __entry->reason, __entry->inhibits)
 );
 
 TRACE_EVENT(kvm_apicv_accept_irq,
index 0684e51..bc3f851 100644 (file)
@@ -389,6 +389,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
+       u64 reserved_bits;
 
        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -443,7 +444,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
-                       if (!(data & pmu->reserved_bits)) {
+                       reserved_bits = pmu->reserved_bits;
+                       if ((pmc->idx == 2) &&
+                           (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+                               reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+                       if (!(data & reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
@@ -485,6 +490,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
+       pmu->raw_event_mask = X86_RAW_EVENT_MASK;
 
        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry || !vcpu->kvm->arch.enable_pmu)
@@ -533,8 +539,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-               pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+               pmu->reserved_bits ^= HSW_IN_TX;
+               pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+       }
 
        bitmap_set(pmu->all_valid_pmc_idx,
                0, pmu->nr_arch_gp_counters);
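
After this change, reserved_bits only gates what the guest may write, while raw_event_mask decides which bits survive into the config handed to perf. A userspace sketch of that interplay; the HSW_IN_TX bit positions and the raw-event-mask value below are assumptions used for illustration.

#include <stdio.h>
#include <stdint.h>

#define HSW_IN_TX		(1ULL << 32)	/* assumed Haswell TSX bits */
#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)

int main(void)
{
	uint64_t reserved_bits  = 0xffffffff00200000ULL;
	uint64_t raw_event_mask = 0xffffffffULL;	/* stand-in for X86_RAW_EVENT_MASK */
	uint64_t eventsel;

	/* guest advertises HLE/RTM: IN_TX becomes writable and forwardable */
	reserved_bits  ^= HSW_IN_TX;
	raw_event_mask |= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

	eventsel = 0x01c4 | HSW_IN_TX;			/* made-up event selector */
	if (eventsel & reserved_bits)
		printf("write rejected\n");
	else
		printf("config passed to perf = %#llx\n",
		       (unsigned long long)(eventsel & raw_event_mask));
	return 0;
}
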
index e8963f5..04d170c 100644 (file)
@@ -2866,21 +2866,17 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
 
        /* Nothing to do if hardware doesn't support EFER. */
-       if (!msr)
+       if (!vmx_find_uret_msr(vmx, MSR_EFER))
                return 0;
 
        vcpu->arch.efer = efer;
-       if (efer & EFER_LMA) {
-               vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
-               msr->data = efer;
-       } else {
-               vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
+       if (efer & EFER_LMA)
+               vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
+       else
+               vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
 
-               msr->data = efer & ~EFER_LME;
-       }
        vmx_setup_uret_msrs(vmx);
        return 0;
 }
@@ -2906,7 +2902,6 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-       vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
        vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
 
@@ -7705,14 +7700,14 @@ static void vmx_hardware_unsetup(void)
        free_kvm_area();
 }
 
-static bool vmx_check_apicv_inhibit_reasons(ulong bit)
+static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
-       return supported & BIT(bit);
+       return supported & BIT(reason);
 }
 
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
@@ -7980,12 +7975,11 @@ static __init int hardware_setup(void)
        if (!enable_apicv)
                vmx_x86_ops.sync_pir_to_irr = NULL;
 
-       if (cpu_has_vmx_tsc_scaling()) {
+       if (cpu_has_vmx_tsc_scaling())
                kvm_has_tsc_control = true;
-               kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
-               kvm_tsc_scaling_ratio_frac_bits = 48;
-       }
 
+       kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
+       kvm_tsc_scaling_ratio_frac_bits = 48;
        kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
 
        set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
index 02cf0a7..0c0ca59 100644 (file)
@@ -1748,9 +1748,6 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 {
        struct msr_data msr;
 
-       if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
-               return KVM_MSR_RET_FILTERED;
-
        switch (index) {
        case MSR_FS_BASE:
        case MSR_GS_BASE:
@@ -1832,9 +1829,6 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
        struct msr_data msr;
        int ret;
 
-       if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
-               return KVM_MSR_RET_FILTERED;
-
        switch (index) {
        case MSR_TSC_AUX:
                if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
@@ -1871,6 +1865,20 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
        return ret;
 }
 
+static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+       if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
+               return KVM_MSR_RET_FILTERED;
+       return kvm_get_msr_ignored_check(vcpu, index, data, false);
+}
+
+static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+       if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
+               return KVM_MSR_RET_FILTERED;
+       return kvm_set_msr_ignored_check(vcpu, index, data, false);
+}
+
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
        return kvm_get_msr_ignored_check(vcpu, index, data, false);
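
The split above gives two entry points: guest-originated RDMSR/WRMSR go through the userspace MSR filter, while KVM-internal accesses keep the unfiltered path. A toy userspace sketch of the wrapper pattern; the filter rule, return code and helper names below are made up.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define RET_FILTERED (-2)	/* toy stand-in for the "filtered" error */

static bool msr_allowed(uint32_t index)
{
	return index != 0x10;	/* toy filter: userspace blocked MSR 0x10 */
}

static int get_msr(uint32_t index, uint64_t *data)
{
	(void)index;
	*data = 0x1234;		/* stand-in for the real MSR read */
	return 0;
}

static int get_msr_with_filter(uint32_t index, uint64_t *data)
{
	if (!msr_allowed(index))
		return RET_FILTERED;
	return get_msr(index, data);
}

int main(void)
{
	uint64_t v;

	printf("guest RDMSR of 0x10   -> %d\n", get_msr_with_filter(0x10, &v));
	printf("internal read of 0x10 -> %d\n", get_msr(0x10, &v));
	return 0;
}
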
@@ -1953,7 +1961,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
        u64 data;
        int r;
 
-       r = kvm_get_msr(vcpu, ecx, &data);
+       r = kvm_get_msr_with_filter(vcpu, ecx, &data);
 
        if (!r) {
                trace_kvm_msr_read(ecx, data);
@@ -1978,7 +1986,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
        u64 data = kvm_read_edx_eax(vcpu);
        int r;
 
-       r = kvm_set_msr(vcpu, ecx, data);
+       r = kvm_set_msr_with_filter(vcpu, ecx, data);
 
        if (!r) {
                trace_kvm_msr_write(ecx, data);
@@ -5938,7 +5946,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
                kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
-               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
                r = 0;
 split_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@ -6335,7 +6343,7 @@ set_identity_unlock:
                /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
-               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -6726,7 +6734,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
        static_call(kvm_x86_get_segment)(vcpu, var, seg);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
                           struct x86_exception *exception)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -6746,7 +6754,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
@@ -6756,7 +6764,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_FETCH_MASK;
        return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
@@ -6766,7 +6774,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_WRITE_MASK;
        return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
@@ -6782,7 +6790,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-                                     struct kvm_vcpu *vcpu, u32 access,
+                                     struct kvm_vcpu *vcpu, u64 access,
                                      struct x86_exception *exception)
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6819,7 +6827,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        unsigned offset;
        int ret;
 
@@ -6844,7 +6852,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
                               gva_t addr, void *val, unsigned int bytes,
                               struct x86_exception *exception)
 {
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
        /*
         * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -6863,9 +6871,11 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
                             struct x86_exception *exception, bool system)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       u32 access = 0;
+       u64 access = 0;
 
-       if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
+       if (system)
+               access |= PFERR_IMPLICIT_ACCESS;
+       else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
                access |= PFERR_USER_MASK;
 
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
@@ -6881,7 +6891,7 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
 }
 
 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-                                     struct kvm_vcpu *vcpu, u32 access,
+                                     struct kvm_vcpu *vcpu, u64 access,
                                      struct x86_exception *exception)
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6915,9 +6925,11 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
                              bool system)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       u32 access = PFERR_WRITE_MASK;
+       u64 access = PFERR_WRITE_MASK;
 
-       if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
+       if (system)
+               access |= PFERR_IMPLICIT_ACCESS;
+       else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
                access |= PFERR_USER_MASK;
 
        return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
@@ -6984,7 +6996,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
                                bool write)
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-       u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
+       u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
                | (write ? PFERR_WRITE_MASK : 0);
 
        /*
@@ -7627,13 +7639,13 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
        return;
 }
 
-static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
-                           u32 msr_index, u64 *pdata)
+static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
+                                       u32 msr_index, u64 *pdata)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        int r;
 
-       r = kvm_get_msr(vcpu, msr_index, pdata);
+       r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
 
        if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
                                    complete_emulated_rdmsr, r)) {
@@ -7644,13 +7656,13 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
        return r;
 }
 
-static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
-                           u32 msr_index, u64 data)
+static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
+                                       u32 msr_index, u64 data)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        int r;
 
-       r = kvm_set_msr(vcpu, msr_index, data);
+       r = kvm_set_msr_with_filter(vcpu, msr_index, data);
 
        if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
                                    complete_emulated_msr_access, r)) {
@@ -7661,6 +7673,18 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
        return r;
 }
 
+static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
+                           u32 msr_index, u64 *pdata)
+{
+       return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+}
+
+static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
+                           u32 msr_index, u64 data)
+{
+       return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+}
+
 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
@@ -7724,6 +7748,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
        return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
 }
 
+static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
+{
+       return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
+}
+
 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
 {
        return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
@@ -7794,6 +7823,8 @@ static const struct x86_emulate_ops emulate_ops = {
        .set_dr              = emulator_set_dr,
        .get_smbase          = emulator_get_smbase,
        .set_smbase          = emulator_set_smbase,
+       .set_msr_with_filter = emulator_set_msr_with_filter,
+       .get_msr_with_filter = emulator_get_msr_with_filter,
        .set_msr             = emulator_set_msr,
        .get_msr             = emulator_get_msr,
        .check_pmc           = emulator_check_pmc,
@@ -7806,6 +7837,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .guest_has_long_mode = emulator_guest_has_long_mode,
        .guest_has_movbe     = emulator_guest_has_movbe,
        .guest_has_fxsr      = emulator_guest_has_fxsr,
+       .guest_has_rdpid     = emulator_guest_has_rdpid,
        .set_nmi_mask        = emulator_set_nmi_mask,
        .get_hflags          = emulator_get_hflags,
        .exiting_smm         = emulator_exiting_smm,
@@ -9058,15 +9090,29 @@ bool kvm_apicv_activated(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 
+
+static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
+                                      enum kvm_apicv_inhibit reason, bool set)
+{
+       if (set)
+               __set_bit(reason, inhibits);
+       else
+               __clear_bit(reason, inhibits);
+
+       trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
+}
+
 static void kvm_apicv_init(struct kvm *kvm)
 {
+       unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
+
        init_rwsem(&kvm->arch.apicv_update_lock);
 
-       set_bit(APICV_INHIBIT_REASON_ABSENT,
-               &kvm->arch.apicv_inhibit_reasons);
+       set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true);
+
        if (!enable_apicv)
-               set_bit(APICV_INHIBIT_REASON_DISABLE,
-                       &kvm->arch.apicv_inhibit_reasons);
+               set_or_clear_apicv_inhibit(inhibits,
+                                          APICV_INHIBIT_REASON_DISABLE, true);
 }
 
 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
@@ -9740,24 +9786,21 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
 
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                     enum kvm_apicv_inhibit reason, bool set)
 {
        unsigned long old, new;
 
        lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
 
-       if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(bit))
+       if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason))
                return;
 
        old = new = kvm->arch.apicv_inhibit_reasons;
 
-       if (activate)
-               __clear_bit(bit, &new);
-       else
-               __set_bit(bit, &new);
+       set_or_clear_apicv_inhibit(&new, reason, set);
 
        if (!!old != !!new) {
-               trace_kvm_apicv_update_request(activate, bit);
                /*
                 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
                 * false positives in the sanity check WARN in svm_vcpu_run().
@@ -9776,20 +9819,22 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
                        unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
                        kvm_zap_gfn_range(kvm, gfn, gfn+1);
                }
-       } else
+       } else {
                kvm->arch.apicv_inhibit_reasons = new;
+       }
 }
 
-void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                   enum kvm_apicv_inhibit reason, bool set)
 {
        if (!enable_apicv)
                return;
 
        down_write(&kvm->arch.apicv_update_lock);
-       __kvm_request_apicv_update(kvm, activate, bit);
+       __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
        up_write(&kvm->arch.apicv_update_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
+EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
@@ -10937,7 +10982,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
 {
-       bool inhibit = false;
+       bool set = false;
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
@@ -10945,11 +10990,11 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
-                       inhibit = true;
+                       set = true;
                        break;
                }
        }
-       __kvm_request_apicv_update(kvm, !inhibit, APICV_INHIBIT_REASON_BLOCKIRQ);
+       __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
        up_write(&kvm->arch.apicv_update_lock);
 }
 
@@ -11557,10 +11602,8 @@ int kvm_arch_hardware_setup(void *opaque)
                u64 max = min(0x7fffffffULL,
                              __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
                kvm_max_guest_tsc_khz = max;
-
-               kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
        }
-
+       kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
        kvm_init_msr_list();
        return 0;
 }
@@ -11629,12 +11672,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        ret = kvm_page_track_init(kvm);
        if (ret)
-               return ret;
+               goto out;
+
+       ret = kvm_mmu_init_vm(kvm);
+       if (ret)
+               goto out_page_track;
 
        INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
-       INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-       INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
-       INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
        atomic_set(&kvm->arch.noncoherent_dma_count, 0);
 
@@ -11666,10 +11710,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
-       kvm_mmu_init_vm(kvm);
        kvm_xen_init_vm(kvm);
 
        return static_call(kvm_x86_vm_init)(kvm);
+
+out_page_track:
+       kvm_page_track_cleanup(kvm);
+out:
+       return ret;
 }
 
 int kvm_arch_post_init_vm(struct kvm *kvm)
@@ -12593,7 +12641,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
        struct x86_exception fault;
-       u32 access = error_code &
+       u64 access = error_code &
                (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
        if (!(error_code & PFERR_PRESENT_MASK) ||
@@ -12933,7 +12981,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
-EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
index 4aa0f2b..bf6cc25 100644 (file)
@@ -39,8 +39,8 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
        }
 
        do {
-               ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
-                                               gpa, PAGE_SIZE, false);
+               ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
+                                               gpa, PAGE_SIZE);
                if (ret)
                        goto out;
 
@@ -1025,8 +1025,7 @@ static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm
                        break;
 
                idx = srcu_read_lock(&kvm->srcu);
-               rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa,
-                                                 PAGE_SIZE, false);
+               rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
                srcu_read_unlock(&kvm->srcu, idx);
        } while(!rc);
 
index 1f8a8f8..50734a2 100644 (file)
@@ -93,7 +93,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
                buff += 8;
        }
        if (len & 7) {
-#ifdef CONFIG_DCACHE_WORD_ACCESS
                unsigned int shift = (8 - (len & 7)) * 8;
                unsigned long trail;
 
@@ -103,31 +102,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
                    "adcq $0,%[res]"
                        : [res] "+r" (temp64)
                        : [trail] "r" (trail));
-#else
-               if (len & 4) {
-                       asm("addq %[val],%[res]\n\t"
-                           "adcq $0,%[res]"
-                               : [res] "+r" (temp64)
-                               : [val] "r" ((u64)*(u32 *)buff)
-                               : "memory");
-                       buff += 4;
-               }
-               if (len & 2) {
-                       asm("addq %[val],%[res]\n\t"
-                           "adcq $0,%[res]"
-                               : [res] "+r" (temp64)
-                               : [val] "r" ((u64)*(u16 *)buff)
-                               : "memory");
-                       buff += 2;
-               }
-               if (len & 1) {
-                       asm("addq %[val],%[res]\n\t"
-                           "adcq $0,%[res]"
-                               : [res] "+r" (temp64)
-                               : [val] "r" ((u64)*(u8 *)buff)
-                               : "memory");
-               }
-#endif
        }
        result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
        if (unlikely(odd)) {
index df50451..3e2f33f 100644 (file)
@@ -22,7 +22,7 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
                     : "memory");
 }
 
-void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
 {
        if (unlikely(!n))
                return;
@@ -38,9 +38,8 @@ void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
        }
        rep_movs(to, (const void *)from, n);
 }
-EXPORT_SYMBOL(memcpy_fromio);
 
-void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
 {
        if (unlikely(!n))
                return;
@@ -56,14 +55,64 @@ void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
        }
        rep_movs((void *)to, (const void *) from, n);
 }
+
+static void unrolled_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+{
+       const volatile char __iomem *in = from;
+       char *out = to;
+       int i;
+
+       for (i = 0; i < n; ++i)
+               out[i] = readb(&in[i]);
+}
+
+static void unrolled_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+{
+       volatile char __iomem *out = to;
+       const char *in = from;
+       int i;
+
+       for (i = 0; i < n; ++i)
+               writeb(in[i], &out[i]);
+}
+
+static void unrolled_memset_io(volatile void __iomem *a, int b, size_t c)
+{
+       volatile char __iomem *mem = a;
+       int i;
+
+       for (i = 0; i < c; ++i)
+               writeb(b, &mem[i]);
+}
+
+void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+{
+       if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO))
+               unrolled_memcpy_fromio(to, from, n);
+       else
+               string_memcpy_fromio(to, from, n);
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+{
+       if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO))
+               unrolled_memcpy_toio(to, from, n);
+       else
+               string_memcpy_toio(to, from, n);
+}
 EXPORT_SYMBOL(memcpy_toio);
 
 void memset_io(volatile void __iomem *a, int b, size_t c)
 {
-       /*
-        * TODO: memset can mangle the IO patterns quite a bit.
-        * perhaps it would be better to use a dumb one:
-        */
-       memset((void *)a, b, c);
+       if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) {
+               unrolled_memset_io(a, b, c);
+       } else {
+               /*
+                * TODO: memset can mangle the IO patterns quite a bit.
+                * perhaps it would be better to use a dumb one:
+                */
+               memset((void *)a, b, c);
+       }
 }
 EXPORT_SYMBOL(memset_io);
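
The run-time choice between the REP-string and unrolled byte-wise copies stays inside the exported helpers, so existing callers are untouched. A minimal hypothetical caller (illustrative sketch only, not taken from the tree; the MMIO base and length are assumptions):

        /* Sketch only; needs <linux/io.h>. */
        static int example_read_mmio(phys_addr_t base, void *dst, size_t len)
        {
                void __iomem *regs = ioremap(base, len);

                if (!regs)
                        return -ENOMEM;
                /* Picks the unrolled path on CC guests, REP MOVS otherwise. */
                memcpy_fromio(dst, regs, len);
                iounmap(regs);
                return 0;
        }
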
index 9f2b251..3822666 100644 (file)
@@ -40,7 +40,8 @@ static void msr_save_context(struct saved_context *ctxt)
        struct saved_msr *end = msr + ctxt->saved_msrs.num;
 
        while (msr < end) {
-               msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
+               if (msr->valid)
+                       rdmsrl(msr->info.msr_no, msr->info.reg.q);
                msr++;
        }
 }
@@ -424,8 +425,10 @@ static int msr_build_context(const u32 *msr_id, const int num)
        }
 
        for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+               u64 dummy;
+
                msr_array[i].info.msr_no        = msr_id[j];
-               msr_array[i].valid              = false;
+               msr_array[i].valid              = !rdmsrl_safe(msr_id[j], &dummy);
                msr_array[i].info.reg.q         = 0;
        }
        saved_msrs->num   = total_num;
@@ -500,10 +503,24 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
        return ret;
 }
 
+static void pm_save_spec_msr(void)
+{
+       u32 spec_msr_id[] = {
+               MSR_IA32_SPEC_CTRL,
+               MSR_IA32_TSX_CTRL,
+               MSR_TSX_FORCE_ABORT,
+               MSR_IA32_MCU_OPT_CTRL,
+               MSR_AMD64_LS_CFG,
+       };
+
+       msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+}
+
 static int pm_check_save_msr(void)
 {
        dmi_check_system(msr_save_dmi_table);
        pm_cpu_check(msr_save_cpu_table);
+       pm_save_spec_msr();
 
        return 0;
 }
index ead7e5b..1bcd42c 100644 (file)
@@ -9,6 +9,7 @@ endmenu
 config UML_X86
        def_bool y
        select ARCH_BINFMT_ELF_EXTRA_PHDRS if X86_32
+       select DCACHE_WORD_ACCESS
 
 config 64BIT
        bool "64-bit kernel" if "$(SUBARCH)" = "x86"
index 48d6cd1..b6b9972 100644 (file)
 #include <linux/msg.h>
 #include <linux/shm.h>
 
-typedef long syscall_handler_t(void);
+typedef long syscall_handler_t(long, long, long, long, long, long);
 
 extern syscall_handler_t *sys_call_table[];
 
 #define EXECUTE_SYSCALL(syscall, regs) \
-       (((long (*)(long, long, long, long, long, long)) \
-         (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+       (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
                                      UPT_SYSCALL_ARG2(&regs->regs), \
                                      UPT_SYSCALL_ARG3(&regs->regs), \
                                      UPT_SYSCALL_ARG4(&regs->regs), \
index fe5323f..27b29ae 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/prctl.h> /* XXX This should get the constants from libc */
 #include <registers.h>
 #include <os.h>
-#include <registers.h>
 
 long arch_prctl(struct task_struct *task, int option,
                unsigned long __user *arg2)
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 0430926..8dfe627 100644 (file)
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
        return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
 {
+       struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+                                            free_work);
        int i;
 
-       if (!blkg)
-               return;
-
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
        kfree(blkg);
 }
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+       if (!blkg)
+               return;
+
+       /*
+        * Both ->pd_free_fn() and request queue's release handler may
+        * sleep, so free us by scheduling one work func
+        */
+       INIT_WORK(&blkg->free_work, blkg_free_workfn);
+       schedule_work(&blkg->free_work);
+}
+
 static void __blkg_release(struct rcu_head *rcu)
 {
        struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
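
Reduced to its essentials, the deferral above follows a common pattern: the RCU release path runs in a context that must not sleep, so it only queues a work item and the work function does the teardown that may sleep. A hedged sketch with hypothetical names (not code from the block layer):

        struct foo {
                struct rcu_head rcu;
                struct work_struct free_work;
        };

        static void foo_free_workfn(struct work_struct *work)
        {
                struct foo *f = container_of(work, struct foo, free_work);

                /* Process context: locks and frees that may sleep are fine here. */
                kfree(f);
        }

        static void foo_release_rcu(struct rcu_head *rcu)
        {
                struct foo *f = container_of(rcu, struct foo, rcu);

                /* Called from softirq context: only schedule the real work. */
                INIT_WORK(&f->free_work, foo_free_workfn);
                schedule_work(&f->free_work);
        }
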
index 11f49f7..df9cfe4 100644 (file)
@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 
                task_lock(task);
                if (task->flags & PF_EXITING) {
-                       err = -ESRCH;
                        kmem_cache_free(iocontext_cachep, ioc);
                        goto out;
                }
@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
        task->io_context->ioprio = ioprio;
 out:
        task_unlock(task);
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);
 
index e6f24fa..ed3ed86 100644 (file)
@@ -4462,21 +4462,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        return true;
 }
 
-static void blk_mq_elv_switch_back(struct list_head *head,
-               struct request_queue *q)
+static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
+                                               struct request_queue *q)
 {
        struct blk_mq_qe_pair *qe;
-       struct elevator_type *t = NULL;
 
        list_for_each_entry(qe, head, node)
-               if (qe->q == q) {
-                       t = qe->type;
-                       break;
-               }
+               if (qe->q == q)
+                       return qe;
 
-       if (!t)
-               return;
+       return NULL;
+}
 
+static void blk_mq_elv_switch_back(struct list_head *head,
+                                 struct request_queue *q)
+{
+       struct blk_mq_qe_pair *qe;
+       struct elevator_type *t;
+
+       qe = blk_lookup_qe_pair(head, q);
+       if (!qe)
+               return;
+       t = qe->type;
        list_del(&qe->node);
        kfree(qe);
 
index 2eb01be..7e44ecc 100644 (file)
@@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
 
 #else
 
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
-}
 static inline int wbt_init(struct request_queue *q)
 {
        return -EINVAL;
index c9a4fc9..b8b6759 100644 (file)
@@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void)
 {
        int idx;
 
-       idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
+       idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
        if (idx == -ENOSPC)
                return -EBUSY;
        return idx;
index 3ea7fe6..d8443cf 100644 (file)
@@ -13,34 +13,20 @@ obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
 endif
 
 quiet_cmd_extract_certs  = CERT    $@
-      cmd_extract_certs  = $(obj)/extract-cert $(2) $@
+      cmd_extract_certs  = $(obj)/extract-cert $(extract-cert-in) $@
+extract-cert-in = $(or $(filter-out $(obj)/extract-cert, $(real-prereqs)),"")
 
 $(obj)/system_certificates.o: $(obj)/x509_certificate_list
 
 $(obj)/x509_certificate_list: $(CONFIG_SYSTEM_TRUSTED_KEYS) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_TRUSTED_KEYS),$<,""))
+       $(call if_changed,extract_certs)
 
 targets += x509_certificate_list
 
-ifeq ($(CONFIG_MODULE_SIG),y)
-       SIGN_KEY = y
-endif
-
-ifeq ($(CONFIG_IMA_APPRAISE_MODSIG),y)
-ifeq ($(CONFIG_MODULES),y)
-       SIGN_KEY = y
-endif
-endif
-
-ifdef SIGN_KEY
-###############################################################################
-#
 # If module signing is requested, say by allyesconfig, but a key has not been
 # supplied, then one will need to be generated to make sure the build does not
 # fail and that the kernel may be used afterwards.
 #
-###############################################################################
-
 # We do it this way rather than having a boolean option for enabling an
 # external private key, because 'make randconfig' might enable such a
 # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
@@ -67,23 +53,22 @@ $(obj)/x509.genkey:
 
 endif # CONFIG_MODULE_SIG_KEY
 
-# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it
-ifneq ($(filter-out pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
-X509_DEP := $(CONFIG_MODULE_SIG_KEY)
-endif
-
 $(obj)/system_certificates.o: $(obj)/signing_key.x509
 
-$(obj)/signing_key.x509: $(X509_DEP) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_MODULE_SIG_KEY),$(if $(X509_DEP),$<,$(CONFIG_MODULE_SIG_KEY)),""))
-endif # CONFIG_MODULE_SIG
+PKCS11_URI := $(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY))
+ifdef PKCS11_URI
+$(obj)/signing_key.x509: extract-cert-in := $(PKCS11_URI)
+endif
+
+$(obj)/signing_key.x509: $(filter-out $(PKCS11_URI),$(CONFIG_MODULE_SIG_KEY)) $(obj)/extract-cert FORCE
+       $(call if_changed,extract_certs)
 
 targets += signing_key.x509
 
 $(obj)/revocation_certificates.o: $(obj)/x509_revocation_list
 
 $(obj)/x509_revocation_list: $(CONFIG_SYSTEM_REVOCATION_KEYS) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_REVOCATION_KEYS),$<,""))
+       $(call if_changed,extract_certs)
 
 targets += x509_revocation_list
 
index e1645e6..003e25d 100644 (file)
@@ -9,10 +9,7 @@
 system_certificate_list:
 __cert_list_start:
 __module_cert_start:
-#if defined(CONFIG_MODULE_SIG) || (defined(CONFIG_IMA_APPRAISE_MODSIG) \
-                              && defined(CONFIG_MODULES))
        .incbin "certs/signing_key.x509"
-#endif
 __module_cert_end:
        .incbin "certs/x509_certificate_list"
 __cert_list_end:
index a5fe292..0555f68 100644 (file)
@@ -353,29 +353,27 @@ static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
                               struct acpi_ipmi_msg *msg)
 {
-       struct acpi_ipmi_msg *tx_msg, *temp;
-       bool msg_found = false;
+       struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
        unsigned long flags;
 
        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
-       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
-               if (msg == tx_msg) {
-                       msg_found = true;
-                       list_del(&tx_msg->head);
+       list_for_each_entry_safe(iter, temp, &ipmi->tx_msg_list, head) {
+               if (msg == iter) {
+                       tx_msg = iter;
+                       list_del(&iter->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
 
-       if (msg_found)
+       if (tx_msg)
                acpi_ipmi_msg_put(tx_msg);
 }
 
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 {
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
-       bool msg_found = false;
-       struct acpi_ipmi_msg *tx_msg, *temp;
+       struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
        struct device *dev = ipmi_device->dev;
        unsigned long flags;
 
@@ -387,16 +385,16 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
        }
 
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-       list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
-               if (msg->msgid == tx_msg->tx_msgid) {
-                       msg_found = true;
-                       list_del(&tx_msg->head);
+       list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
+               if (msg->msgid == iter->tx_msgid) {
+                       tx_msg = iter;
+                       list_del(&iter->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 
-       if (!msg_found) {
+       if (!tx_msg) {
                dev_warn(dev,
                         "Unexpected response (msg id %ld) is returned.\n",
                         msg->msgid);
@@ -482,15 +480,14 @@ err_ref:
 
 static void ipmi_bmc_gone(int iface)
 {
-       struct acpi_ipmi_device *ipmi_device, *temp;
-       bool dev_found = false;
+       struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp;
 
        mutex_lock(&driver_data.ipmi_lock);
-       list_for_each_entry_safe(ipmi_device, temp,
+       list_for_each_entry_safe(iter, temp,
                                 &driver_data.ipmi_devices, head) {
-               if (ipmi_device->ipmi_ifnum != iface) {
-                       dev_found = true;
-                       __ipmi_dev_kill(ipmi_device);
+               if (iter->ipmi_ifnum != iface) {
+                       ipmi_device = iter;
+                       __ipmi_dev_kill(iter);
                        break;
                }
        }
@@ -500,7 +497,7 @@ static void ipmi_bmc_gone(int iface)
                                        struct acpi_ipmi_device, head);
        mutex_unlock(&driver_data.ipmi_lock);
 
-       if (dev_found) {
+       if (ipmi_device) {
                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);
        }
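
The conversions above all follow the same general rule: never use the list cursor after the loop; track the match in a separate pointer instead. A minimal sketch with hypothetical types (not taken from the driver):

        struct item {
                int id;
                struct list_head head;
        };

        static struct item *take_item(struct list_head *list, int id)
        {
                struct item *found = NULL, *iter, *tmp;

                list_for_each_entry_safe(iter, tmp, list, head) {
                        if (iter->id == id) {
                                found = iter;
                                list_del(&iter->head);
                                break;
                        }
                }
                /* NULL means "no match"; the cursor never escapes the loop. */
                return found;
        }
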
index c7fdb12..33b7fbb 100644 (file)
@@ -319,7 +319,7 @@ repeat:
        if (res_ins)
                list_add(&res_ins->list, res_list);
        else {
-               res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+               res_ins = kmalloc(sizeof(*res_ins), GFP_KERNEL);
                if (!res_ins)
                        return -ENOMEM;
                res_ins->start = start;
index d418449..bc14547 100644 (file)
@@ -654,7 +654,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        unsigned int num_ent, i, cpc_rev;
        int pcc_subspace_id = -1;
        acpi_status status;
-       int ret = -EFAULT;
+       int ret = -ENODATA;
 
        if (osc_sb_cppc_not_supported)
                return -ENODEV;
@@ -679,9 +679,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        cpc_obj = &out_obj->package.elements[0];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                num_ent = cpc_obj->integer.value;
+               if (num_ent <= 1) {
+                       pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
+                                num_ent, pr->id);
+                       goto out_free;
+               }
        } else {
-               pr_debug("Unexpected entry type(%d) for NumEntries\n",
-                               cpc_obj->type);
+               pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
+                        cpc_obj->type, pr->id);
                goto out_free;
        }
        cpc_ptr->num_entries = num_ent;
@@ -691,8 +696,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                cpc_rev = cpc_obj->integer.value;
        } else {
-               pr_debug("Unexpected entry type(%d) for Revision\n",
-                               cpc_obj->type);
+               pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
+                        cpc_obj->type, pr->id);
                goto out_free;
        }
        cpc_ptr->version = cpc_rev;
@@ -723,7 +728,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                        if (pcc_data_alloc(pcc_subspace_id))
                                                goto out_free;
                                } else if (pcc_subspace_id != gas_t->access_width) {
-                                       pr_debug("Mismatched PCC ids.\n");
+                                       pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
+                                                pr->id);
                                        goto out_free;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -742,20 +748,21 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                         * SystemIO doesn't implement 64-bit
                                         * registers.
                                         */
-                                       pr_debug("Invalid access width %d for SystemIO register\n",
-                                               gas_t->access_width);
+                                       pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
+                                                gas_t->access_width);
                                        goto out_free;
                                }
                                if (gas_t->address & OVER_16BTS_MASK) {
                                        /* SystemIO registers use 16-bit integer addresses */
-                                       pr_debug("Invalid IO port %llu for SystemIO register\n",
-                                               gas_t->address);
+                                       pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
+                                                gas_t->address);
                                        goto out_free;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
                                        /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
-                                       pr_debug("Unsupported register type: %d\n", gas_t->space_id);
+                                       pr_debug("Unsupported register type (%d) in _CPC\n",
+                                                gas_t->space_id);
                                        goto out_free;
                                }
                        }
@@ -763,7 +770,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
                } else {
-                       pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
+                       pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
+                                i, pr->id);
                        goto out_free;
                }
        }
index ceee808..47ec11d 100644 (file)
@@ -151,8 +151,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic_override *p =
                            (struct acpi_madt_local_apic_override *)header;
-                       pr_info("LAPIC_ADDR_OVR (address[%p])\n",
-                               (void *)(unsigned long)p->address);
+                       pr_info("LAPIC_ADDR_OVR (address[0x%llx])\n",
+                               p->address);
                }
                break;
 
index e5641e6..bb45a9c 100644 (file)
@@ -115,14 +115,16 @@ config SATA_AHCI
 
          If unsure, say N.
 
-config SATA_LPM_POLICY
+config SATA_MOBILE_LPM_POLICY
        int "Default SATA Link Power Management policy for low power chipsets"
        range 0 4
        default 0
        depends on SATA_AHCI
        help
          Select the Default SATA Link Power Management (LPM) policy to use
-         for chipsets / "South Bridges" designated as supporting low power.
+         for chipsets / "South Bridges" supporting low-power modes. Such
+         chipsets are typically found on most laptops but desktops and
+         servers now also widely use chipsets supporting low power modes.
 
          The value set has the following meanings:
                0 => Keep firmware settings
index 84456c0..397dfd2 100644 (file)
@@ -1595,7 +1595,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
 static void ahci_update_initial_lpm_policy(struct ata_port *ap,
                                           struct ahci_host_priv *hpriv)
 {
-       int policy = CONFIG_SATA_LPM_POLICY;
+       int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
 
 
        /* Ignore processing for chipsets that don't use policy */
index 6ead58c..ad11a4c 100644 (file)
@@ -236,7 +236,7 @@ enum {
        AHCI_HFLAG_NO_WRITE_TO_RO       = (1 << 24), /* don't write to read
                                                        only registers */
        AHCI_HFLAG_USE_LPM_POLICY       = (1 << 25), /* chipset that should use
-                                                       SATA_LPM_POLICY
+                                                       SATA_MOBILE_LPM_POLICY
                                                        as default lpm_policy */
        AHCI_HFLAG_SUSPEND_PHYS         = (1 << 26), /* handle PHYs during
                                                        suspend/resume */
index cceedde..ca64837 100644 (file)
@@ -4014,6 +4014,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*MX100*",          "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung SSD 840 EVO*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_NO_DMA_LOG |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 840*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 850*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
index b3be7a8..b1666ad 100644 (file)
@@ -1634,7 +1634,7 @@ EXPORT_SYMBOL_GPL(ata_sff_interrupt);
 
 void ata_sff_lost_interrupt(struct ata_port *ap)
 {
-       u8 status;
+       u8 status = 0;
        struct ata_queued_cmd *qc;
 
        /* Only one outstanding command per SFF channel */
index bec33d7..e3263e9 100644 (file)
@@ -137,7 +137,11 @@ struct sata_dwc_device {
 #endif
 };
 
-#define SATA_DWC_QCMD_MAX      32
+/*
+ * Allow one extra special slot for commands and DMA management
+ * to account for libata internal commands.
+ */
+#define SATA_DWC_QCMD_MAX      (ATA_MAX_QUEUE + 1)
 
 struct sata_dwc_device_port {
        struct sata_dwc_device  *hsdev;
index 2578b2d..e465108 100644 (file)
@@ -1,10 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *  console driver for LCD2S 4x20 character displays connected through i2c.
- *  The display also has a spi interface, but the driver does not support
+ *  Console driver for LCD2S 4x20 character displays connected through i2c.
+ *  The display also has a SPI interface, but the driver does not support
  *  this yet.
  *
- *  This is a driver allowing you to use a LCD2S 4x20 from modtronix
+ *  This is a driver allowing you to use a LCD2S 4x20 from Modtronix
  *  engineering as auxdisplay character device.
  *
  *  (C) 2019 by Lemonage Software GmbH
@@ -12,7 +12,9 @@
  *  All rights reserved.
  */
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
@@ -104,7 +106,7 @@ static int lcd2s_print(struct charlcd *lcd, int c)
 static int lcd2s_gotoxy(struct charlcd *lcd, unsigned int x, unsigned int y)
 {
        struct lcd2s_data *lcd2s = lcd->drvdata;
-       u8 buf[] = { LCD2S_CMD_CUR_POS, y + 1, x + 1};
+       u8 buf[3] = { LCD2S_CMD_CUR_POS, y + 1, x + 1 };
 
        lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf));
 
@@ -214,16 +216,15 @@ static int lcd2s_lines(struct charlcd *lcd, enum charlcd_lines lines)
        return 0;
 }
 
+/*
+ * Generator: LGcxxxxx...xx; must have <c> between '0' and '7',
+ * representing the numerical ASCII code of the redefined character,
+ * and <xx...xx> a sequence of 16 hex digits representing 8 bytes
+ * for each character. Most LCDs will only use 5 lower bits of
+ * the 7 first bytes.
+ */
 static int lcd2s_redefine_char(struct charlcd *lcd, char *esc)
 {
-       /* Generator : LGcxxxxx...xx; must have <c> between '0'
-        * and '7', representing the numerical ASCII code of the
-        * redefined character, and <xx...xx> a sequence of 16
-        * hex digits representing 8 bytes for each character.
-        * Most LCDs will only use 5 lower bits of the 7 first
-        * bytes.
-        */
-
        struct lcd2s_data *lcd2s = lcd->drvdata;
        u8 buf[LCD2S_CHARACTER_SIZE + 2] = { LCD2S_CMD_DEF_CUSTOM_CHAR };
        u8 value;
@@ -286,8 +287,7 @@ static const struct charlcd_ops lcd2s_ops = {
        .redefine_char  = lcd2s_redefine_char,
 };
 
-static int lcd2s_i2c_probe(struct i2c_client *i2c,
-                               const struct i2c_device_id *id)
+static int lcd2s_i2c_probe(struct i2c_client *i2c)
 {
        struct charlcd *lcd;
        struct lcd2s_data *lcd2s;
@@ -355,43 +355,22 @@ static const struct i2c_device_id lcd2s_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, lcd2s_i2c_id);
 
-#ifdef CONFIG_OF
 static const struct of_device_id lcd2s_of_table[] = {
        { .compatible = "modtronix,lcd2s" },
        { }
 };
 MODULE_DEVICE_TABLE(of, lcd2s_of_table);
-#endif
 
 static struct i2c_driver lcd2s_i2c_driver = {
        .driver = {
                .name = "lcd2s",
-#ifdef CONFIG_OF
-               .of_match_table = of_match_ptr(lcd2s_of_table),
-#endif
+               .of_match_table = lcd2s_of_table,
        },
-       .probe = lcd2s_i2c_probe,
+       .probe_new = lcd2s_i2c_probe,
        .remove = lcd2s_i2c_remove,
        .id_table = lcd2s_i2c_id,
 };
-
-static int __init lcd2s_modinit(void)
-{
-       int ret = 0;
-
-       ret = i2c_add_driver(&lcd2s_i2c_driver);
-       if (ret != 0)
-               pr_err("Failed to register lcd2s driver\n");
-
-       return ret;
-}
-module_init(lcd2s_modinit)
-
-static void __exit lcd2s_exit(void)
-{
-       i2c_del_driver(&lcd2s_i2c_driver);
-}
-module_exit(lcd2s_exit)
+module_i2c_driver(lcd2s_i2c_driver);
 
 MODULE_DESCRIPTION("LCD2S character display driver");
 MODULE_AUTHOR("Lars Poeschel");
index 96881d5..9676a1d 100644 (file)
@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
                unsigned int set_size)
 {
        struct drbd_request *r;
-       struct drbd_request *req = NULL;
+       struct drbd_request *req = NULL, *tmp = NULL;
        int expect_epoch = 0;
        int expect_size = 0;
 
@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
         * to catch requests being barrier-acked "unexpectedly".
         * It usually should find the same req again, or some READ preceding it. */
        list_for_each_entry(req, &connection->transfer_log, tl_requests)
-               if (req->epoch == expect_epoch)
+               if (req->epoch == expect_epoch) {
+                       tmp = req;
                        break;
+               }
+       req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
        list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
                if (req->epoch != expect_epoch)
                        break;
index c043945..75be0e1 100644 (file)
@@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
                struct bio_and_error *m)
 {
-       m->bio->bi_status = errno_to_blk_status(m->error);
+       if (unlikely(m->error))
+               m->bio->bi_status = errno_to_blk_status(m->error);
        bio_endio(m->bio);
        dec_ap_bio(device);
 }
@@ -332,17 +333,21 @@ static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct dr
 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+       struct drbd_request *iter = req;
        if (!connection)
                return;
        if (connection->req_next != req)
                return;
-       list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-               const unsigned s = req->rq_state;
-               if (s & RQ_NET_QUEUED)
+
+       req = NULL;
+       list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+               const unsigned int s = iter->rq_state;
+
+               if (s & RQ_NET_QUEUED) {
+                       req = iter;
                        break;
+               }
        }
-       if (&req->tl_requests == &connection->transfer_log)
-               req = NULL;
        connection->req_next = req;
 }
 
@@ -358,17 +363,21 @@ static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, st
 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+       struct drbd_request *iter = req;
        if (!connection)
                return;
        if (connection->req_ack_pending != req)
                return;
-       list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-               const unsigned s = req->rq_state;
-               if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
+
+       req = NULL;
+       list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+               const unsigned int s = iter->rq_state;
+
+               if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) {
+                       req = iter;
                        break;
+               }
        }
-       if (&req->tl_requests == &connection->transfer_log)
-               req = NULL;
        connection->req_ack_pending = req;
 }
 
@@ -384,17 +393,21 @@ static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, s
 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+       struct drbd_request *iter = req;
        if (!connection)
                return;
        if (connection->req_not_net_done != req)
                return;
-       list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-               const unsigned s = req->rq_state;
-               if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
+
+       req = NULL;
+       list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+               const unsigned int s = iter->rq_state;
+
+               if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) {
+                       req = iter;
                        break;
+               }
        }
-       if (&req->tl_requests == &connection->transfer_log)
-               req = NULL;
        connection->req_not_net_done = req;
 }
 
index 3e636a7..a58595f 100644 (file)
@@ -1591,6 +1591,7 @@ struct compat_loop_info {
        compat_ulong_t  lo_inode;       /* ioctl r/o */
        compat_dev_t    lo_rdevice;     /* ioctl r/o */
        compat_int_t    lo_offset;
+       compat_int_t    lo_encrypt_type;        /* obsolete, ignored */
        compat_int_t    lo_encrypt_key_size;    /* ioctl w/o */
        compat_int_t    lo_flags;       /* ioctl r/o */
        char            lo_name[LO_NAME_SIZE];
index 4db9a8c..e094d2b 100644 (file)
@@ -88,7 +88,7 @@ static void n64cart_submit_bio(struct bio *bio)
 {
        struct bio_vec bvec;
        struct bvec_iter iter;
-       struct device *dev = bio->bi_disk->private_data;
+       struct device *dev = bio->bi_bdev->bd_disk->private_data;
        u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 
        bio_for_each_segment(bvec, bio, iter) {
index d1e2646..de42458 100644 (file)
@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
        if (rc)
                goto unmap;
 
-       for (n = 0, i = 0; n < nseg; n++) {
+       for (n = 0; n < nseg; n++) {
                uint8_t first_sect, last_sect;
 
                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
index 378262e..003056d 100644 (file)
@@ -576,7 +576,7 @@ struct setup_rw_req {
        struct blkif_request *ring_req;
        grant_ref_t gref_head;
        unsigned int id;
-       /* Only used when persistent grant is used and it's a read request */
+       /* Only used when persistent grant is used and it's a write request */
        bool need_copy;
        unsigned int bvec_off;
        char *bvec_data;
index 7408118..55f4837 100644 (file)
@@ -449,6 +449,7 @@ config RANDOM_TRUST_BOOTLOADER
        device randomness. Say Y here to assume the entropy provided by the
        bootloader is trustworthy so it will be added to the kernel's entropy
        pool. Otherwise, say N here so it will be regarded as device input that
-       only mixes the entropy pool.
+       only mixes the entropy pool. This can also be configured at boot with
+       "random.trust_bootloader=on/off".
 
 endmenu
index 66ce7c0..e15063d 100644 (file)
@@ -224,9 +224,10 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void
  *
  * These interfaces will return the requested number of random bytes
  * into the given buffer or as a return value. This is equivalent to
- * a read from /dev/urandom. The integer family of functions may be
- * higher performance for one-off random integers, because they do a
- * bit of buffering.
+ * a read from /dev/urandom. The u32, u64, int, and long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
  *
  *********************************************************************/
 
@@ -436,11 +437,8 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
  * This shouldn't be set by functions like add_device_randomness(),
  * where we can't trust the buffer passed to it is guaranteed to be
  * unpredictable (so it might not have any entropy at all).
- *
- * Returns the number of bytes processed from input, which is bounded
- * by CRNG_INIT_CNT_THRESH if account is true.
  */
-static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+static void crng_pre_init_inject(const void *input, size_t len, bool account)
 {
        static int crng_init_cnt = 0;
        struct blake2s_state hash;
@@ -451,18 +449,15 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
        spin_lock_irqsave(&base_crng.lock, flags);
        if (crng_init != 0) {
                spin_unlock_irqrestore(&base_crng.lock, flags);
-               return 0;
+               return;
        }
 
-       if (account)
-               len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
-
        blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
        blake2s_update(&hash, input, len);
        blake2s_final(&hash, base_crng.key);
 
        if (account) {
-               crng_init_cnt += len;
+               crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
                if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                        ++base_crng.generation;
                        crng_init = 1;
@@ -473,8 +468,6 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
 
        if (crng_init == 1)
                pr_notice("fast init done\n");
-
-       return len;
 }
 
 static void _get_random_bytes(void *buf, size_t nbytes)
@@ -530,7 +523,6 @@ EXPORT_SYMBOL(get_random_bytes);
 
 static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
 {
-       bool large_request = nbytes > 256;
        ssize_t ret = 0;
        size_t len;
        u32 chacha_state[CHACHA_STATE_WORDS];
@@ -539,22 +531,23 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
        if (!nbytes)
                return 0;
 
-       len = min_t(size_t, 32, nbytes);
-       crng_make_state(chacha_state, output, len);
-
-       if (copy_to_user(buf, output, len))
-               return -EFAULT;
-       nbytes -= len;
-       buf += len;
-       ret += len;
-
-       while (nbytes) {
-               if (large_request && need_resched()) {
-                       if (signal_pending(current))
-                               break;
-                       schedule();
-               }
+       /*
+        * Immediately overwrite the ChaCha key at index 4 with random
+        * bytes, in case userspace causes copy_to_user() below to sleep
+        * forever, so that we still retain forward secrecy in that case.
+        */
+       crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+       /*
+        * However, if we're doing a read of len <= 32, we don't need to
+        * use chacha_state after, so we can simply return those bytes to
+        * the user directly.
+        */
+       if (nbytes <= CHACHA_KEY_SIZE) {
+               ret = copy_to_user(buf, &chacha_state[4], nbytes) ? -EFAULT : nbytes;
+               goto out_zero_chacha;
+       }
 
+       do {
                chacha20_block(chacha_state, output);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];
@@ -568,10 +561,18 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
                nbytes -= len;
                buf += len;
                ret += len;
-       }
 
-       memzero_explicit(chacha_state, sizeof(chacha_state));
+               BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
+               if (!(ret % PAGE_SIZE) && nbytes) {
+                       if (signal_pending(current))
+                               break;
+                       cond_resched();
+               }
+       } while (nbytes);
+
        memzero_explicit(output, sizeof(output));
+out_zero_chacha:
+       memzero_explicit(chacha_state, sizeof(chacha_state));
        return ret;
 }
 
@@ -948,11 +949,17 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
  **********************************************************************/
 
 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
 static int __init parse_trust_cpu(char *arg)
 {
        return kstrtobool(arg, &trust_cpu);
 }
+static int __init parse_trust_bootloader(char *arg)
+{
+       return kstrtobool(arg, &trust_bootloader);
+}
 early_param("random.trust_cpu", parse_trust_cpu);
+early_param("random.trust_bootloader", parse_trust_bootloader);
 
 /*
  * The first collection of entropy occurs at system boot while interrupts
@@ -968,6 +975,11 @@ int __init rand_initialize(void)
        bool arch_init = true;
        unsigned long rv;
 
+#if defined(LATENT_ENTROPY_PLUGIN)
+       static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+       _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
+#endif
+
        for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
                if (!arch_get_random_seed_long_early(&rv) &&
                    !arch_get_random_long_early(&rv)) {
@@ -1128,13 +1140,10 @@ void rand_initialize_disk(struct gendisk *disk)
 void add_hwgenerator_randomness(const void *buffer, size_t count,
                                size_t entropy)
 {
-       if (unlikely(crng_init == 0)) {
-               size_t ret = crng_pre_init_inject(buffer, count, true);
-               mix_pool_bytes(buffer, ret);
-               count -= ret;
-               buffer += ret;
-               if (!count || crng_init == 0)
-                       return;
+       if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
+               crng_pre_init_inject(buffer, count, true);
+               mix_pool_bytes(buffer, count);
+               return;
        }
 
        /*
@@ -1160,7 +1169,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
  */
 void add_bootloader_randomness(const void *buf, size_t size)
 {
-       if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+       if (trust_bootloader)
                add_hwgenerator_randomness(buf, size, size * 8);
        else
                add_device_randomness(buf, size);
@@ -1533,6 +1542,13 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
 {
        static int maxwarn = 10;
 
+       /*
+        * Opportunistically attempt to initialize the RNG on platforms that
+        * have fast cycle counters, but don't (for now) require it to succeed.
+        */
+       if (!crng_ready())
+               try_to_generate_entropy();
+
        if (!crng_ready() && maxwarn > 0) {
                maxwarn--;
                if (__ratelimit(&urandom_warning))
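
The forward-secrecy comment added to get_random_bytes_user() above describes a "fast key erasure" construction. A toy sketch of the idea follows; toy_prf_block() is hypothetical, and the key and block sizes are for illustration only (ChaCha20 plays this role in the real code):

        static void toy_draw(u8 key[32], u8 *out, size_t len)
        {
                u8 block[64];

                while (len) {
                        size_t n = min(len, sizeof(block) - 32);

                        toy_prf_block(key, block);      /* hypothetical: 32-byte key -> 64-byte block */
                        /* Erase the old key before handing out any output derived from it. */
                        memcpy(key, block, 32);
                        memcpy(out, block + 32, n);
                        out += n;
                        len -= n;
                }
                memzero_explicit(block, sizeof(block));
        }
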
index 07a27b6..ed11918 100644 (file)
@@ -2332,15 +2332,19 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
 }
 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
 
-static int clk_set_rate_range_nolock(struct clk *clk,
-                                    unsigned long min,
-                                    unsigned long max)
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
 {
        int ret = 0;
        unsigned long old_min, old_max, rate;
 
-       lockdep_assert_held(&prepare_lock);
-
        if (!clk)
                return 0;
 
@@ -2353,6 +2357,8 @@ static int clk_set_rate_range_nolock(struct clk *clk,
                return -EINVAL;
        }
 
+       clk_prepare_lock();
+
        if (clk->exclusive_count)
                clk_core_rate_unprotect(clk->core);
 
@@ -2396,28 +2402,6 @@ out:
        if (clk->exclusive_count)
                clk_core_rate_protect(clk->core);
 
-       return ret;
-}
-
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Return: 0 for success or negative errno on failure.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
-{
-       int ret;
-
-       if (!clk)
-               return 0;
-
-       clk_prepare_lock();
-
-       ret = clk_set_rate_range_nolock(clk, min, max);
-
        clk_prepare_unlock();
 
        return ret;
@@ -4419,7 +4403,9 @@ void __clk_put(struct clk *clk)
        }
 
        hlist_del(&clk->clks_node);
-       clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
+       if (clk->min_rate > clk->core->req_rate ||
+           clk->max_rate < clk->core->req_rate)
+               clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
 
        owner = clk->core->owner;
        kref_put(&clk->core->ref, __clk_release);
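
For reference, the clk_set_rate_range() consumer API documented above is used
roughly as follows; a minimal sketch assuming "clk" was obtained earlier with
clk_get() or devm_clk_get():

    /* Constrain the clock to 100-200 MHz (both bounds inclusive, in Hz). */
    ret = clk_set_rate_range(clk, 100000000, 200000000);
    if (ret)
        return ret;

    /* Releasing the constraint is just widening the range back out. */
    clk_set_rate_range(clk, 0, ULONG_MAX);
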
index fd2339c..6731a82 100644 (file)
@@ -760,65 +760,9 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
        clk_put(user1);
 }
 
-/*
- * Test that if we have several subsequent calls to
- * clk_set_rate_range(), across multiple users, the core will reevaluate
- * whether a new rate is needed, including when a user drop its clock.
- *
- * With clk_dummy_maximize_rate_ops, this means that the rate will
- * trail along the maximum as it evolves.
- */
-static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
-{
-       struct clk_dummy_context *ctx = test->priv;
-       struct clk_hw *hw = &ctx->hw;
-       struct clk *clk = hw->clk;
-       struct clk *user1, *user2;
-       unsigned long rate;
-
-       user1 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
-
-       user2 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
-                       0);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user1,
-                                          0,
-                                          DUMMY_CLOCK_RATE_2),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user2,
-                                          0,
-                                          DUMMY_CLOCK_RATE_1),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
-
-       clk_put(user2);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
-
-       clk_put(user1);
-}
-
 static struct kunit_case clk_range_maximize_test_cases[] = {
        KUNIT_CASE(clk_range_test_set_range_rate_maximized),
        KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
-       KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
        {}
 };
 
@@ -933,61 +877,9 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
        clk_put(user1);
 }
 
-/*
- * Test that if we have several subsequent calls to
- * clk_set_rate_range(), across multiple users, the core will reevaluate
- * whether a new rate is needed, including when a user drop its clock.
- *
- * With clk_dummy_minimize_rate_ops, this means that the rate will
- * trail along the minimum as it evolves.
- */
-static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
-{
-       struct clk_dummy_context *ctx = test->priv;
-       struct clk_hw *hw = &ctx->hw;
-       struct clk *clk = hw->clk;
-       struct clk *user1, *user2;
-       unsigned long rate;
-
-       user1 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
-
-       user2 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user1,
-                                          DUMMY_CLOCK_RATE_1,
-                                          ULONG_MAX),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user2,
-                                          DUMMY_CLOCK_RATE_2,
-                                          ULONG_MAX),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
-
-       clk_put(user2);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
-
-       clk_put(user1);
-}
-
 static struct kunit_case clk_range_minimize_test_cases[] = {
        KUNIT_CASE(clk_range_test_set_range_rate_minimized),
        KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
-       KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
        {}
 };
 
index 68a94e5..4615376 100644 (file)
@@ -69,6 +69,11 @@ config SUN6I_A31_CCU
        default MACH_SUN6I
        depends on MACH_SUN6I || COMPILE_TEST
 
+config SUN6I_RTC_CCU
+       tristate "Support for the Allwinner H616/R329 RTC CCU"
+       default ARCH_SUNXI
+       depends on ARCH_SUNXI || COMPILE_TEST
+
 config SUN8I_A23_CCU
        tristate "Support for the Allwinner A23 CCU"
        default MACH_SUN8I
index ec931cb..6b3ae2b 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
 obj-$(CONFIG_SUN4I_A10_CCU)    += sun4i-a10-ccu.o
 obj-$(CONFIG_SUN5I_CCU)                += sun5i-ccu.o
 obj-$(CONFIG_SUN6I_A31_CCU)    += sun6i-a31-ccu.o
+obj-$(CONFIG_SUN6I_RTC_CCU)    += sun6i-rtc-ccu.o
 obj-$(CONFIG_SUN8I_A23_CCU)    += sun8i-a23-ccu.o
 obj-$(CONFIG_SUN8I_A33_CCU)    += sun8i-a33-ccu.o
 obj-$(CONFIG_SUN8I_A83T_CCU)   += sun8i-a83t-ccu.o
@@ -60,6 +61,7 @@ sun50i-h616-ccu-y             += ccu-sun50i-h616.o
 sun4i-a10-ccu-y                        += ccu-sun4i-a10.o
 sun5i-ccu-y                    += ccu-sun5i.o
 sun6i-a31-ccu-y                        += ccu-sun6i-a31.o
+sun6i-rtc-ccu-y                        += ccu-sun6i-rtc.o
 sun8i-a23-ccu-y                        += ccu-sun8i-a23.o
 sun8i-a33-ccu-y                        += ccu-sun8i-a33.o
 sun8i-a83t-ccu-y               += ccu-sun8i-a83t.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
new file mode 100644 (file)
index 0000000..8a10bad
--- /dev/null
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 Samuel Holland <samuel@sholland.org>
+//
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <linux/clk/sunxi-ng.h>
+
+#include "ccu_common.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mux.h"
+
+#include "ccu-sun6i-rtc.h"
+
+#define IOSC_ACCURACY                  300000000 /* 30% */
+#define IOSC_RATE                      16000000
+
+#define LOSC_RATE                      32768
+#define LOSC_RATE_SHIFT                        15
+
+#define LOSC_CTRL_REG                  0x0
+#define LOSC_CTRL_KEY                  0x16aa0000
+
+#define IOSC_32K_CLK_DIV_REG           0x8
+#define IOSC_32K_CLK_DIV               GENMASK(4, 0)
+#define IOSC_32K_PRE_DIV               32
+
+#define IOSC_CLK_CALI_REG              0xc
+#define IOSC_CLK_CALI_DIV_ONES         22
+#define IOSC_CLK_CALI_EN               BIT(1)
+#define IOSC_CLK_CALI_SRC_SEL          BIT(0)
+
+#define LOSC_OUT_GATING_REG            0x60
+
+#define DCXO_CTRL_REG                  0x160
+#define DCXO_CTRL_CLK16M_RC_EN         BIT(0)
+
+struct sun6i_rtc_match_data {
+       bool                            have_ext_osc32k         : 1;
+       bool                            have_iosc_calibration   : 1;
+       bool                            rtc_32k_single_parent   : 1;
+       const struct clk_parent_data    *osc32k_fanout_parents;
+       u8                              osc32k_fanout_nparents;
+};
+
+static bool have_iosc_calibration;
+
+static int ccu_iosc_enable(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_enable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static void ccu_iosc_disable(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_disable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static int ccu_iosc_is_enabled(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_is_enabled(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static unsigned long ccu_iosc_recalc_rate(struct clk_hw *hw,
+                                         unsigned long parent_rate)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       if (have_iosc_calibration) {
+               u32 reg = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /*
+                * Recover the IOSC frequency by shifting the ones place of
+                * (fixed-point divider * 32768) into bit zero.
+                */
+               if (reg & IOSC_CLK_CALI_EN)
+                       return reg >> (IOSC_CLK_CALI_DIV_ONES - LOSC_RATE_SHIFT);
+       }
+
+       return IOSC_RATE;
+}
+
+static unsigned long ccu_iosc_recalc_accuracy(struct clk_hw *hw,
+                                             unsigned long parent_accuracy)
+{
+       return IOSC_ACCURACY;
+}
+
+static const struct clk_ops ccu_iosc_ops = {
+       .enable                 = ccu_iosc_enable,
+       .disable                = ccu_iosc_disable,
+       .is_enabled             = ccu_iosc_is_enabled,
+       .recalc_rate            = ccu_iosc_recalc_rate,
+       .recalc_accuracy        = ccu_iosc_recalc_accuracy,
+};
+
+static struct ccu_common iosc_clk = {
+       .reg            = DCXO_CTRL_REG,
+       .hw.init        = CLK_HW_INIT_NO_PARENT("iosc", &ccu_iosc_ops,
+                                               CLK_GET_RATE_NOCACHE),
+};
+
+static int ccu_iosc_32k_prepare(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (!have_iosc_calibration)
+               return 0;
+
+       val = readl(cm->base + IOSC_CLK_CALI_REG);
+       writel(val | IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL,
+              cm->base + IOSC_CLK_CALI_REG);
+
+       return 0;
+}
+
+static void ccu_iosc_32k_unprepare(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (!have_iosc_calibration)
+               return;
+
+       val = readl(cm->base + IOSC_CLK_CALI_REG);
+       writel(val & ~(IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL),
+              cm->base + IOSC_CLK_CALI_REG);
+}
+
+static unsigned long ccu_iosc_32k_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (have_iosc_calibration) {
+               val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /* Assume the calibrated 32k clock is accurate. */
+               if (val & IOSC_CLK_CALI_SRC_SEL)
+                       return LOSC_RATE;
+       }
+
+       val = readl(cm->base + IOSC_32K_CLK_DIV_REG) & IOSC_32K_CLK_DIV;
+
+       return parent_rate / IOSC_32K_PRE_DIV / (val + 1);
+}
+
+static unsigned long ccu_iosc_32k_recalc_accuracy(struct clk_hw *hw,
+                                                 unsigned long parent_accuracy)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (have_iosc_calibration) {
+               val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /* Assume the calibrated 32k clock is accurate. */
+               if (val & IOSC_CLK_CALI_SRC_SEL)
+                       return 0;
+       }
+
+       return parent_accuracy;
+}
+
+static const struct clk_ops ccu_iosc_32k_ops = {
+       .prepare                = ccu_iosc_32k_prepare,
+       .unprepare              = ccu_iosc_32k_unprepare,
+       .recalc_rate            = ccu_iosc_32k_recalc_rate,
+       .recalc_accuracy        = ccu_iosc_32k_recalc_accuracy,
+};
+
+static struct ccu_common iosc_32k_clk = {
+       .hw.init        = CLK_HW_INIT_HW("iosc-32k", &iosc_clk.hw,
+                                        &ccu_iosc_32k_ops,
+                                        CLK_GET_RATE_NOCACHE),
+};
+
+static const struct clk_hw *ext_osc32k[] = { NULL }; /* updated during probe */
+
+static SUNXI_CCU_GATE_HWS(ext_osc32k_gate_clk, "ext-osc32k-gate",
+                         ext_osc32k, 0x0, BIT(4), 0);
+
+static const struct clk_hw *osc32k_parents[] = {
+       &iosc_32k_clk.hw,
+       &ext_osc32k_gate_clk.common.hw
+};
+
+static struct clk_init_data osc32k_init_data = {
+       .name           = "osc32k",
+       .ops            = &ccu_mux_ops,
+       .parent_hws     = osc32k_parents,
+       .num_parents    = ARRAY_SIZE(osc32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux osc32k_clk = {
+       .mux    = _SUNXI_CCU_MUX(0, 1),
+       .common = {
+               .reg            = LOSC_CTRL_REG,
+               .features       = CCU_FEATURE_KEY_FIELD,
+               .hw.init        = &osc32k_init_data,
+       },
+};
+
+/* This falls back to the global name for fwnodes without a named reference. */
+static const struct clk_parent_data osc24M[] = {
+       { .fw_name = "hosc", .name = "osc24M" }
+};
+
+static struct ccu_gate osc24M_32k_clk = {
+       .enable = BIT(16),
+       .common = {
+               .reg            = LOSC_OUT_GATING_REG,
+               .prediv         = 750,
+               .features       = CCU_FEATURE_ALL_PREDIV,
+               .hw.init        = CLK_HW_INIT_PARENTS_DATA("osc24M-32k", osc24M,
+                                                          &ccu_gate_ops, 0),
+       },
+};
+
+static const struct clk_hw *rtc_32k_parents[] = {
+       &osc32k_clk.common.hw,
+       &osc24M_32k_clk.common.hw
+};
+
+static struct clk_init_data rtc_32k_init_data = {
+       .name           = "rtc-32k",
+       .ops            = &ccu_mux_ops,
+       .parent_hws     = rtc_32k_parents,
+       .num_parents    = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux rtc_32k_clk = {
+       .mux    = _SUNXI_CCU_MUX(1, 1),
+       .common = {
+               .reg            = LOSC_CTRL_REG,
+               .features       = CCU_FEATURE_KEY_FIELD,
+               .hw.init        = &rtc_32k_init_data,
+       },
+};
+
+static struct clk_init_data osc32k_fanout_init_data = {
+       .name           = "osc32k-fanout",
+       .ops            = &ccu_mux_ops,
+       /* parents are set during probe */
+};
+
+static struct ccu_mux osc32k_fanout_clk = {
+       .enable = BIT(0),
+       .mux    = _SUNXI_CCU_MUX(1, 2),
+       .common = {
+               .reg            = LOSC_OUT_GATING_REG,
+               .hw.init        = &osc32k_fanout_init_data,
+       },
+};
+
+static struct ccu_common *sun6i_rtc_ccu_clks[] = {
+       &iosc_clk,
+       &iosc_32k_clk,
+       &ext_osc32k_gate_clk.common,
+       &osc32k_clk.common,
+       &osc24M_32k_clk.common,
+       &rtc_32k_clk.common,
+       &osc32k_fanout_clk.common,
+};
+
+static struct clk_hw_onecell_data sun6i_rtc_ccu_hw_clks = {
+       .num = CLK_NUMBER,
+       .hws = {
+               [CLK_OSC32K]            = &osc32k_clk.common.hw,
+               [CLK_OSC32K_FANOUT]     = &osc32k_fanout_clk.common.hw,
+               [CLK_IOSC]              = &iosc_clk.hw,
+               [CLK_IOSC_32K]          = &iosc_32k_clk.hw,
+               [CLK_EXT_OSC32K_GATE]   = &ext_osc32k_gate_clk.common.hw,
+               [CLK_OSC24M_32K]        = &osc24M_32k_clk.common.hw,
+               [CLK_RTC_32K]           = &rtc_32k_clk.common.hw,
+       },
+};
+
+static const struct sunxi_ccu_desc sun6i_rtc_ccu_desc = {
+       .ccu_clks       = sun6i_rtc_ccu_clks,
+       .num_ccu_clks   = ARRAY_SIZE(sun6i_rtc_ccu_clks),
+
+       .hw_clks        = &sun6i_rtc_ccu_hw_clks,
+};
+
+static const struct clk_parent_data sun50i_h6_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+};
+
+static const struct clk_parent_data sun50i_h616_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+       { .fw_name = "pll-32k" },
+       { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct clk_parent_data sun50i_r329_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+       { .hw = &ext_osc32k_gate_clk.common.hw },
+       { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct sun6i_rtc_match_data sun50i_h6_rtc_ccu_data = {
+       .have_ext_osc32k        = true,
+       .have_iosc_calibration  = true,
+       .osc32k_fanout_parents  = sun50i_h6_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h6_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_h616_rtc_ccu_data = {
+       .have_iosc_calibration  = true,
+       .rtc_32k_single_parent  = true,
+       .osc32k_fanout_parents  = sun50i_h616_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h616_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
+       .have_ext_osc32k        = true,
+       .osc32k_fanout_parents  = sun50i_r329_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
+static const struct of_device_id sun6i_rtc_ccu_match[] = {
+       {
+               .compatible     = "allwinner,sun50i-h6-rtc",
+               .data           = &sun50i_h6_rtc_ccu_data,
+       },
+       {
+               .compatible     = "allwinner,sun50i-h616-rtc",
+               .data           = &sun50i_h616_rtc_ccu_data,
+       },
+       {
+               .compatible     = "allwinner,sun50i-r329-rtc",
+               .data           = &sun50i_r329_rtc_ccu_data,
+       },
+       { /* sentinel */ },
+};
+
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
+{
+       const struct sun6i_rtc_match_data *data;
+       struct clk *ext_osc32k_clk = NULL;
+       const struct of_device_id *match;
+
+       /* This driver is only used for newer variants of the hardware. */
+       match = of_match_device(sun6i_rtc_ccu_match, dev);
+       if (!match)
+               return 0;
+
+       data = match->data;
+       have_iosc_calibration = data->have_iosc_calibration;
+
+       if (data->have_ext_osc32k) {
+               const char *fw_name;
+
+               /* ext-osc32k was the only input clock in the old binding. */
+               fw_name = of_property_read_bool(dev->of_node, "clock-names")
+                       ? "ext-osc32k" : NULL;
+               ext_osc32k_clk = devm_clk_get_optional(dev, fw_name);
+               if (IS_ERR(ext_osc32k_clk))
+                       return PTR_ERR(ext_osc32k_clk);
+       }
+
+       if (ext_osc32k_clk) {
+               /* Link ext-osc32k-gate to its parent. */
+               *ext_osc32k = __clk_get_hw(ext_osc32k_clk);
+       } else {
+               /* ext-osc32k-gate is an orphan, so do not register it. */
+               sun6i_rtc_ccu_hw_clks.hws[CLK_EXT_OSC32K_GATE] = NULL;
+               osc32k_init_data.num_parents = 1;
+       }
+
+       if (data->rtc_32k_single_parent)
+               rtc_32k_init_data.num_parents = 1;
+
+       osc32k_fanout_init_data.parent_data = data->osc32k_fanout_parents;
+       osc32k_fanout_init_data.num_parents = data->osc32k_fanout_nparents;
+
+       return devm_sunxi_ccu_probe(dev, reg, &sun6i_rtc_ccu_desc);
+}
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
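
The fixed-point arithmetic in ccu_iosc_recalc_rate() above is easier to follow
with numbers; an informal check (not part of the driver), assuming the ones
place of the divider sits at bit IOSC_CLK_CALI_DIV_ONES (22):

    /* The calibration register holds iosc_rate / 32768 as a fixed-point
     * value, so iosc_rate = (reg >> 22) << 15 = reg >> (22 - 15). */
    u32 reg = (16000000 / 32768) << 22;     /* divider of 488 for a 16 MHz IOSC */
    unsigned long rate = reg >> (22 - 15);  /* 488 << 15 = 15990784 Hz, ~16 MHz */
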
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h
new file mode 100644 (file)
index 0000000..9ae821f
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CCU_SUN6I_RTC_H
+#define _CCU_SUN6I_RTC_H
+
+#include <dt-bindings/clock/sun6i-rtc.h>
+
+#define CLK_IOSC_32K           3
+#define CLK_EXT_OSC32K_GATE    4
+#define CLK_OSC24M_32K         5
+#define CLK_RTC_32K            6
+
+#define CLK_NUMBER             (CLK_RTC_32K + 1)
+
+#endif /* _CCU_SUN6I_RTC_H */
index 98a1834..fbf16c6 100644 (file)
@@ -17,6 +17,7 @@
 #define CCU_FEATURE_LOCK_REG           BIT(5)
 #define CCU_FEATURE_MMC_TIMING_SWITCH  BIT(6)
 #define CCU_FEATURE_SIGMA_DELTA_MOD    BIT(7)
+#define CCU_FEATURE_KEY_FIELD          BIT(8)
 
 /* MMC timing mode switch bit */
 #define CCU_MMC_NEW_TIMING_MODE                BIT(30)
index 2306a1c..1d557e3 100644 (file)
@@ -12,6 +12,8 @@
 #include "ccu_gate.h"
 #include "ccu_mux.h"
 
+#define CCU_MUX_KEY_VALUE              0x16aa0000
+
 static u16 ccu_mux_get_prediv(struct ccu_common *common,
                              struct ccu_mux_internal *cm,
                              int parent_index)
@@ -191,6 +193,11 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
        spin_lock_irqsave(common->lock, flags);
 
        reg = readl(common->base + common->reg);
+
+       /* The key field always reads as zero. */
+       if (common->features & CCU_FEATURE_KEY_FIELD)
+               reg |= CCU_MUX_KEY_VALUE;
+
        reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift);
        writel(reg | (index << cm->shift), common->base + common->reg);
 
index c0aeedd..ff71dd6 100644 (file)
@@ -47,6 +47,10 @@ config CPU_IDLE_GOV_HALTPOLL
 config DT_IDLE_STATES
        bool
 
+config DT_IDLE_GENPD
+       depends on PM_GENERIC_DOMAINS_OF
+       bool
+
 menu "ARM CPU Idle Drivers"
 depends on ARM || ARM64
 source "drivers/cpuidle/Kconfig.arm"
@@ -62,6 +66,11 @@ depends on PPC
 source "drivers/cpuidle/Kconfig.powerpc"
 endmenu
 
+menu "RISC-V CPU Idle Drivers"
+depends on RISCV
+source "drivers/cpuidle/Kconfig.riscv"
+endmenu
+
 config HALTPOLL_CPUIDLE
        tristate "Halt poll cpuidle driver"
        depends on X86 && KVM_GUEST
index 15d6c46..be7f512 100644 (file)
@@ -27,6 +27,7 @@ config ARM_PSCI_CPUIDLE_DOMAIN
        bool "PSCI CPU idle Domain"
        depends on ARM_PSCI_CPUIDLE
        depends on PM_GENERIC_DOMAINS_OF
+       select DT_IDLE_GENPD
        default y
        help
          Select this to enable the PSCI based CPUidle driver to use PM domains,
diff --git a/drivers/cpuidle/Kconfig.riscv b/drivers/cpuidle/Kconfig.riscv
new file mode 100644 (file)
index 0000000..78518c2
--- /dev/null
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RISC-V CPU Idle drivers
+#
+
+config RISCV_SBI_CPUIDLE
+       bool "RISC-V SBI CPU idle Driver"
+       depends on RISCV_SBI
+       select DT_IDLE_STATES
+       select CPU_IDLE_MULTIPLE_DRIVERS
+       select DT_IDLE_GENPD if PM_GENERIC_DOMAINS_OF
+       help
+         Select this option to enable the RISC-V SBI firmware-based CPU idle
+         driver for RISC-V systems. This driver also supports a hierarchical
+         DT-based layout of the idle states.
index 26bbc5e..d103342 100644 (file)
@@ -6,6 +6,7 @@
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 obj-$(CONFIG_DT_IDLE_STATES)             += dt_idle_states.o
+obj-$(CONFIG_DT_IDLE_GENPD)              += dt_idle_genpd.o
 obj-$(CONFIG_ARCH_HAS_CPU_RELAX)         += poll_state.o
 obj-$(CONFIG_HALTPOLL_CPUIDLE)           += cpuidle-haltpoll.o
 
@@ -34,3 +35,7 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE)                += cpuidle-cps.o
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)          += cpuidle-pseries.o
 obj-$(CONFIG_POWERNV_CPUIDLE)          += cpuidle-powernv.o
+
+###############################################################################
+# RISC-V drivers
+obj-$(CONFIG_RISCV_SBI_CPUIDLE)                += cpuidle-riscv-sbi.o
index ff2c3f8..755bbdf 100644 (file)
@@ -47,73 +47,14 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
        return 0;
 }
 
-static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
-                                    int state_count)
-{
-       int i, ret;
-       u32 psci_state, *psci_state_buf;
-
-       for (i = 0; i < state_count; i++) {
-               ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
-                                       &psci_state);
-               if (ret)
-                       goto free_state;
-
-               psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
-               if (!psci_state_buf) {
-                       ret = -ENOMEM;
-                       goto free_state;
-               }
-               *psci_state_buf = psci_state;
-               states[i].data = psci_state_buf;
-       }
-
-       return 0;
-
-free_state:
-       i--;
-       for (; i >= 0; i--)
-               kfree(states[i].data);
-       return ret;
-}
-
-static int psci_pd_parse_states(struct device_node *np,
-                       struct genpd_power_state **states, int *state_count)
-{
-       int ret;
-
-       /* Parse the domain idle states. */
-       ret = of_genpd_parse_idle_states(np, states, state_count);
-       if (ret)
-               return ret;
-
-       /* Fill out the PSCI specifics for each found state. */
-       ret = psci_pd_parse_state_nodes(*states, *state_count);
-       if (ret)
-               kfree(*states);
-
-       return ret;
-}
-
-static void psci_pd_free_states(struct genpd_power_state *states,
-                               unsigned int state_count)
-{
-       int i;
-
-       for (i = 0; i < state_count; i++)
-               kfree(states[i].data);
-       kfree(states);
-}
-
 static int psci_pd_init(struct device_node *np, bool use_osi)
 {
        struct generic_pm_domain *pd;
        struct psci_pd_provider *pd_provider;
        struct dev_power_governor *pd_gov;
-       struct genpd_power_state *states = NULL;
        int ret = -ENOMEM, state_count = 0;
 
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       pd = dt_idle_pd_alloc(np, psci_dt_parse_state_node);
        if (!pd)
                goto out;
 
@@ -121,22 +62,6 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
        if (!pd_provider)
                goto free_pd;
 
-       pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
-       if (!pd->name)
-               goto free_pd_prov;
-
-       /*
-        * Parse the domain idle states and let genpd manage the state selection
-        * for those being compatible with "domain-idle-state".
-        */
-       ret = psci_pd_parse_states(np, &states, &state_count);
-       if (ret)
-               goto free_name;
-
-       pd->free_states = psci_pd_free_states;
-       pd->name = kbasename(pd->name);
-       pd->states = states;
-       pd->state_count = state_count;
        pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
 
        /* Allow power off when OSI has been successfully enabled. */
@@ -149,10 +74,8 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
        pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
 
        ret = pm_genpd_init(pd, pd_gov, false);
-       if (ret) {
-               psci_pd_free_states(states, state_count);
-               goto free_name;
-       }
+       if (ret)
+               goto free_pd_prov;
 
        ret = of_genpd_add_provider_simple(np, pd);
        if (ret)
@@ -166,12 +89,10 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
 
 remove_pd:
        pm_genpd_remove(pd);
-free_name:
-       kfree(pd->name);
 free_pd_prov:
        kfree(pd_provider);
 free_pd:
-       kfree(pd);
+       dt_idle_pd_free(pd);
 out:
        pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
        return ret;
@@ -195,30 +116,6 @@ static void psci_pd_remove(void)
        }
 }
 
-static int psci_pd_init_topology(struct device_node *np)
-{
-       struct device_node *node;
-       struct of_phandle_args child, parent;
-       int ret;
-
-       for_each_child_of_node(np, node) {
-               if (of_parse_phandle_with_args(node, "power-domains",
-                                       "#power-domain-cells", 0, &parent))
-                       continue;
-
-               child.np = node;
-               child.args_count = 0;
-               ret = of_genpd_add_subdomain(&parent, &child);
-               of_node_put(parent.np);
-               if (ret) {
-                       of_node_put(node);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
 static bool psci_pd_try_set_osi_mode(void)
 {
        int ret;
@@ -282,7 +179,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
                goto no_pd;
 
        /* Link genpd masters/subdomains to model the CPU topology. */
-       ret = psci_pd_init_topology(np);
+       ret = dt_idle_pd_init_topology(np);
        if (ret)
                goto remove_pd;
 
@@ -314,28 +211,3 @@ static int __init psci_idle_init_domains(void)
        return platform_driver_register(&psci_cpuidle_domain_driver);
 }
 subsys_initcall(psci_idle_init_domains);
-
-struct device *psci_dt_attach_cpu(int cpu)
-{
-       struct device *dev;
-
-       dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
-       if (IS_ERR_OR_NULL(dev))
-               return dev;
-
-       pm_runtime_irq_safe(dev);
-       if (cpu_online(cpu))
-               pm_runtime_get_sync(dev);
-
-       dev_pm_syscore_device(dev, true);
-
-       return dev;
-}
-
-void psci_dt_detach_cpu(struct device *dev)
-{
-       if (IS_ERR_OR_NULL(dev))
-               return;
-
-       dev_pm_domain_detach(dev, false);
-}
index d8e925e..4e13264 100644 (file)
@@ -10,8 +10,19 @@ void psci_set_domain_state(u32 state);
 int psci_dt_parse_state_node(struct device_node *np, u32 *state);
 
 #ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
-struct device *psci_dt_attach_cpu(int cpu);
-void psci_dt_detach_cpu(struct device *dev);
+
+#include "dt_idle_genpd.h"
+
+static inline struct device *psci_dt_attach_cpu(int cpu)
+{
+       return dt_idle_attach_cpu(cpu, "psci");
+}
+
+static inline void psci_dt_detach_cpu(struct device *dev)
+{
+       dt_idle_detach_cpu(dev);
+}
+
 #else
 static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
 static inline void psci_dt_detach_cpu(struct device *dev) { }
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
new file mode 100644 (file)
index 0000000..b459eda
--- /dev/null
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RISC-V SBI CPU idle driver.
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu_cooling.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#include "dt_idle_states.h"
+#include "dt_idle_genpd.h"
+
+struct sbi_cpuidle_data {
+       u32 *states;
+       struct device *dev;
+};
+
+struct sbi_domain_state {
+       bool available;
+       u32 state;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
+static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
+static bool sbi_cpuidle_use_osi;
+static bool sbi_cpuidle_use_cpuhp;
+static bool sbi_cpuidle_pd_allow_domain_state;
+
+static inline void sbi_set_domain_state(u32 state)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       data->available = true;
+       data->state = state;
+}
+
+static inline u32 sbi_get_domain_state(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       return data->state;
+}
+
+static inline void sbi_clear_domain_state(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       data->available = false;
+}
+
+static inline bool sbi_is_domain_state_available(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       return data->available;
+}
+
+static int sbi_suspend_finisher(unsigned long suspend_type,
+                               unsigned long resume_addr,
+                               unsigned long opaque)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
+                       suspend_type, resume_addr, opaque, 0, 0, 0);
+
+       return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
+}
+
+static int sbi_suspend(u32 state)
+{
+       if (state & SBI_HSM_SUSP_NON_RET_BIT)
+               return cpu_suspend(state, sbi_suspend_finisher);
+       else
+               return sbi_suspend_finisher(state, 0, 0);
+}
+
+static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+                                  struct cpuidle_driver *drv, int idx)
+{
+       u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
+
+       return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
+}
+
+static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+                                         struct cpuidle_driver *drv, int idx,
+                                         bool s2idle)
+{
+       struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
+       u32 *states = data->states;
+       struct device *pd_dev = data->dev;
+       u32 state;
+       int ret;
+
+       ret = cpu_pm_enter();
+       if (ret)
+               return -1;
+
+       /* Do runtime PM to manage a hierarchical CPU topology. */
+       rcu_irq_enter_irqson();
+       if (s2idle)
+               dev_pm_genpd_suspend(pd_dev);
+       else
+               pm_runtime_put_sync_suspend(pd_dev);
+       rcu_irq_exit_irqson();
+
+       if (sbi_is_domain_state_available())
+               state = sbi_get_domain_state();
+       else
+               state = states[idx];
+
+       ret = sbi_suspend(state) ? -1 : idx;
+
+       rcu_irq_enter_irqson();
+       if (s2idle)
+               dev_pm_genpd_resume(pd_dev);
+       else
+               pm_runtime_get_sync(pd_dev);
+       rcu_irq_exit_irqson();
+
+       cpu_pm_exit();
+
+       /* Clear the domain state to start fresh when back from idle. */
+       sbi_clear_domain_state();
+       return ret;
+}
+
+static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+                                      struct cpuidle_driver *drv, int idx)
+{
+       return __sbi_enter_domain_idle_state(dev, drv, idx, false);
+}
+
+static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
+                                             struct cpuidle_driver *drv,
+                                             int idx)
+{
+       return __sbi_enter_domain_idle_state(dev, drv, idx, true);
+}
+
+static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+       if (pd_dev)
+               pm_runtime_get_sync(pd_dev);
+
+       return 0;
+}
+
+static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+       if (pd_dev) {
+               pm_runtime_put_sync(pd_dev);
+               /* Clear domain state to start fresh at next online. */
+               sbi_clear_domain_state();
+       }
+
+       return 0;
+}
+
+static void sbi_idle_init_cpuhp(void)
+{
+       int err;
+
+       if (!sbi_cpuidle_use_cpuhp)
+               return;
+
+       err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+                                       "cpuidle/sbi:online",
+                                       sbi_cpuidle_cpuhp_up,
+                                       sbi_cpuidle_cpuhp_down);
+       if (err)
+               pr_warn("Failed %d while setup cpuhp state\n", err);
+}
+
+static const struct of_device_id sbi_cpuidle_state_match[] = {
+       { .compatible = "riscv,idle-state",
+         .data = sbi_cpuidle_enter_state },
+       { },
+};
+
+static bool sbi_suspend_state_is_valid(u32 state)
+{
+       if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
+           state < SBI_HSM_SUSPEND_RET_PLATFORM)
+               return false;
+       if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
+           state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
+               return false;
+       return true;
+}
+
+static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+       int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
+
+       if (err) {
+               pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
+               return err;
+       }
+
+       if (!sbi_suspend_state_is_valid(*state)) {
+               pr_warn("Invalid SBI suspend state %#x\n", *state);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
+                                    struct sbi_cpuidle_data *data,
+                                    unsigned int state_count, int cpu)
+{
+       /* Currently the hierarchical topology is only used in OSI mode. */
+       if (!sbi_cpuidle_use_osi)
+               return 0;
+
+       data->dev = dt_idle_attach_cpu(cpu, "sbi");
+       if (IS_ERR_OR_NULL(data->dev))
+               return PTR_ERR_OR_ZERO(data->dev);
+
+       /*
+        * Use the deepest state for the CPU to trigger a potential selection
+        * of a shared state for the domain, assuming the domain states are
+        * all deeper than the CPU states.
+        */
+       drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
+       drv->states[state_count - 1].enter_s2idle =
+                                       sbi_enter_s2idle_domain_idle_state;
+       sbi_cpuidle_use_cpuhp = true;
+
+       return 0;
+}
+
+static int sbi_cpuidle_dt_init_states(struct device *dev,
+                                       struct cpuidle_driver *drv,
+                                       unsigned int cpu,
+                                       unsigned int state_count)
+{
+       struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+       struct device_node *state_node;
+       struct device_node *cpu_node;
+       u32 *states;
+       int i, ret;
+
+       cpu_node = of_cpu_device_node_get(cpu);
+       if (!cpu_node)
+               return -ENODEV;
+
+       states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
+       if (!states) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       /* Parse SBI specific details from state DT nodes */
+       for (i = 1; i < state_count; i++) {
+               state_node = of_get_cpu_state_node(cpu_node, i - 1);
+               if (!state_node)
+                       break;
+
+               ret = sbi_dt_parse_state_node(state_node, &states[i]);
+               of_node_put(state_node);
+
+               if (ret)
+                       return ret;
+
+               pr_debug("sbi-state %#x index %d\n", states[i], i);
+       }
+       if (i != state_count) {
+               ret = -ENODEV;
+               goto fail;
+       }
+
+       /* Initialize optional data, used for the hierarchical topology. */
+       ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
+       if (ret < 0)
+               return ret;
+
+       /* Store states in the per-cpu struct. */
+       data->states = states;
+
+fail:
+       of_node_put(cpu_node);
+
+       return ret;
+}
+
+static void sbi_cpuidle_deinit_cpu(int cpu)
+{
+       struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+
+       dt_idle_detach_cpu(data->dev);
+       sbi_cpuidle_use_cpuhp = false;
+}
+
+static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
+{
+       struct cpuidle_driver *drv;
+       unsigned int state_count = 0;
+       int ret = 0;
+
+       drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+       if (!drv)
+               return -ENOMEM;
+
+       drv->name = "sbi_cpuidle";
+       drv->owner = THIS_MODULE;
+       drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+       /* RISC-V architectural WFI to be represented as state index 0. */
+       drv->states[0].enter = sbi_cpuidle_enter_state;
+       drv->states[0].exit_latency = 1;
+       drv->states[0].target_residency = 1;
+       drv->states[0].power_usage = UINT_MAX;
+       strcpy(drv->states[0].name, "WFI");
+       strcpy(drv->states[0].desc, "RISC-V WFI");
+
+       /*
+        * If no DT idle states are detected (ret == 0), let the driver
+        * initialization fail accordingly, since there is no reason to
+        * initialize the idle driver if only WFI is supported: the
+        * default architectural back-end already executes WFI on idle
+        * entry.
+        */
+       ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
+       if (ret <= 0) {
+               pr_debug("HART%ld: failed to parse DT idle states\n",
+                        cpuid_to_hartid_map(cpu));
+               return ret ? : -ENODEV;
+       }
+       state_count = ret + 1; /* Include WFI state as well */
+
+       /* Initialize idle states from DT. */
+       ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
+       if (ret) {
+               pr_err("HART%ld: failed to init idle states\n",
+                      cpuid_to_hartid_map(cpu));
+               return ret;
+       }
+
+       ret = cpuidle_register(drv, NULL);
+       if (ret)
+               goto deinit;
+
+       cpuidle_cooling_register(drv);
+
+       return 0;
+deinit:
+       sbi_cpuidle_deinit_cpu(cpu);
+       return ret;
+}
+
+static void sbi_cpuidle_domain_sync_state(struct device *dev)
+{
+       /*
+        * All devices have now been attached/probed to the PM domain
+        * topology, hence it's fine to allow domain states to be picked.
+        */
+       sbi_cpuidle_pd_allow_domain_state = true;
+}
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
+{
+       struct genpd_power_state *state = &pd->states[pd->state_idx];
+       u32 *pd_state;
+
+       if (!state->data)
+               return 0;
+
+       if (!sbi_cpuidle_pd_allow_domain_state)
+               return -EBUSY;
+
+       /* OSI mode is enabled, set the corresponding domain state. */
+       pd_state = state->data;
+       sbi_set_domain_state(*pd_state);
+
+       return 0;
+}
+
+struct sbi_pd_provider {
+       struct list_head link;
+       struct device_node *node;
+};
+
+static LIST_HEAD(sbi_pd_providers);
+
+static int sbi_pd_init(struct device_node *np)
+{
+       struct generic_pm_domain *pd;
+       struct sbi_pd_provider *pd_provider;
+       struct dev_power_governor *pd_gov;
+       int ret = -ENOMEM, state_count = 0;
+
+       pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
+       if (!pd)
+               goto out;
+
+       pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+       if (!pd_provider)
+               goto free_pd;
+
+       pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+       /* Allow power off when OSI is available. */
+       if (sbi_cpuidle_use_osi)
+               pd->power_off = sbi_cpuidle_pd_power_off;
+       else
+               pd->flags |= GENPD_FLAG_ALWAYS_ON;
+
+       /* Use governor for CPU PM domains if it has some states to manage. */
+       pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
+
+       ret = pm_genpd_init(pd, pd_gov, false);
+       if (ret)
+               goto free_pd_prov;
+
+       ret = of_genpd_add_provider_simple(np, pd);
+       if (ret)
+               goto remove_pd;
+
+       pd_provider->node = of_node_get(np);
+       list_add(&pd_provider->link, &sbi_pd_providers);
+
+       pr_debug("init PM domain %s\n", pd->name);
+       return 0;
+
+remove_pd:
+       pm_genpd_remove(pd);
+free_pd_prov:
+       kfree(pd_provider);
+free_pd:
+       dt_idle_pd_free(pd);
+out:
+       pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+       return ret;
+}
+
+static void sbi_pd_remove(void)
+{
+       struct sbi_pd_provider *pd_provider, *it;
+       struct generic_pm_domain *genpd;
+
+       list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
+               of_genpd_del_provider(pd_provider->node);
+
+               genpd = of_genpd_remove_last(pd_provider->node);
+               if (!IS_ERR(genpd))
+                       kfree(genpd);
+
+               of_node_put(pd_provider->node);
+               list_del(&pd_provider->link);
+               kfree(pd_provider);
+       }
+}
+
+static int sbi_genpd_probe(struct device_node *np)
+{
+       struct device_node *node;
+       int ret = 0, pd_count = 0;
+
+       if (!np)
+               return -ENODEV;
+
+       /*
+        * Parse child nodes for the "#power-domain-cells" property and
+        * initialize a genpd/genpd-of-provider pair when it's found.
+        */
+       for_each_child_of_node(np, node) {
+               if (!of_find_property(node, "#power-domain-cells", NULL))
+                       continue;
+
+               ret = sbi_pd_init(node);
+               if (ret)
+                       goto put_node;
+
+               pd_count++;
+       }
+
+       /* Bail out if not using the hierarchical CPU topology. */
+       if (!pd_count)
+               goto no_pd;
+
+       /* Link genpd masters/subdomains to model the CPU topology. */
+       ret = dt_idle_pd_init_topology(np);
+       if (ret)
+               goto remove_pd;
+
+       return 0;
+
+put_node:
+       of_node_put(node);
+remove_pd:
+       sbi_pd_remove();
+       pr_err("failed to create CPU PM domains ret=%d\n", ret);
+no_pd:
+       return ret;
+}
+
+#else
+
+static inline int sbi_genpd_probe(struct device_node *np)
+{
+       return 0;
+}
+
+#endif
+
+static int sbi_cpuidle_probe(struct platform_device *pdev)
+{
+       int cpu, ret;
+       struct cpuidle_driver *drv;
+       struct cpuidle_device *dev;
+       struct device_node *np, *pds_node;
+
+       /* Detect OSI support based on CPU DT nodes */
+       sbi_cpuidle_use_osi = true;
+       for_each_possible_cpu(cpu) {
+               np = of_cpu_device_node_get(cpu);
+               if (np &&
+                   of_find_property(np, "power-domains", NULL) &&
+                   of_find_property(np, "power-domain-names", NULL)) {
+                       continue;
+               } else {
+                       sbi_cpuidle_use_osi = false;
+                       break;
+               }
+       }
+
+       /* Populate generic power domains from DT nodes */
+       pds_node = of_find_node_by_path("/cpus/power-domains");
+       if (pds_node) {
+               ret = sbi_genpd_probe(pds_node);
+               of_node_put(pds_node);
+               if (ret)
+                       return ret;
+       }
+
+       /* Initialize CPU idle driver for each CPU */
+       for_each_possible_cpu(cpu) {
+               ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
+               if (ret) {
+                       pr_debug("HART%ld: idle driver init failed\n",
+                                cpuid_to_hartid_map(cpu));
+                       goto out_fail;
+               }
+       }
+
+       /* Set up CPU hotplug notifiers */
+       sbi_idle_init_cpuhp();
+
+       pr_info("idle driver registered for all CPUs\n");
+
+       return 0;
+
+out_fail:
+       while (--cpu >= 0) {
+               dev = per_cpu(cpuidle_devices, cpu);
+               drv = cpuidle_get_cpu_driver(dev);
+               cpuidle_unregister(drv);
+               sbi_cpuidle_deinit_cpu(cpu);
+       }
+
+       return ret;
+}
+
+static struct platform_driver sbi_cpuidle_driver = {
+       .probe = sbi_cpuidle_probe,
+       .driver = {
+               .name = "sbi-cpuidle",
+               .sync_state = sbi_cpuidle_domain_sync_state,
+       },
+};
+
+static int __init sbi_cpuidle_init(void)
+{
+       int ret;
+       struct platform_device *pdev;
+
+       /*
+        * The SBI HSM suspend function is only available when:
+        * 1) SBI version is 0.3 or higher
+        * 2) SBI HSM extension is available
+        */
+       if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+           sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+               pr_info("HSM suspend not available\n");
+               return 0;
+       }
+
+       ret = platform_driver_register(&sbi_cpuidle_driver);
+       if (ret)
+               return ret;
+
+       pdev = platform_device_register_simple("sbi-cpuidle",
+                                               -1, NULL, 0);
+       if (IS_ERR(pdev)) {
+               platform_driver_unregister(&sbi_cpuidle_driver);
+               return PTR_ERR(pdev);
+       }
+
+       return 0;
+}
+device_initcall(sbi_cpuidle_init);
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
new file mode 100644 (file)
index 0000000..b371655
--- /dev/null
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PM domains for CPUs via genpd.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "dt-idle-genpd: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "dt_idle_genpd.h"
+
+static int pd_parse_state_nodes(
+                       int (*parse_state)(struct device_node *, u32 *),
+                       struct genpd_power_state *states, int state_count)
+{
+       int i, ret;
+       u32 state, *state_buf;
+
+       for (i = 0; i < state_count; i++) {
+               ret = parse_state(to_of_node(states[i].fwnode), &state);
+               if (ret)
+                       goto free_state;
+
+               state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+               if (!state_buf) {
+                       ret = -ENOMEM;
+                       goto free_state;
+               }
+               *state_buf = state;
+               states[i].data = state_buf;
+       }
+
+       return 0;
+
+free_state:
+       i--;
+       for (; i >= 0; i--)
+               kfree(states[i].data);
+       return ret;
+}
+
+static int pd_parse_states(struct device_node *np,
+                          int (*parse_state)(struct device_node *, u32 *),
+                          struct genpd_power_state **states,
+                          int *state_count)
+{
+       int ret;
+
+       /* Parse the domain idle states. */
+       ret = of_genpd_parse_idle_states(np, states, state_count);
+       if (ret)
+               return ret;
+
+       /* Fill out the dt specifics for each found state. */
+       ret = pd_parse_state_nodes(parse_state, *states, *state_count);
+       if (ret)
+               kfree(*states);
+
+       return ret;
+}
+
+static void pd_free_states(struct genpd_power_state *states,
+                           unsigned int state_count)
+{
+       int i;
+
+       for (i = 0; i < state_count; i++)
+               kfree(states[i].data);
+       kfree(states);
+}
+
+void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+       pd_free_states(pd->states, pd->state_count);
+       kfree(pd->name);
+       kfree(pd);
+}
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *))
+{
+       struct generic_pm_domain *pd;
+       struct genpd_power_state *states = NULL;
+       int ret, state_count = 0;
+
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               goto out;
+
+       pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+       if (!pd->name)
+               goto free_pd;
+
+       /*
+        * Parse the domain idle states and let genpd manage the state selection
+        * for those being compatible with "domain-idle-state".
+        */
+       ret = pd_parse_states(np, parse_state, &states, &state_count);
+       if (ret)
+               goto free_name;
+
+       pd->free_states = pd_free_states;
+       pd->name = kbasename(pd->name);
+       pd->states = states;
+       pd->state_count = state_count;
+
+       pr_debug("alloc PM domain %s\n", pd->name);
+       return pd;
+
+free_name:
+       kfree(pd->name);
+free_pd:
+       kfree(pd);
+out:
+       pr_err("failed to alloc PM domain %pOF\n", np);
+       return NULL;
+}
+
+int dt_idle_pd_init_topology(struct device_node *np)
+{
+       struct device_node *node;
+       struct of_phandle_args child, parent;
+       int ret;
+
+       for_each_child_of_node(np, node) {
+               if (of_parse_phandle_with_args(node, "power-domains",
+                                       "#power-domain-cells", 0, &parent))
+                       continue;
+
+               child.np = node;
+               child.args_count = 0;
+               ret = of_genpd_add_subdomain(&parent, &child);
+               of_node_put(parent.np);
+               if (ret) {
+                       of_node_put(node);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+       struct device *dev;
+
+       dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
+       if (IS_ERR_OR_NULL(dev))
+               return dev;
+
+       pm_runtime_irq_safe(dev);
+       if (cpu_online(cpu))
+               pm_runtime_get_sync(dev);
+
+       dev_pm_syscore_device(dev, true);
+
+       return dev;
+}
+
+void dt_idle_detach_cpu(struct device *dev)
+{
+       if (IS_ERR_OR_NULL(dev))
+               return;
+
+       dev_pm_domain_detach(dev, false);
+}
diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
new file mode 100644 (file)
index 0000000..a95483d
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_IDLE_GENPD
+#define __DT_IDLE_GENPD
+
+struct device_node;
+struct generic_pm_domain;
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+void dt_idle_pd_free(struct generic_pm_domain *pd);
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *));
+
+int dt_idle_pd_init_topology(struct device_node *np);
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name);
+
+void dt_idle_detach_cpu(struct device *dev);
+
+#else
+
+static inline void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+}
+
+static inline struct generic_pm_domain *dt_idle_pd_alloc(
+                       struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *))
+{
+       return NULL;
+}
+
+static inline int dt_idle_pd_init_topology(struct device_node *np)
+{
+       return 0;
+}
+
+static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+       return NULL;
+}
+
+static inline void dt_idle_detach_cpu(struct device *dev)
+{
+}
+
+#endif
+
+#endif
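
Taken together, the dt_idle_genpd helpers split out above give arch idle
drivers a common attach/detach flow; a minimal sketch of the per-CPU usage,
mirroring what the PSCI and SBI drivers do (the "psci" domain name is just an
example):

    struct device *pd_dev = dt_idle_attach_cpu(cpu, "psci");
    if (IS_ERR_OR_NULL(pd_dev))
        return PTR_ERR_OR_ZERO(pd_dev);

    /* ...runtime PM get/put around idle entry, as in the drivers above... */

    dt_idle_detach_cpu(pd_dev);
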
index b894e3a..5f8915f 100644 (file)
@@ -3,8 +3,11 @@ config CRYPTO_DEV_VIRTIO
        tristate "VirtIO crypto driver"
        depends on VIRTIO
        select CRYPTO_AEAD
+       select CRYPTO_AKCIPHER2
        select CRYPTO_SKCIPHER
        select CRYPTO_ENGINE
+       select CRYPTO_RSA
+       select MPILIB
        help
          This driver provides support for virtio crypto device. If you
          choose 'M' here, this module will be called virtio_crypto.
index cbfcccc..bfa6cba 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
 virtio_crypto-objs := \
-       virtio_crypto_algs.o \
+       virtio_crypto_skcipher_algs.o \
+       virtio_crypto_akcipher_algs.o \
        virtio_crypto_mgr.o \
        virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
new file mode 100644 (file)
index 0000000..f3ec942
--- /dev/null
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Asymmetric algorithms supported by virtio crypto device
+ *
+ * Authors: zhenwei pi <pizhenwei@bytedance.com>
+ *          lei he <helei.sig11@bytedance.com>
+ *
+ * Copyright 2022 Bytedance CO., LTD.
+ */
+
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+struct virtio_crypto_rsa_ctx {
+       MPI n;
+};
+
+struct virtio_crypto_akcipher_ctx {
+       struct crypto_engine_ctx enginectx;
+       struct virtio_crypto *vcrypto;
+       struct crypto_akcipher *tfm;
+       bool session_valid;
+       __u64 session_id;
+       union {
+               struct virtio_crypto_rsa_ctx rsa_ctx;
+       };
+};
+
+struct virtio_crypto_akcipher_request {
+       struct virtio_crypto_request base;
+       struct virtio_crypto_akcipher_ctx *akcipher_ctx;
+       struct akcipher_request *akcipher_req;
+       void *src_buf;
+       void *dst_buf;
+       uint32_t opcode;
+};
+
+struct virtio_crypto_akcipher_algo {
+       uint32_t algonum;
+       uint32_t service;
+       unsigned int active_devs;
+       struct akcipher_alg algo;
+};
+
+static DEFINE_MUTEX(algs_lock);
+
+static void virtio_crypto_akcipher_finalize_req(
+       struct virtio_crypto_akcipher_request *vc_akcipher_req,
+       struct akcipher_request *req, int err)
+{
+       virtcrypto_clear_request(&vc_akcipher_req->base);
+
+       crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
+}
+
+static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
+{
+       struct virtio_crypto_akcipher_request *vc_akcipher_req =
+               container_of(vc_req, struct virtio_crypto_akcipher_request, base);
+       struct akcipher_request *akcipher_req;
+       int error;
+
+       switch (vc_req->status) {
+       case VIRTIO_CRYPTO_OK:
+               error = 0;
+               break;
+       case VIRTIO_CRYPTO_INVSESS:
+       case VIRTIO_CRYPTO_ERR:
+               error = -EINVAL;
+               break;
+       case VIRTIO_CRYPTO_BADMSG:
+               error = -EBADMSG;
+               break;
+
+       case VIRTIO_CRYPTO_KEY_REJECTED:
+               error = -EKEYREJECTED;
+               break;
+
+       default:
+               error = -EIO;
+               break;
+       }
+
+       akcipher_req = vc_akcipher_req->akcipher_req;
+       if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
+               sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
+                                   vc_akcipher_req->dst_buf, akcipher_req->dst_len);
+       virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
+}
+
+static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+               struct virtio_crypto_ctrl_header *header, void *para,
+               const uint8_t *key, unsigned int keylen)
+{
+       struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       uint8_t *pkey;
+       unsigned int inlen;
+       int err;
+       unsigned int num_out = 0, num_in = 0;
+
+       pkey = kmemdup(key, keylen, GFP_ATOMIC);
+       if (!pkey)
+               return -ENOMEM;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       memcpy(&vcrypto->ctrl.header, header, sizeof(vcrypto->ctrl.header));
+       memcpy(&vcrypto->ctrl.u, para, sizeof(vcrypto->ctrl.u));
+       vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr_sg;
+
+       sg_init_one(&key_sg, pkey, keylen);
+       sgs[num_out++] = &key_sg;
+
+       sg_init_one(&inhdr_sg, &vcrypto->input, sizeof(vcrypto->input));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0)
+               goto out;
+
+       virtqueue_kick(vcrypto->ctrl_vq);
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       ctx->session_id = le64_to_cpu(vcrypto->input.session_id);
+       ctx->session_valid = true;
+       err = 0;
+
+out:
+       spin_unlock(&vcrypto->ctrl_lock);
+       kfree_sensitive(pkey);
+
+       if (err < 0)
+               pr_err("virtio_crypto: Create session failed status: %u\n",
+                       le32_to_cpu(vcrypto->input.status));
+
+       return err;
+}
+
+static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
+{
+       struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
+       struct virtio_crypto_destroy_session_req *destroy_session;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       unsigned int num_out = 0, num_in = 0, inlen;
+       int err;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       if (!ctx->session_valid) {
+               err = 0;
+               goto out;
+       }
+       vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+       vcrypto->ctrl.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
+       vcrypto->ctrl.header.queue_id = 0;
+
+       destroy_session = &vcrypto->ctrl.u.destroy_session;
+       destroy_session->session_id = cpu_to_le64(ctx->session_id);
+
+       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr_sg;
+
+       sg_init_one(&inhdr_sg, &vcrypto->ctrl_status.status, sizeof(vcrypto->ctrl_status.status));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0)
+               goto out;
+
+       virtqueue_kick(vcrypto->ctrl_vq);
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = 0;
+       ctx->session_valid = false;
+
+out:
+       spin_unlock(&vcrypto->ctrl_lock);
+       if (err < 0) {
+               pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+                       vcrypto->ctrl_status.status, destroy_session->session_id);
+       }
+
+       return err;
+}
+
+static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
+               struct akcipher_request *req, struct data_queue *data_vq)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
+       struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
+       void *src_buf = NULL, *dst_buf = NULL;
+       unsigned int num_out = 0, num_in = 0;
+       int node = dev_to_node(&vcrypto->vdev->dev);
+       unsigned long flags;
+       int ret = -ENOMEM;
+       bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
+       unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
+
+       /* out header */
+       sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
+       sgs[num_out++] = &outhdr_sg;
+
+       /* src data */
+       src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
+       if (!src_buf)
+               goto err;
+
+       if (verify) {
+               /* for verify operation, both src and dst data work as OUT direction */
+               sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+               sg_init_one(&srcdata_sg, src_buf, src_len);
+               sgs[num_out++] = &srcdata_sg;
+       } else {
+               sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+               sg_init_one(&srcdata_sg, src_buf, src_len);
+               sgs[num_out++] = &srcdata_sg;
+
+               /* dst data */
+               dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
+               if (!dst_buf)
+                       goto err;
+
+               sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
+               sgs[num_out + num_in++] = &dstdata_sg;
+       }
+
+       vc_akcipher_req->src_buf = src_buf;
+       vc_akcipher_req->dst_buf = dst_buf;
+
+       /* in header */
+       sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       spin_lock_irqsave(&data_vq->lock, flags);
+       ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
+       virtqueue_kick(data_vq->vq);
+       spin_unlock_irqrestore(&data_vq->lock, flags);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       kfree(src_buf);
+       kfree(dst_buf);
+
+       return -ENOMEM;
+}
+
+static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
+{
+       struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
+       struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       struct data_queue *data_vq = vc_req->dataq;
+       struct virtio_crypto_op_header *header;
+       struct virtio_crypto_akcipher_data_req *akcipher_req;
+       int ret;
+
+       vc_req->sgs = NULL;
+       vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
+               GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
+       if (!vc_req->req_data)
+               return -ENOMEM;
+
+       /* build request header */
+       header = &vc_req->req_data->header;
+       header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
+       header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       header->session_id = cpu_to_le64(ctx->session_id);
+
+       /* build request akcipher data */
+       akcipher_req = &vc_req->req_data->u.akcipher_req;
+       akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
+       akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);
+
+       ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
+       if (ret < 0) {
+               kfree_sensitive(vc_req->req_data);
+               vc_req->req_data = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
+{
+       struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
+       struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       /* Use the first data virtqueue as default */
+       struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+       vc_req->dataq = data_vq;
+       vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
+       vc_akcipher_req->akcipher_ctx = ctx;
+       vc_akcipher_req->akcipher_req = req;
+       vc_akcipher_req->opcode = opcode;
+
+       return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
+}
+
+static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
+}
+
+static int virtio_crypto_rsa_sign(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
+}
+
+static int virtio_crypto_rsa_verify(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
+}
+
+static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
+                                    const void *key,
+                                    unsigned int keylen,
+                                    bool private,
+                                    int padding_algo,
+                                    int hash_algo)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+       struct virtio_crypto *vcrypto;
+       struct virtio_crypto_ctrl_header header;
+       struct virtio_crypto_akcipher_session_para para;
+       struct rsa_key rsa_key = {0};
+       int node = virtio_crypto_get_current_node();
+       uint32_t keytype;
+       int ret;
+
+       /* mpi_free will test n, just free it. */
+       mpi_free(rsa_ctx->n);
+       rsa_ctx->n = NULL;
+
+       if (private) {
+               keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+               ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+       } else {
+               keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+               ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+       }
+
+       if (ret)
+               return ret;
+
+       rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
+       if (!rsa_ctx->n)
+               return -ENOMEM;
+
+       if (!ctx->vcrypto) {
+               vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+                                               VIRTIO_CRYPTO_AKCIPHER_RSA);
+               if (!vcrypto) {
+                       pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
+                       return -ENODEV;
+               }
+
+               ctx->vcrypto = vcrypto;
+       } else {
+               virtio_crypto_alg_akcipher_close_session(ctx);
+       }
+
+       /* set ctrl header */
+       header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
+       header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       header.queue_id = 0;
+
+       /* set RSA para */
+       para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       para.keytype = cpu_to_le32(keytype);
+       para.keylen = cpu_to_le32(keylen);
+       para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
+       para.u.rsa.hash_algo = cpu_to_le32(hash_algo);
+
+       return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
+}
+
+static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
+                                             const void *key,
+                                             unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+                                        VIRTIO_CRYPTO_RSA_RAW_PADDING,
+                                        VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+
+static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
+                                                    const void *key,
+                                                    unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+                                        VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+                                        VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
+                                            const void *key,
+                                            unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+                                        VIRTIO_CRYPTO_RSA_RAW_PADDING,
+                                        VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
+                                                   const void *key,
+                                                   unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+                                        VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+                                        VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+       return mpi_get_size(rsa_ctx->n);
+}
+
+static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       ctx->tfm = tfm;
+       ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
+       ctx->enginectx.op.prepare_request = NULL;
+       ctx->enginectx.op.unprepare_request = NULL;
+
+       return 0;
+}
+
+static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+       virtio_crypto_alg_akcipher_close_session(ctx);
+       virtcrypto_dev_put(ctx->vcrypto);
+       mpi_free(rsa_ctx->n);
+       rsa_ctx->n = NULL;
+}
+
+static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
+       {
+               .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+               .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+               .algo = {
+                       .encrypt = virtio_crypto_rsa_encrypt,
+                       .decrypt = virtio_crypto_rsa_decrypt,
+                       .set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
+                       .set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
+                       .max_size = virtio_crypto_rsa_max_size,
+                       .init = virtio_crypto_rsa_init_tfm,
+                       .exit = virtio_crypto_rsa_exit_tfm,
+                       .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+                       .base = {
+                               .cra_name = "rsa",
+                               .cra_driver_name = "virtio-crypto-rsa",
+                               .cra_priority = 150,
+                               .cra_module = THIS_MODULE,
+                               .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+                       },
+               },
+       },
+       {
+               .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+               .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+               .algo = {
+                       .encrypt = virtio_crypto_rsa_encrypt,
+                       .decrypt = virtio_crypto_rsa_decrypt,
+                       .sign = virtio_crypto_rsa_sign,
+                       .verify = virtio_crypto_rsa_verify,
+                       .set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
+                       .set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
+                       .max_size = virtio_crypto_rsa_max_size,
+                       .init = virtio_crypto_rsa_init_tfm,
+                       .exit = virtio_crypto_rsa_exit_tfm,
+                       .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+                       .base = {
+                               .cra_name = "pkcs1pad(rsa,sha1)",
+                               .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
+                               .cra_priority = 150,
+                               .cra_module = THIS_MODULE,
+                               .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+                       },
+               },
+       },
+};
+
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
+{
+       int ret = 0;
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+               uint32_t service = virtio_crypto_akcipher_algs[i].service;
+               uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+               if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
+                       ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+                       if (ret)
+                               goto unlock;
+               }
+
+               virtio_crypto_akcipher_algs[i].active_devs++;
+               dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
+                        virtio_crypto_akcipher_algs[i].algo.base.cra_name);
+       }
+
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
+{
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+               uint32_t service = virtio_crypto_akcipher_algs[i].service;
+               uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
+                   !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 1)
+                       crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+
+               virtio_crypto_akcipher_algs[i].active_devs--;
+       }
+
+       mutex_unlock(&algs_lock);
+}
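
The two entries above register "rsa" and "pkcs1pad(rsa,sha1)" transforms with
the crypto API, so in-kernel users reach the virtio backend through the usual
akcipher interface. A hedged consumer sketch (buffer sizes, key format and
error handling are assumptions; the calls themselves are the standard
<crypto/akcipher.h> API):

	#include <crypto/akcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int example_rsa_encrypt(const void *der_pub_key, unsigned int keylen,
				       void *msg, unsigned int msg_len,
				       void *out, unsigned int out_len)
	{
		struct crypto_akcipher *tfm;
		struct akcipher_request *req;
		struct scatterlist src, dst;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		/* "rsa" resolves to the highest-priority implementation;
		 * cra_priority 150 above is meant to win over the generic
		 * software RSA. */
		tfm = crypto_alloc_akcipher("rsa", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, keylen);
		if (ret)
			goto free_tfm;

		req = akcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto free_tfm;
		}

		sg_init_one(&src, msg, msg_len);
		sg_init_one(&dst, out, out_len);
		akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);
		akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);

		ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

		akcipher_request_free(req);
	free_tfm:
		crypto_free_akcipher(tfm);
		return ret;
	}
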
index a24f85c..e693d4e 100644 (file)
@@ -56,6 +56,7 @@ struct virtio_crypto {
        u32 mac_algo_l;
        u32 mac_algo_h;
        u32 aead_algo;
+       u32 akcipher_algo;
 
        /* Maximum length of cipher key */
        u32 max_cipher_key_len;
@@ -129,7 +130,9 @@ static inline int virtio_crypto_get_current_node(void)
        return node;
 }
 
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
 
 #endif /* _VIRTIO_CRYPTO_COMMON_H */
index 8e977b7..c6f482d 100644 (file)
@@ -297,6 +297,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        u32 mac_algo_l = 0;
        u32 mac_algo_h = 0;
        u32 aead_algo = 0;
+       u32 akcipher_algo = 0;
        u32 crypto_services = 0;
 
        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -348,6 +349,9 @@ static int virtcrypto_probe(struct virtio_device *vdev)
                        mac_algo_h, &mac_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        aead_algo, &aead_algo);
+       if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
+               virtio_cread_le(vdev, struct virtio_crypto_config,
+                               akcipher_algo, &akcipher_algo);
 
        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -374,7 +378,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        vcrypto->mac_algo_h = mac_algo_h;
        vcrypto->hash_algo = hash_algo;
        vcrypto->aead_algo = aead_algo;
-
+       vcrypto->akcipher_algo = akcipher_algo;
 
        dev_info(&vdev->dev,
                "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
index 6860f81..70e778a 100644 (file)
@@ -237,8 +237,14 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
  */
 int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
 {
-       if (virtio_crypto_algs_register(vcrypto)) {
-               pr_err("virtio_crypto: Failed to register crypto algs\n");
+       if (virtio_crypto_skcipher_algs_register(vcrypto)) {
+               pr_err("virtio_crypto: Failed to register crypto skcipher algs\n");
+               return -EFAULT;
+       }
+
+       if (virtio_crypto_akcipher_algs_register(vcrypto)) {
+               pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
+               virtio_crypto_skcipher_algs_unregister(vcrypto);
                return -EFAULT;
        }
 
@@ -257,7 +263,8 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
  */
 void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
 {
-       virtio_crypto_algs_unregister(vcrypto);
+       virtio_crypto_skcipher_algs_unregister(vcrypto);
+       virtio_crypto_akcipher_algs_unregister(vcrypto);
 }
 
 /*
@@ -312,6 +319,10 @@ bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
        case VIRTIO_CRYPTO_SERVICE_AEAD:
                algo_mask = vcrypto->aead_algo;
                break;
+
+       case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
+               algo_mask = vcrypto->akcipher_algo;
+               break;
        }
 
        if (!(algo_mask & (1u << algo)))
@@ -613,7 +613,7 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        },
 } };
 
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
 {
        int ret = 0;
        int i = 0;
@@ -644,7 +644,7 @@ unlock:
        return ret;
 }
 
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
 {
        int i = 0;
 
index 69854fd..416725c 100644 (file)
@@ -47,8 +47,9 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
 {
        struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
 
-       /* Only clear the OE bit here, requires a RMW. Prevents potential issue
-        * with OE and data getting to the physical pin at different times.
+       /*
+        * Only clear the OE bit here, requires a RMW. Prevents a potential issue
+        * with OE and DAT getting to the physical pin at different times.
         */
        return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
 }
@@ -60,9 +61,10 @@ static int ts4900_gpio_direction_output(struct gpio_chip *chip,
        unsigned int reg;
        int ret;
 
-       /* If changing from an input to an output, we need to first set the
-        * proper data bit to what is requested and then set OE bit. This
-        * prevents a glitch that can occur on the IO line
+       /*
+        * If changing from an input to an output, we need to first set the
+        * GPIO's DAT bit to what is requested and then set the OE bit. This
+        * prevents a glitch that can occur on the IO line.
         */
        regmap_read(priv->regmap, offset, &reg);
        if (!(reg & TS4900_GPIO_OE)) {
index b159e92..8e03614 100644 (file)
  * Actually, the following platforms have DIO support:
  *
  * TS-5500:
- *   Documentation: http://wiki.embeddedarm.com/wiki/TS-5500
+ *   Documentation: https://docs.embeddedts.com/TS-5500
  *   Blocks: DIO1, DIO2 and LCD port.
  *
  * TS-5600:
- *   Documentation: http://wiki.embeddedarm.com/wiki/TS-5600
+ *   Documentation: https://docs.embeddedts.com/TS-5600
  *   Blocks: LCD port (identical to TS-5500 LCD).
  */
 
index 7a67487..a95a7cb 100644 (file)
@@ -405,14 +405,25 @@ config HOLTEK_FF
          Say Y here if you have a Holtek On Line Grip based game controller
          and want to have force feedback support for it.
 
+config HID_VIVALDI_COMMON
+       tristate
+       help
+         ChromeOS Vivaldi HID parsing support library. This is a hidden
+         option so that drivers can use common code to parse the HID
+         descriptors for vivaldi function row keymap.
+
 config HID_GOOGLE_HAMMER
        tristate "Google Hammer Keyboard"
+       select HID_VIVALDI_COMMON
+       select INPUT_VIVALDIFMAP
        depends on USB_HID && LEDS_CLASS && CROS_EC
        help
        Say Y here if you have a Google Hammer device.
 
 config HID_VIVALDI
        tristate "Vivaldi Keyboard"
+       select HID_VIVALDI_COMMON
+       select INPUT_VIVALDIFMAP
        depends on HID
        help
          Say Y here if you want to enable support for Vivaldi keyboards.
index d5ce8d7..345ac55 100644 (file)
@@ -50,6 +50,7 @@ obj-$(CONFIG_HID_FT260)               += hid-ft260.o
 obj-$(CONFIG_HID_GEMBIRD)      += hid-gembird.o
 obj-$(CONFIG_HID_GFRM)         += hid-gfrm.o
 obj-$(CONFIG_HID_GLORIOUS)  += hid-glorious.o
+obj-$(CONFIG_HID_VIVALDI_COMMON) += hid-vivaldi-common.o
 obj-$(CONFIG_HID_GOOGLE_HAMMER)        += hid-google-hammer.o
 obj-$(CONFIG_HID_VIVALDI)      += hid-vivaldi.o
 obj-$(CONFIG_HID_GT683R)       += hid-gt683r.o
index ddbe0de..ff40f1e 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/acpi.h>
 #include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -25,6 +26,7 @@
 #include <asm/unaligned.h>
 
 #include "hid-ids.h"
+#include "hid-vivaldi-common.h"
 
 /*
  * C(hrome)B(ase)A(ttached)S(witch) - switch exported by Chrome EC and reporting
@@ -340,9 +342,9 @@ static int hammer_kbd_brightness_set_blocking(struct led_classdev *cdev,
 static int hammer_register_leds(struct hid_device *hdev)
 {
        struct hammer_kbd_leds *kbd_backlight;
-       int error;
 
-       kbd_backlight = kzalloc(sizeof(*kbd_backlight), GFP_KERNEL);
+       kbd_backlight = devm_kzalloc(&hdev->dev, sizeof(*kbd_backlight),
+                                    GFP_KERNEL);
        if (!kbd_backlight)
                return -ENOMEM;
 
@@ -356,26 +358,7 @@ static int hammer_register_leds(struct hid_device *hdev)
        /* Set backlight to 0% initially. */
        hammer_kbd_brightness_set_blocking(&kbd_backlight->cdev, 0);
 
-       error = led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
-       if (error)
-               goto err_free_mem;
-
-       hid_set_drvdata(hdev, kbd_backlight);
-       return 0;
-
-err_free_mem:
-       kfree(kbd_backlight);
-       return error;
-}
-
-static void hammer_unregister_leds(struct hid_device *hdev)
-{
-       struct hammer_kbd_leds *kbd_backlight = hid_get_drvdata(hdev);
-
-       if (kbd_backlight) {
-               led_classdev_unregister(&kbd_backlight->cdev);
-               kfree(kbd_backlight);
-       }
+       return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
 }
 
 #define HID_UP_GOOGLEVENDOR    0xffd10000
@@ -512,11 +495,23 @@ out:
        kfree(buf);
 }
 
+static void hammer_stop(void *hdev)
+{
+       hid_hw_stop(hdev);
+}
+
 static int hammer_probe(struct hid_device *hdev,
                        const struct hid_device_id *id)
 {
+       struct vivaldi_data *vdata;
        int error;
 
+       vdata = devm_kzalloc(&hdev->dev, sizeof(*vdata), GFP_KERNEL);
+       if (!vdata)
+               return -ENOMEM;
+
+       hid_set_drvdata(hdev, vdata);
+
        error = hid_parse(hdev);
        if (error)
                return error;
@@ -525,6 +520,10 @@ static int hammer_probe(struct hid_device *hdev,
        if (error)
                return error;
 
+       error = devm_add_action(&hdev->dev, hammer_stop, hdev);
+       if (error)
+               return error;
+
        /*
         * We always want to poll for, and handle tablet mode events from
         * devices that have folded usage, even when nobody has opened the input
@@ -577,15 +576,13 @@ static void hammer_remove(struct hid_device *hdev)
                spin_unlock_irqrestore(&cbas_ec_lock, flags);
        }
 
-       hammer_unregister_leds(hdev);
-
-       hid_hw_stop(hdev);
+       /* Unregistering LEDs and stopping the hardware is done via devm */
 }
 
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
-       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+       { HID_DEVICE(BUS_USB, HID_GROUP_VIVALDI,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
@@ -610,6 +607,8 @@ static struct hid_driver hammer_driver = {
        .id_table = hammer_devices,
        .probe = hammer_probe,
        .remove = hammer_remove,
+       .feature_mapping = vivaldi_feature_mapping,
+       .input_configured = vivaldi_input_configured,
        .input_mapping = hammer_input_mapping,
        .event = hammer_event,
 };
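
The hammer changes above move LED registration and hid_hw_stop() onto devres:
devm resources are released in reverse order of registration, so the
hammer_stop() action registered right after hid_hw_start() runs only after the
later devm-registered LED class device has been torn down. A generic sketch of
the pattern (my_hw_enable()/my_hw_disable() are placeholders, not kernel APIs):

	#include <linux/device.h>

	static void my_hw_stop(void *data)
	{
		my_hw_disable(data);		/* placeholder teardown */
	}

	static int my_probe(struct device *dev, void *hw)
	{
		int error;

		error = my_hw_enable(hw);	/* placeholder bring-up */
		if (error)
			return error;

		/* Resources registered after this action are released
		 * before my_hw_stop() runs on unbind or probe failure. */
		return devm_add_action_or_reset(dev, my_hw_stop, hw);
	}
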
diff --git a/drivers/hid/hid-vivaldi-common.c b/drivers/hid/hid-vivaldi-common.c
new file mode 100644 (file)
index 0000000..8b3e515
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS HID Vivaldi keyboards
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "hid-vivaldi-common.h"
+
+#define MIN_FN_ROW_KEY 1
+#define MAX_FN_ROW_KEY VIVALDI_MAX_FUNCTION_ROW_KEYS
+#define HID_VD_FN_ROW_PHYSMAP 0x00000001
+#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
+
+/**
+ * vivaldi_feature_mapping - Fill out vivaldi keymap data exposed via HID
+ * @hdev: HID device to parse
+ * @field: HID field to parse
+ * @usage: HID usage to parse
+ *
+ * Note: this function assumes that driver data attached to @hdev contains an
+ * instance of &struct vivaldi_data at the very beginning.
+ */
+void vivaldi_feature_mapping(struct hid_device *hdev,
+                            struct hid_field *field, struct hid_usage *usage)
+{
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+       struct hid_report *report = field->report;
+       u8 *report_data, *buf;
+       u32 report_len;
+       unsigned int fn_key;
+       int ret;
+
+       if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
+           (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
+               return;
+
+       fn_key = usage->hid & HID_USAGE;
+       if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
+               return;
+
+       if (fn_key > data->num_function_row_keys)
+               data->num_function_row_keys = fn_key;
+
+       report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
+       if (!report_data)
+               return;
+
+       report_len = hid_report_len(report);
+       if (!report->id) {
+               /*
+                * hid_hw_raw_request() will stuff report ID (which will be 0)
+                * into the first byte of the buffer even for unnumbered
+                * reports, so we need to account for this to avoid getting
+                * -EOVERFLOW in return.
+                * Note that hid_alloc_report_buf() adds 7 bytes to the size
+                * so we can safely say that we have space for an extra byte.
+                */
+               report_len++;
+       }
+
+       ret = hid_hw_raw_request(hdev, report->id, report_data,
+                                report_len, HID_FEATURE_REPORT,
+                                HID_REQ_GET_REPORT);
+       if (ret < 0) {
+               dev_warn(&hdev->dev, "failed to fetch feature %d\n",
+                        field->report->id);
+               goto out;
+       }
+
+       if (!report->id) {
+               /*
+                * Undo the damage from hid_hw_raw_request() for unnumbered
+                * reports.
+                */
+               report_data++;
+               report_len--;
+       }
+
+       ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
+                                  report_len, 0);
+       if (ret) {
+               dev_warn(&hdev->dev, "failed to report feature %d\n",
+                        field->report->id);
+               goto out;
+       }
+
+       data->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
+               field->value[usage->usage_index];
+
+out:
+       kfree(buf);
+}
+EXPORT_SYMBOL_GPL(vivaldi_feature_mapping);
+
+static ssize_t function_row_physmap_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct hid_device *hdev = to_hid_device(dev);
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+       return vivaldi_function_row_physmap_show(data, buf);
+}
+
+static DEVICE_ATTR_RO(function_row_physmap);
+static struct attribute *vivaldi_sysfs_attrs[] = {
+       &dev_attr_function_row_physmap.attr,
+       NULL
+};
+
+static const struct attribute_group vivaldi_attribute_group = {
+       .attrs = vivaldi_sysfs_attrs,
+};
+
+/**
+ * vivaldi_input_configured - Complete initialization of device using vivaldi map
+ * @hdev: HID device to which vivaldi attributes should be attached
+ * @hidinput: HID input device (unused)
+ */
+int vivaldi_input_configured(struct hid_device *hdev,
+                            struct hid_input *hidinput)
+{
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+       if (!data->num_function_row_keys)
+               return 0;
+
+       return devm_device_add_group(&hdev->dev, &vivaldi_attribute_group);
+}
+EXPORT_SYMBOL_GPL(vivaldi_input_configured);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-vivaldi-common.h b/drivers/hid/hid-vivaldi-common.h
new file mode 100644 (file)
index 0000000..d42e82d
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _HID_VIVALDI_COMMON_H
+#define _HID_VIVALDI_COMMON_H
+
+struct hid_device;
+struct hid_field;
+struct hid_input;
+struct hid_usage;
+
+void vivaldi_feature_mapping(struct hid_device *hdev,
+                            struct hid_field *field, struct hid_usage *usage);
+
+int vivaldi_input_configured(struct hid_device *hdev,
+                            struct hid_input *hidinput);
+
+#endif /* _HID_VIVALDI_COMMON_H */
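
As the kernel-doc for vivaldi_feature_mapping() notes, the helpers expect a
struct vivaldi_data instance at the very start of the driver data attached to
the hid_device (hid-google-hammer.c and hid-vivaldi.c above both follow this
rule). A hedged wiring sketch for a driver reusing these helpers
(struct my_drvdata and my_driver are illustrative names):

	#include <linux/hid.h>
	#include <linux/input/vivaldi-fmap.h>
	#include "hid-vivaldi-common.h"

	struct my_drvdata {
		struct vivaldi_data vdata;	/* must stay the first member */
		/* driver-private state follows */
	};

	static int my_probe(struct hid_device *hdev, const struct hid_device_id *id)
	{
		struct my_drvdata *drvdata;
		int error;

		drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
		if (!drvdata)
			return -ENOMEM;

		hid_set_drvdata(hdev, drvdata);

		error = hid_parse(hdev);
		if (error)
			return error;

		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	}

	static struct hid_driver my_driver = {
		.name			= "my-vivaldi-keyboard",
		.probe			= my_probe,
		.feature_mapping	= vivaldi_feature_mapping,
		.input_configured	= vivaldi_input_configured,
	};
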
index 42ceb20..3a97912 100644 (file)
@@ -8,48 +8,11 @@
 
 #include <linux/device.h>
 #include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/sysfs.h>
 
-#define MIN_FN_ROW_KEY 1
-#define MAX_FN_ROW_KEY 24
-#define HID_VD_FN_ROW_PHYSMAP 0x00000001
-#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
-
-struct vivaldi_data {
-       u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1];
-       int max_function_row_key;
-};
-
-static ssize_t function_row_physmap_show(struct device *dev,
-                                        struct device_attribute *attr,
-                                        char *buf)
-{
-       struct hid_device *hdev = to_hid_device(dev);
-       struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
-       ssize_t size = 0;
-       int i;
-
-       if (!drvdata->max_function_row_key)
-               return 0;
-
-       for (i = 0; i < drvdata->max_function_row_key; i++)
-               size += sprintf(buf + size, "%02X ",
-                               drvdata->function_row_physmap[i]);
-       size += sprintf(buf + size, "\n");
-       return size;
-}
-
-static DEVICE_ATTR_RO(function_row_physmap);
-static struct attribute *sysfs_attrs[] = {
-       &dev_attr_function_row_physmap.attr,
-       NULL
-};
-
-static const struct attribute_group input_attribute_group = {
-       .attrs = sysfs_attrs
-};
+#include "hid-vivaldi-common.h"
 
 static int vivaldi_probe(struct hid_device *hdev,
                         const struct hid_device_id *id)
@@ -70,86 +33,8 @@ static int vivaldi_probe(struct hid_device *hdev,
        return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 }
 
-static void vivaldi_feature_mapping(struct hid_device *hdev,
-                                   struct hid_field *field,
-                                   struct hid_usage *usage)
-{
-       struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
-       struct hid_report *report = field->report;
-       int fn_key;
-       int ret;
-       u32 report_len;
-       u8 *report_data, *buf;
-
-       if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
-           (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
-               return;
-
-       fn_key = (usage->hid & HID_USAGE);
-       if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
-               return;
-       if (fn_key > drvdata->max_function_row_key)
-               drvdata->max_function_row_key = fn_key;
-
-       report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
-       if (!report_data)
-               return;
-
-       report_len = hid_report_len(report);
-       if (!report->id) {
-               /*
-                * hid_hw_raw_request() will stuff report ID (which will be 0)
-                * into the first byte of the buffer even for unnumbered
-                * reports, so we need to account for this to avoid getting
-                * -EOVERFLOW in return.
-                * Note that hid_alloc_report_buf() adds 7 bytes to the size
-                * so we can safely say that we have space for an extra byte.
-                */
-               report_len++;
-       }
-
-       ret = hid_hw_raw_request(hdev, report->id, report_data,
-                                report_len, HID_FEATURE_REPORT,
-                                HID_REQ_GET_REPORT);
-       if (ret < 0) {
-               dev_warn(&hdev->dev, "failed to fetch feature %d\n",
-                        field->report->id);
-               goto out;
-       }
-
-       if (!report->id) {
-               /*
-                * Undo the damage from hid_hw_raw_request() for unnumbered
-                * reports.
-                */
-               report_data++;
-               report_len--;
-       }
-
-       ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
-                                  report_len, 0);
-       if (ret) {
-               dev_warn(&hdev->dev, "failed to report feature %d\n",
-                        field->report->id);
-               goto out;
-       }
-
-       drvdata->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
-           field->value[usage->usage_index];
-
-out:
-       kfree(buf);
-}
-
-static int vivaldi_input_configured(struct hid_device *hdev,
-                                   struct hid_input *hidinput)
-{
-       return devm_device_add_group(&hdev->dev, &input_attribute_group);
-}
-
 static const struct hid_device_id vivaldi_table[] = {
-       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID,
-                    HID_ANY_ID) },
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID, HID_ANY_ID) },
        { }
 };
 
index 26d269b..85a2142 100644 (file)
@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
         * execute:
         *
         *  (a) In the "normal (i.e., not resuming from hibernation)" path,
-        *      the full barrier in smp_store_mb() guarantees that the store
+        *      the full barrier in virt_store_mb() guarantees that the store
         *      is propagated to all CPUs before the add_channel_work work
         *      is queued.  In turn, add_channel_work is queued before the
         *      channel's ring buffer is allocated/initialized and the
@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
         *      recv_int_page before retrieving the channel pointer from the
         *      array of channels.
         *
-        *  (b) In the "resuming from hibernation" path, the smp_store_mb()
+        *  (b) In the "resuming from hibernation" path, the virt_store_mb()
         *      guarantees that the store is propagated to all CPUs before
         *      the VMBus connection is marked as ready for the resume event
         *      (cf. check_ready_for_resume_event()).  The interrupt handler
         *      of the VMBus driver and vmbus_chan_sched() can not run before
         *      vmbus_bus_resume() has completed execution (cf. resume_noirq).
         */
-       smp_store_mb(
+       virt_store_mb(
                vmbus_connection.channels[channel->offermsg.child_relid],
                channel);
 }
index 439f99b..3248b48 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/count_zeros.h>
 #include <linux/memory_hotplug.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
@@ -1130,6 +1131,7 @@ static void post_status(struct hv_dynmem_device *dm)
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;
+       unsigned long num_pages_avail, num_pages_committed;
 
        if (pressure_report_delay > 0) {
                --pressure_report_delay;
@@ -1154,16 +1156,21 @@ static void post_status(struct hv_dynmem_device *dm)
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
-       status.num_avail = si_mem_available();
-       status.num_committed = vm_memory_committed() +
+       num_pages_avail = si_mem_available();
+       num_pages_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();
 
-       trace_balloon_status(status.num_avail, status.num_committed,
+       trace_balloon_status(num_pages_avail, num_pages_committed,
                             vm_memory_committed(), dm->num_pages_ballooned,
                             dm->num_pages_added, dm->num_pages_onlined);
+
+       /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
+       status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
+       status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
+
        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
@@ -1653,6 +1660,38 @@ static void disable_page_reporting(void)
        }
 }
 
+static int ballooning_enabled(void)
+{
+       /*
+        * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
+        * since currently it's unclear to us whether an unballoon request can
+        * make sure all page ranges are guest page size aligned.
+        */
+       if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
+               pr_info("Ballooning disabled because page size is not 4096 bytes\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static int hot_add_enabled(void)
+{
+       /*
+        * Disable hot add on ARM64, because we currently rely on
+        * memory_add_physaddr_to_nid() to get a node id of a hot add range,
+        * however ARM64's memory_add_physaddr_to_nid() always return 0 and
+        * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
+        * add_memory().
+        */
+       if (IS_ENABLED(CONFIG_ARM64)) {
+               pr_info("Memory hot add disabled on ARM64\n");
+               return 0;
+       }
+
+       return 1;
+}
+
 static int balloon_connect_vsp(struct hv_device *dev)
 {
        struct dm_version_request version_req;
@@ -1724,8 +1763,8 @@ static int balloon_connect_vsp(struct hv_device *dev)
         * currently still requires the bits to be set, so we have to add code
         * to fail the host's hot-add and balloon up/down requests, if any.
         */
-       cap_msg.caps.cap_bits.balloon = 1;
-       cap_msg.caps.cap_bits.hot_add = 1;
+       cap_msg.caps.cap_bits.balloon = ballooning_enabled();
+       cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
 
        /*
         * Specify our alignment requirements as it relates
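
For the unit conversion added in post_status() above: the host expects counts
in 4 KiB HV_HYP_PAGE units, while si_mem_available() and vm_memory_committed()
return guest-page counts. NR_HV_HYP_PAGES_IN_PAGE is PAGE_SIZE / HV_HYP_PAGE_SIZE,
so, for example, on an ARM64 kernel built with 64 KiB pages a figure of 1000
free guest pages is reported as 1000 * 16 = 16000 hypervisor pages; with 4 KiB
guest pages the multiplier is 1 and the reported values are unchanged.
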
index c1dd21d..ae68298 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/panic_notifier.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
+#include <linux/dma-map-ops.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 
@@ -218,6 +219,16 @@ bool hv_query_ext_cap(u64 cap_query)
 }
 EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
+void hv_setup_dma_ops(struct device *dev, bool coherent)
+{
+       /*
+        * Hyper-V does not offer a vIOMMU in the guest
+        * VM, so pass 0/NULL for the IOMMU settings
+        */
+       arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
+
 bool hv_is_hibernation_supported(void)
 {
        return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
index 71efacb..3d215d9 100644 (file)
@@ -439,7 +439,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
 {
        u32 priv_read_loc = rbi->priv_read_index;
-       u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+       u32 write_loc;
+
+       /*
+        * The Hyper-V host writes the packet data, then uses
+        * store_release() to update the write_index.  Use load_acquire()
+        * here to prevent loads of the packet data from being re-ordered
+        * before the read of the write_index and potentially getting
+        * stale data.
+        */
+       write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
 
        if (write_loc >= priv_read_loc)
                return write_loc - priv_read_loc;
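
The comment in hv_pkt_iter_avail() describes a classic release/acquire
pairing. A conceptual sketch of both halves (the host-side producer is shown
as pseudocode comments only; the consumer line matches the hunk above):

	/* producer (Hyper-V host), conceptually:
	 *   1. copy the packet into the ring at write_index
	 *   2. store_release(write_index, new value)  -- publish the packet
	 */

	/* consumer (this driver): */
	u32 write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
	/*
	 * Loads of ring data issued after this acquire cannot be reordered
	 * before it, so the driver never reads packet bytes older than the
	 * write_index value it observed.
	 */
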
index 60ee8b3..14de170 100644 (file)
@@ -77,8 +77,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
 
        /*
         * Hyper-V should be notified only once about a panic.  If we will be
-        * doing hyperv_report_panic_msg() later with kmsg data, don't do
-        * the notification here.
+        * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+        * here.
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
            && hyperv_report_reg()) {
@@ -100,8 +100,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
 
        /*
         * Hyper-V should be notified only once about a panic.  If we will be
-        * doing hyperv_report_panic_msg() later with kmsg data, don't do
-        * the notification here.
+        * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+        * here.
         */
        if (hyperv_report_reg())
                hyperv_report_panic(regs, val, true);
@@ -921,6 +921,21 @@ static int vmbus_probe(struct device *child_device)
 }
 
 /*
+ * vmbus_dma_configure -- Configure DMA coherence for VMbus device
+ */
+static int vmbus_dma_configure(struct device *child_device)
+{
+       /*
+        * On ARM64, propagate the DMA coherence setting from the top level
+        * VMbus ACPI device to the child VMbus device being added here.
+        * On x86/x64 coherence is assumed and these calls have no effect.
+        */
+       hv_setup_dma_ops(child_device,
+               device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
+       return 0;
+}
+
+/*
  * vmbus_remove - Remove a vmbus device
  */
 static void vmbus_remove(struct device *child_device)
@@ -1040,6 +1055,7 @@ static struct bus_type  hv_bus = {
        .remove =               vmbus_remove,
        .probe =                vmbus_probe,
        .uevent =               vmbus_uevent,
+       .dma_configure =        vmbus_dma_configure,
        .dev_groups =           vmbus_dev_groups,
        .drv_groups =           vmbus_drv_groups,
        .bus_groups =           vmbus_bus_groups,
@@ -1546,14 +1562,20 @@ static int vmbus_bus_init(void)
        if (ret)
                goto err_connect;
 
+       if (hv_is_isolation_supported())
+               sysctl_record_panic_msg = 0;
+
        /*
         * Only register if the crash MSRs are available
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                u64 hyperv_crash_ctl;
                /*
-                * Sysctl registration is not fatal, since by default
-                * reporting is enabled.
+                * Panic message recording (sysctl_record_panic_msg)
+                * is enabled by default in non-isolated guests and
+                * disabled by default in isolated guests; the panic
+                * message recording won't be available in isolated
+                * guests should the following registration fail.
                 */
                hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
                if (!hv_ctl_table_hdr)
@@ -2097,6 +2119,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        child_device_obj->device.parent = &hv_acpi_dev->dev;
        child_device_obj->device.release = vmbus_device_release;
 
+       child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+       child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+       dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+
        /*
         * Register with the LDM. This will kick off the driver/device
         * binding...which will eventually call vmbus_match() and vmbus_probe()
@@ -2122,9 +2148,6 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        }
        hv_debug_add_dev_dir(child_device_obj);
 
-       child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
-       child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
-       dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
        return 0;
 
 err_kset_unregister:
@@ -2428,6 +2451,21 @@ static int vmbus_acpi_add(struct acpi_device *device)
 
        hv_acpi_dev = device;
 
+       /*
+        * Older versions of Hyper-V for ARM64 fail to include the _CCA
+        * method on the top level VMbus device in the DSDT. But devices
+        * are hardware coherent in all current Hyper-V use cases, so fix
+        * up the ACPI device to behave as if _CCA is present and indicates
+        * hardware coherence.
+        */
+       ACPI_COMPANION_SET(&device->dev, device);
+       if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
+           device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
+               pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
+               device->flags.cca_seen = true;
+               device->flags.coherent_dma = true;
+       }
+
        result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                        vmbus_walk_resources, NULL);
 
@@ -2780,10 +2818,15 @@ static void __exit vmbus_exit(void)
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                kmsg_dump_unregister(&hv_kmsg_dumper);
                unregister_die_notifier(&hyperv_die_block);
-               atomic_notifier_chain_unregister(&panic_notifier_list,
-                                                &hyperv_panic_block);
        }
 
+       /*
+        * The panic notifier is always registered, hence we should
+        * also unconditionally unregister it here as well.
+        */
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &hyperv_panic_block);
+
        free_page((unsigned long)hv_panic_page);
        unregister_sysctl_table(hv_ctl_table_hdr);
        hv_ctl_table_hdr = NULL;
index 5baebf6..e2752f7 100644 (file)
@@ -77,6 +77,13 @@ config INPUT_MATRIXKMAP
          To compile this driver as a module, choose M here: the
          module will be called matrix-keymap.
 
+config INPUT_VIVALDIFMAP
+       tristate
+       help
+         ChromeOS Vivaldi keymap support library. This is a hidden
+         option so that drivers can use common code to parse and
+         expose the vivaldi function row keymap.
+
 comment "Userland interfaces"
 
 config INPUT_MOUSEDEV
index 037cc59..2266c7d 100644 (file)
@@ -12,6 +12,7 @@ input-core-y += touchscreen.o
 obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
 obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
 obj-$(CONFIG_INPUT_MATRIXKMAP) += matrix-keymap.o
+obj-$(CONFIG_INPUT_VIVALDIFMAP)        += vivaldi-fmap.o
 
 obj-$(CONFIG_INPUT_LEDS)       += input-leds.o
 obj-$(CONFIG_INPUT_MOUSEDEV)   += mousedev.o
index c3139bc..e5a668c 100644 (file)
@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
 
 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
 
+static const unsigned int input_max_code[EV_CNT] = {
+       [EV_KEY] = KEY_MAX,
+       [EV_REL] = REL_MAX,
+       [EV_ABS] = ABS_MAX,
+       [EV_MSC] = MSC_MAX,
+       [EV_SW] = SW_MAX,
+       [EV_LED] = LED_MAX,
+       [EV_SND] = SND_MAX,
+       [EV_FF] = FF_MAX,
+};
+
 static inline int is_event_supported(unsigned int code,
                                     unsigned long *bm, unsigned int max)
 {
@@ -511,6 +522,9 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
 {
        struct input_absinfo *absinfo;
 
+       __set_bit(EV_ABS, dev->evbit);
+       __set_bit(axis, dev->absbit);
+
        input_alloc_absinfo(dev);
        if (!dev->absinfo)
                return;
@@ -520,12 +534,45 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
        absinfo->maximum = max;
        absinfo->fuzz = fuzz;
        absinfo->flat = flat;
-
-       __set_bit(EV_ABS, dev->evbit);
-       __set_bit(axis, dev->absbit);
 }
 EXPORT_SYMBOL(input_set_abs_params);
 
+/**
+ * input_copy_abs - Copy absinfo from one input_dev to another
+ * @dst: Destination input device to copy the abs settings to
+ * @dst_axis: ABS_* value selecting the destination axis
+ * @src: Source input device to copy the abs settings from
+ * @src_axis: ABS_* value selecting the source axis
+ *
+ * Set absinfo for the selected destination axis by copying it from
+ * the specified source input device's source axis.
+ * This is useful, e.g., to set up a pen/stylus input device for combined
+ * touchscreen/pen hardware where the pen uses the same coordinates as
+ * the touchscreen.
+ */
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+                   const struct input_dev *src, unsigned int src_axis)
+{
+       /* src must have EV_ABS and src_axis set */
+       if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
+                     test_bit(src_axis, src->absbit))))
+               return;
+
+       /*
+        * input_alloc_absinfo() may have failed for the source. Our caller is
+        * expected to catch this when registering the input devices, which may
+        * happen after the input_copy_abs() call.
+        */
+       if (!src->absinfo)
+               return;
+
+       input_set_capability(dst, EV_ABS, dst_axis);
+       if (!dst->absinfo)
+               return;
+
+       dst->absinfo[dst_axis] = src->absinfo[src_axis];
+}
+EXPORT_SYMBOL(input_copy_abs);
 
 /**
  * input_grab_device - grabs device for exclusive use
@@ -2074,6 +2121,14 @@ EXPORT_SYMBOL(input_get_timestamp);
  */
 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
 {
+       if (type < EV_CNT && input_max_code[type] &&
+           code > input_max_code[type]) {
+               pr_err("%s: invalid code %u for type %u\n", __func__, code,
+                      type);
+               dump_stack();
+               return;
+       }
+
        switch (type) {
        case EV_KEY:
                __set_bit(code, dev->keybit);
@@ -2085,9 +2140,6 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 
        case EV_ABS:
                input_alloc_absinfo(dev);
-               if (!dev->absinfo)
-                       return;
-
                __set_bit(code, dev->absbit);
                break;
 
@@ -2285,12 +2337,6 @@ int input_register_device(struct input_dev *dev)
        /* KEY_RESERVED is not supposed to be transmitted to userspace. */
        __clear_bit(KEY_RESERVED, dev->keybit);
 
-       /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
-       if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
-               __clear_bit(BTN_RIGHT, dev->keybit);
-               __clear_bit(BTN_MIDDLE, dev->keybit);
-       }
-
        /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
        input_cleanse_bitmasks(dev);
 
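
The input_copy_abs() helper and the new range check in input_set_capability() are used by drivers later in this series (see the Goodix pen changes below). A minimal sketch of the intended usage; the helper and device names here are hypothetical, not taken from this series:

#include <linux/input.h>

/* Sketch: pen device mirroring the axes of an already-configured touchscreen. */
static struct input_dev *example_create_pen(struct device *dev,
					    struct input_dev *ts_idev)
{
	struct input_dev *pen = devm_input_allocate_device(dev);

	if (!pen)
		return NULL;

	/* Copies range/fuzz/flat/resolution and sets EV_ABS plus the axis bit. */
	input_copy_abs(pen, ABS_X, ts_idev, ABS_MT_POSITION_X);
	input_copy_abs(pen, ABS_Y, ts_idev, ABS_MT_POSITION_Y);

	/*
	 * Valid codes are set as before; an out-of-range code such as
	 * KEY_MAX + 1 is now rejected by the input_max_code[] check
	 * (pr_err() plus dump_stack()) instead of corrupting the bitmaps.
	 */
	input_set_capability(pen, EV_KEY, BTN_TOOL_PEN);
	input_set_capability(pen, EV_KEY, BTN_TOUCH);

	return input_register_device(pen) ? NULL : pen;
}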
index 592c95b..e10d57b 100644 (file)
@@ -123,7 +123,7 @@ static void adi_read_packet(struct adi_port *port)
 {
        struct adi *adi = port->adi;
        struct gameport *gameport = port->gameport;
-       unsigned char u, v, w, x, z;
+       unsigned char u, v, w, x;
        int t[2], s[2], i;
        unsigned long flags;
 
@@ -136,7 +136,7 @@ static void adi_read_packet(struct adi_port *port)
        local_irq_save(flags);
 
        gameport_trigger(gameport);
-       v = z = gameport_read(gameport);
+       v = gameport_read(gameport);
 
        do {
                u = v;
index 4c914f7..18190b5 100644 (file)
@@ -131,7 +131,7 @@ static const struct xpad_device {
        { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
        { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
        { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
-       { 0x045e, 0x0b12, "Microsoft Xbox One X pad", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+       { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
        { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
        { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
        { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
index 9417ee0..4ea79db 100644 (file)
@@ -103,6 +103,7 @@ config KEYBOARD_ATKBD
        select SERIO_LIBPS2
        select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
        select SERIO_GSCPS2 if GSC
+       select INPUT_VIVALDIFMAP
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
          you'll need this, unless you have a different type keyboard (USB, ADB
@@ -749,6 +750,7 @@ config KEYBOARD_XTKBD
 config KEYBOARD_CROS_EC
        tristate "ChromeOS EC keyboard"
        select INPUT_MATRIXKMAP
+       select INPUT_VIVALDIFMAP
        depends on CROS_EC
        help
          Say Y here to enable the matrix keyboard used by ChromeOS devices
@@ -779,6 +781,18 @@ config KEYBOARD_BCM
          To compile this driver as a module, choose M here: the
          module will be called bcm-keypad.
 
+config KEYBOARD_MT6779
+       tristate "MediaTek Keypad Support"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       select REGMAP_MMIO
+       select INPUT_MATRIXKMAP
+       help
+         Say Y here if you want to use the keypad on MediaTek SoCs.
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mt6779-keypad.
+
 config KEYBOARD_MTK_PMIC
        tristate "MediaTek PMIC keys support"
        depends on MFD_MT6397
index e3c8648..721936e 100644 (file)
@@ -44,6 +44,7 @@ obj-$(CONFIG_KEYBOARD_MATRIX)         += matrix_keypad.o
 obj-$(CONFIG_KEYBOARD_MAX7359)         += max7359_keypad.o
 obj-$(CONFIG_KEYBOARD_MCS)             += mcs_touchkey.o
 obj-$(CONFIG_KEYBOARD_MPR121)          += mpr121_touchkey.o
+obj-$(CONFIG_KEYBOARD_MT6779)          += mt6779-keypad.o
 obj-$(CONFIG_KEYBOARD_MTK_PMIC)        += mtk-pmic-keys.o
 obj-$(CONFIG_KEYBOARD_NEWTON)          += newtonkbd.o
 obj-$(CONFIG_KEYBOARD_NOMADIK)         += nomadik-ske-keypad.o
index fbdef95..d413123 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/serio.h>
 #include <linux/workqueue.h>
 #include <linux/libps2.h>
@@ -64,8 +65,6 @@ static bool atkbd_terminal;
 module_param_named(terminal, atkbd_terminal, bool, 0);
 MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
 
-#define MAX_FUNCTION_ROW_KEYS  24
-
 #define SCANCODE(keymap)       ((keymap >> 16) & 0xFFFF)
 #define KEYCODE(keymap)                (keymap & 0xFFFF)
 
@@ -237,8 +236,7 @@ struct atkbd {
        /* Serializes reconnect(), attr->set() and event work */
        struct mutex mutex;
 
-       u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS];
-       int num_function_row_keys;
+       struct vivaldi_data vdata;
 };
 
 /*
@@ -308,17 +306,7 @@ static struct attribute *atkbd_attributes[] = {
 
 static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
 {
-       ssize_t size = 0;
-       int i;
-
-       if (!atkbd->num_function_row_keys)
-               return 0;
-
-       for (i = 0; i < atkbd->num_function_row_keys; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ",
-                                 atkbd->function_row_physmap[i]);
-       size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
-       return size;
+       return vivaldi_function_row_physmap_show(&atkbd->vdata, buf);
 }
 
 static umode_t atkbd_attr_is_visible(struct kobject *kobj,
@@ -329,7 +317,7 @@ static umode_t atkbd_attr_is_visible(struct kobject *kobj,
        struct atkbd *atkbd = serio_get_drvdata(serio);
 
        if (attr == &atkbd_attr_function_row_physmap.attr &&
-           !atkbd->num_function_row_keys)
+           !atkbd->vdata.num_function_row_keys)
                return 0;
 
        return attr->mode;
@@ -1206,10 +1194,11 @@ static void atkbd_parse_fwnode_data(struct serio *serio)
 
        /* Parse "function-row-physmap" property */
        n = device_property_count_u32(dev, "function-row-physmap");
-       if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS &&
+       if (n > 0 && n <= VIVALDI_MAX_FUNCTION_ROW_KEYS &&
            !device_property_read_u32_array(dev, "function-row-physmap",
-                                           atkbd->function_row_physmap, n)) {
-               atkbd->num_function_row_keys = n;
+                                           atkbd->vdata.function_row_physmap,
+                                           n)) {
+               atkbd->vdata.num_function_row_keys = n;
                dev_dbg(dev, "FW reported %d function-row key locations\n", n);
        }
 }
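
Both atkbd (above) and cros-ec-keyb (below) now keep the top-row layout in a struct vivaldi_data and hand the sysfs formatting off to vivaldi_function_row_physmap_show(). A hedged sketch of that pattern for a hypothetical driver struct, with the usual sysfs attribute boilerplate omitted:

#include <linux/device.h>
#include <linux/input/vivaldi-fmap.h>

struct example_kbd {
	struct vivaldi_data vdata;	/* function_row_physmap[] + num_function_row_keys */
	/* ... driver-specific fields ... */
};

static ssize_t function_row_physmap_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	const struct example_kbd *kbd = dev_get_drvdata(dev);

	/*
	 * The shared helper formats the first num_function_row_keys entries,
	 * replacing the per-driver scnprintf() loops removed above.
	 */
	return vivaldi_function_row_physmap_show(&kbd->vdata, buf);
}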
index fc02c54..6534dfc 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/notifier.h>
@@ -27,8 +28,6 @@
 
 #include <asm/unaligned.h>
 
-#define MAX_NUM_TOP_ROW_KEYS   15
-
 /**
  * struct cros_ec_keyb - Structure representing EC keyboard device
  *
@@ -44,9 +43,7 @@
  * @idev: The input device for the matrix keys.
  * @bs_idev: The input device for non-matrix buttons and switches (or NULL).
  * @notifier: interrupt event notifier for transport devices
- * @function_row_physmap: An array of the encoded rows/columns for the top
- *                        row function keys, in an order from left to right
- * @num_function_row_keys: The number of top row keys in a custom keyboard
+ * @vdata: vivaldi function row data
  */
 struct cros_ec_keyb {
        unsigned int rows;
@@ -64,8 +61,7 @@ struct cros_ec_keyb {
        struct input_dev *bs_idev;
        struct notifier_block notifier;
 
-       u16 function_row_physmap[MAX_NUM_TOP_ROW_KEYS];
-       size_t num_function_row_keys;
+       struct vivaldi_data vdata;
 };
 
 /**
@@ -537,9 +533,9 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
        int err;
        struct property *prop;
        const __be32 *p;
-       u16 *physmap;
+       u32 *physmap;
        u32 key_pos;
-       int row, col;
+       unsigned int row, col, scancode, n_physmap;
 
        err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
        if (err)
@@ -591,20 +587,21 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
        ckdev->idev = idev;
        cros_ec_keyb_compute_valid_keys(ckdev);
 
-       physmap = ckdev->function_row_physmap;
+       physmap = ckdev->vdata.function_row_physmap;
+       n_physmap = 0;
        of_property_for_each_u32(dev->of_node, "function-row-physmap",
                                 prop, p, key_pos) {
-               if (ckdev->num_function_row_keys == MAX_NUM_TOP_ROW_KEYS) {
+               if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
                        dev_warn(dev, "Only support up to %d top row keys\n",
-                                MAX_NUM_TOP_ROW_KEYS);
+                                VIVALDI_MAX_FUNCTION_ROW_KEYS);
                        break;
                }
                row = KEY_ROW(key_pos);
                col = KEY_COL(key_pos);
-               *physmap = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
-               physmap++;
-               ckdev->num_function_row_keys++;
+               scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+               physmap[n_physmap++] = scancode;
        }
+       ckdev->vdata.num_function_row_keys = n_physmap;
 
        err = input_register_device(ckdev->idev);
        if (err) {
@@ -619,18 +616,10 @@ static ssize_t function_row_physmap_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       ssize_t size = 0;
-       int i;
-       struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
-       u16 *physmap = ckdev->function_row_physmap;
-
-       for (i = 0; i < ckdev->num_function_row_keys; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size,
-                                 "%s%02X", size ? " " : "", physmap[i]);
-       if (size)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+       const struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+       const struct vivaldi_data *data = &ckdev->vdata;
 
-       return size;
+       return vivaldi_function_row_physmap_show(data, buf);
 }
 
 static DEVICE_ATTR_RO(function_row_physmap);
@@ -648,7 +637,7 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
        struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
 
        if (attr == &dev_attr_function_row_physmap.attr &&
-           !ckdev->num_function_row_keys)
+           !ckdev->vdata.num_function_row_keys)
                return 0;
 
        return attr->mode;
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
new file mode 100644 (file)
index 0000000..0dbbddc
--- /dev/null
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author Fengping Yu <fengping.yu@mediatek.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MTK_KPD_NAME           "mt6779-keypad"
+#define MTK_KPD_MEM            0x0004
+#define MTK_KPD_DEBOUNCE       0x0018
+#define MTK_KPD_DEBOUNCE_MASK  GENMASK(13, 0)
+#define MTK_KPD_DEBOUNCE_MAX_MS        256
+#define MTK_KPD_NUM_MEMS       5
+#define MTK_KPD_NUM_BITS       136     /* 4*32+8; MEM5 only uses 8 bits */
+
+struct mt6779_keypad {
+       struct regmap *regmap;
+       struct input_dev *input_dev;
+       struct clk *clk;
+       void __iomem *base;
+       u32 n_rows;
+       u32 n_cols;
+       DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS);
+};
+
+static const struct regmap_config mt6779_keypad_regmap_cfg = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = sizeof(u32),
+       .max_register = 36,
+};
+
+static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
+{
+       struct mt6779_keypad *keypad = dev_id;
+       const unsigned short *keycode = keypad->input_dev->keycode;
+       DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
+       DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
+       unsigned int bit_nr;
+       unsigned int row, col;
+       unsigned int scancode;
+       unsigned int row_shift = get_count_order(keypad->n_cols);
+       bool pressed;
+
+       regmap_bulk_read(keypad->regmap, MTK_KPD_MEM,
+                        new_state, MTK_KPD_NUM_MEMS);
+
+       bitmap_xor(change, new_state, keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+       for_each_set_bit(bit_nr, change, MTK_KPD_NUM_BITS) {
+               /*
+                * Registers are 32bits, but only bits [15:0] are used to
+                * indicate key status.
+                */
+               if (bit_nr % 32 >= 16)
+                       continue;
+
+               row = bit_nr / 32;
+               col = bit_nr % 32;
+               scancode = MATRIX_SCAN_CODE(row, col, row_shift);
+               /* 1: not pressed, 0: pressed */
+               pressed = !test_bit(bit_nr, new_state);
+               dev_dbg(&keypad->input_dev->dev, "%s",
+                       pressed ? "pressed" : "released");
+
+               input_event(keypad->input_dev, EV_MSC, MSC_SCAN, scancode);
+               input_report_key(keypad->input_dev, keycode[scancode], pressed);
+               input_sync(keypad->input_dev);
+
+               dev_dbg(&keypad->input_dev->dev,
+                       "report Linux keycode = %d\n", keycode[scancode]);
+       }
+
+       bitmap_copy(keypad->keymap_state, new_state, MTK_KPD_NUM_BITS);
+
+       return IRQ_HANDLED;
+}
+
+static void mt6779_keypad_clk_disable(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
+static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
+{
+       struct mt6779_keypad *keypad;
+       int irq;
+       u32 debounce;
+       bool wakeup;
+       int error;
+
+       keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
+       if (!keypad)
+               return -ENOMEM;
+
+       keypad->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(keypad->base))
+               return PTR_ERR(keypad->base);
+
+       keypad->regmap = devm_regmap_init_mmio(&pdev->dev, keypad->base,
+                                              &mt6779_keypad_regmap_cfg);
+       if (IS_ERR(keypad->regmap)) {
+               dev_err(&pdev->dev,
+                       "regmap init failed:%pe\n", keypad->regmap);
+               return PTR_ERR(keypad->regmap);
+       }
+
+       bitmap_fill(keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+       keypad->input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!keypad->input_dev) {
+               dev_err(&pdev->dev, "Failed to allocate input dev\n");
+               return -ENOMEM;
+       }
+
+       keypad->input_dev->name = MTK_KPD_NAME;
+       keypad->input_dev->id.bustype = BUS_HOST;
+
+       error = matrix_keypad_parse_properties(&pdev->dev, &keypad->n_rows,
+                                              &keypad->n_cols);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to parse keypad params\n");
+               return error;
+       }
+
+       if (device_property_read_u32(&pdev->dev, "debounce-delay-ms",
+                                    &debounce))
+               debounce = 16;
+
+       if (debounce > MTK_KPD_DEBOUNCE_MAX_MS) {
+               dev_err(&pdev->dev,
+                       "Debounce time exceeds the maximum allowed time %dms\n",
+                       MTK_KPD_DEBOUNCE_MAX_MS);
+               return -EINVAL;
+       }
+
+       wakeup = device_property_read_bool(&pdev->dev, "wakeup-source");
+
+       dev_dbg(&pdev->dev, "n_row=%d n_col=%d debounce=%d\n",
+               keypad->n_rows, keypad->n_cols, debounce);
+
+       error = matrix_keypad_build_keymap(NULL, NULL,
+                                          keypad->n_rows, keypad->n_cols,
+                                          NULL, keypad->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to build keymap\n");
+               return error;
+       }
+
+       input_set_capability(keypad->input_dev, EV_MSC, MSC_SCAN);
+
+       regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
+                    (debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+
+       keypad->clk = devm_clk_get(&pdev->dev, "kpd");
+       if (IS_ERR(keypad->clk))
+               return PTR_ERR(keypad->clk);
+
+       error = clk_prepare_enable(keypad->clk);
+       if (error) {
+               dev_err(&pdev->dev, "cannot prepare/enable keypad clock\n");
+               return error;
+       }
+
+       error = devm_add_action_or_reset(&pdev->dev, mt6779_keypad_clk_disable,
+                                        keypad->clk);
+       if (error)
+               return error;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       error = devm_request_threaded_irq(&pdev->dev, irq,
+                                         NULL, mt6779_keypad_irq_handler,
+                                         IRQF_ONESHOT, MTK_KPD_NAME, keypad);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to request IRQ#%d: %d\n",
+                       irq, error);
+               return error;
+       }
+
+       error = input_register_device(keypad->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to register device\n");
+               return error;
+       }
+
+       error = device_init_wakeup(&pdev->dev, wakeup);
+       if (error)
+               dev_warn(&pdev->dev, "device_init_wakeup() failed: %d\n",
+                        error);
+
+       return 0;
+}
+
+static const struct of_device_id mt6779_keypad_of_match[] = {
+       { .compatible = "mediatek,mt6779-keypad" },
+       { .compatible = "mediatek,mt6873-keypad" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver mt6779_keypad_pdrv = {
+       .probe = mt6779_keypad_pdrv_probe,
+       .driver = {
+                  .name = MTK_KPD_NAME,
+                  .of_match_table = mt6779_keypad_of_match,
+       },
+};
+module_platform_driver(mt6779_keypad_pdrv);
+
+MODULE_AUTHOR("Mediatek Corporation");
+MODULE_DESCRIPTION("MTK Keypad (KPD) Driver");
+MODULE_LICENSE("GPL");
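
The interrupt handler above converts a set bit in the 136-bit key-state bitmap into a matrix scancode via MATRIX_SCAN_CODE(). A standalone sketch of that arithmetic, using a hypothetical 8-column keypad (row_shift = get_count_order(8) = 3) and a made-up bit number:

#include <stdio.h>

/* Same formula as MATRIX_SCAN_CODE() in <linux/input/matrix_keypad.h>. */
#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))

int main(void)
{
	unsigned int row_shift = 3;	/* get_count_order(8) for an 8-column keypad */
	unsigned int bit_nr = 70;	/* example bit from the MTK_KPD_MEM bitmap */
	unsigned int row = bit_nr / 32;	/* 2: each 32-bit register holds one row */
	unsigned int col = bit_nr % 32;	/* 6: must be < 16, upper bits are skipped */

	printf("scancode = %u\n", MATRIX_SCAN_CODE(row, col, row_shift)); /* 22 */
	return 0;
}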
index 62391d6..c31ab43 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6358/registers.h>
 #include <linux/mfd/mt6397/core.h>
 #include <linux/mfd/mt6397/registers.h>
 #include <linux/module.h>
@@ -74,11 +75,22 @@ static const struct mtk_pmic_regs mt6323_regs = {
        .pmic_rst_reg = MT6323_TOP_RST_MISC,
 };
 
+static const struct mtk_pmic_regs mt6358_regs = {
+       .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+               MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+                                  0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+       .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+               MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+                                  0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+       .pmic_rst_reg = MT6358_TOP_RST_MISC,
+};
+
 struct mtk_pmic_keys_info {
        struct mtk_pmic_keys *keys;
        const struct mtk_pmic_keys_regs *regs;
        unsigned int keycode;
        int irq;
+       int irq_r; /* optional: release irq if different */
        bool wakeup:1;
 };
 
@@ -188,6 +200,18 @@ static int mtk_pmic_key_setup(struct mtk_pmic_keys *keys,
                return ret;
        }
 
+       if (info->irq_r > 0) {
+               ret = devm_request_threaded_irq(keys->dev, info->irq_r, NULL,
+                                               mtk_pmic_keys_irq_handler_thread,
+                                               IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+                                               "mtk-pmic-keys", info);
+               if (ret) {
+                       dev_err(keys->dev, "Failed to request IRQ_r: %d: %d\n",
+                               info->irq_r, ret);
+                       return ret;
+               }
+       }
+
        input_set_capability(keys->input_dev, EV_KEY, info->keycode);
 
        return 0;
@@ -199,8 +223,11 @@ static int __maybe_unused mtk_pmic_keys_suspend(struct device *dev)
        int index;
 
        for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
-               if (keys->keys[index].wakeup)
+               if (keys->keys[index].wakeup) {
                        enable_irq_wake(keys->keys[index].irq);
+                       if (keys->keys[index].irq_r > 0)
+                               enable_irq_wake(keys->keys[index].irq_r);
+               }
        }
 
        return 0;
@@ -212,8 +239,11 @@ static int __maybe_unused mtk_pmic_keys_resume(struct device *dev)
        int index;
 
        for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
-               if (keys->keys[index].wakeup)
+               if (keys->keys[index].wakeup) {
                        disable_irq_wake(keys->keys[index].irq);
+                       if (keys->keys[index].irq_r > 0)
+                               disable_irq_wake(keys->keys[index].irq_r);
+               }
        }
 
        return 0;
@@ -230,6 +260,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
                .compatible = "mediatek,mt6323-keys",
                .data = &mt6323_regs,
        }, {
+               .compatible = "mediatek,mt6358-keys",
+               .data = &mt6358_regs,
+       }, {
                /* sentinel */
        }
 };
@@ -241,6 +274,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        unsigned int keycount;
        struct mt6397_chip *pmic_chip = dev_get_drvdata(pdev->dev.parent);
        struct device_node *node = pdev->dev.of_node, *child;
+       static const char *const irqnames[] = { "powerkey", "homekey" };
+       static const char *const irqnames_r[] = { "powerkey_r", "homekey_r" };
        struct mtk_pmic_keys *keys;
        const struct mtk_pmic_regs *mtk_pmic_regs;
        struct input_dev *input_dev;
@@ -268,7 +303,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        input_dev->id.version = 0x0001;
 
        keycount = of_get_available_child_count(node);
-       if (keycount > MTK_PMIC_MAX_KEY_COUNT) {
+       if (keycount > MTK_PMIC_MAX_KEY_COUNT ||
+           keycount > ARRAY_SIZE(irqnames)) {
                dev_err(keys->dev, "too many keys defined (%d)\n", keycount);
                return -EINVAL;
        }
@@ -276,12 +312,23 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        for_each_child_of_node(node, child) {
                keys->keys[index].regs = &mtk_pmic_regs->keys_regs[index];
 
-               keys->keys[index].irq = platform_get_irq(pdev, index);
+               keys->keys[index].irq =
+                       platform_get_irq_byname(pdev, irqnames[index]);
                if (keys->keys[index].irq < 0) {
                        of_node_put(child);
                        return keys->keys[index].irq;
                }
 
+               if (of_device_is_compatible(node, "mediatek,mt6358-keys")) {
+                       keys->keys[index].irq_r = platform_get_irq_byname(pdev,
+                                                                         irqnames_r[index]);
+
+                       if (keys->keys[index].irq_r < 0) {
+                               of_node_put(child);
+                               return keys->keys[index].irq_r;
+                       }
+               }
+
                error = of_property_read_u32(child,
                        "linux,keycodes", &keys->keys[index].keycode);
                if (error) {
index 7985192..b14a389 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2015  Dialog Semiconductor Ltd.
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/input.h>
@@ -182,13 +183,6 @@ static irqreturn_t da9063_onkey_irq_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void da9063_cancel_poll(void *data)
-{
-       struct da9063_onkey *onkey = data;
-
-       cancel_delayed_work_sync(&onkey->work);
-}
-
 static int da9063_onkey_probe(struct platform_device *pdev)
 {
        struct da9063_onkey *onkey;
@@ -234,9 +228,8 @@ static int da9063_onkey_probe(struct platform_device *pdev)
 
        input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
-       INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
-
-       error = devm_add_action(&pdev->dev, da9063_cancel_poll, onkey);
+       error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
+                                            da9063_poll_on);
        if (error) {
                dev_err(&pdev->dev,
                        "Failed to add cancel poll action: %d\n",
index ffad142..434d48a 100644 (file)
@@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN2044", /* L470  */
        "LEN2054", /* E480 */
        "LEN2055", /* E580 */
+       "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
        "LEN2068", /* T14 Gen 1 */
        "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
index 8970b49..9b02dd5 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
+#include <linux/timekeeping.h>
 
 #define DRIVER_NAME            "ps2-gpio"
 
 #define PS2_DATA_BIT7          8
 #define PS2_PARITY_BIT         9
 #define PS2_STOP_BIT           10
-#define PS2_TX_TIMEOUT         11
-#define PS2_ACK_BIT            12
+#define PS2_ACK_BIT            11
 
 #define PS2_DEV_RET_ACK                0xfa
 #define PS2_DEV_RET_NACK       0xfe
 
 #define PS2_CMD_RESEND         0xfe
 
+/*
+ * The PS2 protocol specifies a clock frequency between 10kHz and 16.7kHz,
+ * therefore the maximal interrupt interval should be 100us and the minimum
+ * interrupt interval should be ~60us. Let's allow +/- 20us for frequency
+ * deviations and interrupt latency.
+ *
+ * The data line must be sampled ~30us to 50us after the falling edge,
+ * since the device updates the data line at the rising edge.
+ *
+ * ___            ______            ______            ______            ___
+ *    \          /      \          /      \          /      \          /
+ *     \        /        \        /        \        /        \        /
+ *      \______/          \______/          \______/          \______/
+ *
+ *     |-----------------|                 |--------|
+ *          60us/100us                      30us/50us
+ */
+#define PS2_CLK_FREQ_MIN_HZ            10000
+#define PS2_CLK_FREQ_MAX_HZ            16700
+#define PS2_CLK_MIN_INTERVAL_US                ((1000 * 1000) / PS2_CLK_FREQ_MAX_HZ)
+#define PS2_CLK_MAX_INTERVAL_US                ((1000 * 1000) / PS2_CLK_FREQ_MIN_HZ)
+#define PS2_IRQ_MIN_INTERVAL_US                (PS2_CLK_MIN_INTERVAL_US - 20)
+#define PS2_IRQ_MAX_INTERVAL_US                (PS2_CLK_MAX_INTERVAL_US + 20)
+
 struct ps2_gpio_data {
        struct device *dev;
        struct serio *serio;
@@ -52,19 +76,30 @@ struct ps2_gpio_data {
        struct gpio_desc *gpio_data;
        bool write_enable;
        int irq;
-       unsigned char rx_cnt;
-       unsigned char rx_byte;
-       unsigned char tx_cnt;
-       unsigned char tx_byte;
-       struct completion tx_done;
-       struct mutex tx_mutex;
-       struct delayed_work tx_work;
+       ktime_t t_irq_now;
+       ktime_t t_irq_last;
+       struct {
+               unsigned char cnt;
+               unsigned char byte;
+       } rx;
+       struct {
+               unsigned char cnt;
+               unsigned char byte;
+               ktime_t t_xfer_start;
+               ktime_t t_xfer_end;
+               struct completion complete;
+               struct mutex mutex;
+               struct delayed_work work;
+       } tx;
 };
 
 static int ps2_gpio_open(struct serio *serio)
 {
        struct ps2_gpio_data *drvdata = serio->port_data;
 
+       drvdata->t_irq_last = 0;
+       drvdata->tx.t_xfer_end = 0;
+
        enable_irq(drvdata->irq);
        return 0;
 }
@@ -73,7 +108,7 @@ static void ps2_gpio_close(struct serio *serio)
 {
        struct ps2_gpio_data *drvdata = serio->port_data;
 
-       flush_delayed_work(&drvdata->tx_work);
+       flush_delayed_work(&drvdata->tx.work);
        disable_irq(drvdata->irq);
 }
 
@@ -85,9 +120,9 @@ static int __ps2_gpio_write(struct serio *serio, unsigned char val)
        gpiod_direction_output(drvdata->gpio_clk, 0);
 
        drvdata->mode = PS2_MODE_TX;
-       drvdata->tx_byte = val;
+       drvdata->tx.byte = val;
 
-       schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200));
+       schedule_delayed_work(&drvdata->tx.work, usecs_to_jiffies(200));
 
        return 0;
 }
@@ -98,12 +133,12 @@ static int ps2_gpio_write(struct serio *serio, unsigned char val)
        int ret = 0;
 
        if (in_task()) {
-               mutex_lock(&drvdata->tx_mutex);
+               mutex_lock(&drvdata->tx.mutex);
                __ps2_gpio_write(serio, val);
-               if (!wait_for_completion_timeout(&drvdata->tx_done,
+               if (!wait_for_completion_timeout(&drvdata->tx.complete,
                                                 msecs_to_jiffies(10000)))
                        ret = SERIO_TIMEOUT;
-               mutex_unlock(&drvdata->tx_mutex);
+               mutex_unlock(&drvdata->tx.mutex);
        } else {
                __ps2_gpio_write(serio, val);
        }
@@ -115,9 +150,10 @@ static void ps2_gpio_tx_work_fn(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct ps2_gpio_data *drvdata = container_of(dwork,
-                                                   struct ps2_gpio_data,
-                                                   tx_work);
+                                                    struct ps2_gpio_data,
+                                                    tx.work);
 
+       drvdata->tx.t_xfer_start = ktime_get();
        enable_irq(drvdata->irq);
        gpiod_direction_output(drvdata->gpio_data, 0);
        gpiod_direction_input(drvdata->gpio_clk);
@@ -128,20 +164,31 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
        unsigned char byte, cnt;
        int data;
        int rxflags = 0;
-       static unsigned long old_jiffies;
+       s64 us_delta;
 
-       byte = drvdata->rx_byte;
-       cnt = drvdata->rx_cnt;
+       byte = drvdata->rx.byte;
+       cnt = drvdata->rx.cnt;
 
-       if (old_jiffies == 0)
-               old_jiffies = jiffies;
+       drvdata->t_irq_now = ktime_get();
+
+       /*
+        * We need to consider spurious interrupts happening right after
+        * a TX xfer finished.
+        */
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->tx.t_xfer_end);
+       if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US))
+               goto end;
 
-       if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+       if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt) {
                dev_err(drvdata->dev,
                        "RX: timeout, probably we missed an interrupt\n");
                goto err;
+       } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+               /* Ignore spurious IRQs. */
+               goto end;
        }
-       old_jiffies = jiffies;
+       drvdata->t_irq_last = drvdata->t_irq_now;
 
        data = gpiod_get_value(drvdata->gpio_data);
        if (unlikely(data < 0)) {
@@ -178,8 +225,16 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
                        if (!drvdata->write_enable)
                                goto err;
                }
+               break;
+       case PS2_STOP_BIT:
+               /* stop bit should be high */
+               if (unlikely(!data)) {
+                       dev_err(drvdata->dev, "RX: stop bit should be high\n");
+                       goto err;
+               }
 
-               /* Do not send spurious ACK's and NACK's when write fn is
+               /*
+                * Do not send spurious ACK's and NACK's when write fn is
                 * not provided.
                 */
                if (!drvdata->write_enable) {
@@ -189,23 +244,11 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
                                break;
                }
 
-               /* Let's send the data without waiting for the stop bit to be
-                * sent. It may happen that we miss the stop bit. When this
-                * happens we have no way to recover from this, certainly
-                * missing the parity bit would be recognized when processing
-                * the stop bit. When missing both, data is lost.
-                */
                serio_interrupt(drvdata->serio, byte, rxflags);
                dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte);
-               break;
-       case PS2_STOP_BIT:
-               /* stop bit should be high */
-               if (unlikely(!data)) {
-                       dev_err(drvdata->dev, "RX: stop bit should be high\n");
-                       goto err;
-               }
+
                cnt = byte = 0;
-               old_jiffies = 0;
+
                goto end; /* success */
        default:
                dev_err(drvdata->dev, "RX: got out of sync with the device\n");
@@ -217,11 +260,10 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
 
 err:
        cnt = byte = 0;
-       old_jiffies = 0;
        __ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND);
 end:
-       drvdata->rx_cnt = cnt;
-       drvdata->rx_byte = byte;
+       drvdata->rx.cnt = cnt;
+       drvdata->rx.byte = byte;
        return IRQ_HANDLED;
 }
 
@@ -229,20 +271,34 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
 {
        unsigned char byte, cnt;
        int data;
-       static unsigned long old_jiffies;
+       s64 us_delta;
+
+       cnt = drvdata->tx.cnt;
+       byte = drvdata->tx.byte;
 
-       cnt = drvdata->tx_cnt;
-       byte = drvdata->tx_byte;
+       drvdata->t_irq_now = ktime_get();
 
-       if (old_jiffies == 0)
-               old_jiffies = jiffies;
+       /*
+        * There might be pending IRQs since we disabled IRQs in
+        * __ps2_gpio_write().  We can expect at least one clock period until
+        * the device generates the first falling edge after releasing the
+        * clock line.
+        */
+       us_delta = ktime_us_delta(drvdata->t_irq_now,
+                                 drvdata->tx.t_xfer_start);
+       if (unlikely(us_delta < PS2_CLK_MIN_INTERVAL_US))
+               goto end;
 
-       if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+       if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt > 1) {
                dev_err(drvdata->dev,
                        "TX: timeout, probably we missed an interrupt\n");
                goto err;
+       } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+               /* Ignore spurious IRQs. */
+               goto end;
        }
-       old_jiffies = jiffies;
+       drvdata->t_irq_last = drvdata->t_irq_now;
 
        switch (cnt) {
        case PS2_START_BIT:
@@ -270,27 +326,22 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
                /* release data line to generate stop bit */
                gpiod_direction_input(drvdata->gpio_data);
                break;
-       case PS2_TX_TIMEOUT:
-               /* Devices generate one extra clock pulse before sending the
-                * acknowledgment.
-                */
-               break;
        case PS2_ACK_BIT:
-               gpiod_direction_input(drvdata->gpio_data);
                data = gpiod_get_value(drvdata->gpio_data);
                if (data) {
                        dev_warn(drvdata->dev, "TX: received NACK, retry\n");
                        goto err;
                }
 
+               drvdata->tx.t_xfer_end = ktime_get();
                drvdata->mode = PS2_MODE_RX;
-               complete(&drvdata->tx_done);
+               complete(&drvdata->tx.complete);
 
                cnt = 1;
-               old_jiffies = 0;
                goto end; /* success */
        default:
-               /* Probably we missed the stop bit. Therefore we release data
+               /*
+                * Probably we missed the stop bit. Therefore we release the data
                 * line and try again.
                 */
                gpiod_direction_input(drvdata->gpio_data);
@@ -303,11 +354,10 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
 
 err:
        cnt = 1;
-       old_jiffies = 0;
        gpiod_direction_input(drvdata->gpio_data);
-       __ps2_gpio_write(drvdata->serio, drvdata->tx_byte);
+       __ps2_gpio_write(drvdata->serio, drvdata->tx.byte);
 end:
-       drvdata->tx_cnt = cnt;
+       drvdata->tx.cnt = cnt;
        return IRQ_HANDLED;
 }
 
@@ -322,14 +372,19 @@ static irqreturn_t ps2_gpio_irq(int irq, void *dev_id)
 static int ps2_gpio_get_props(struct device *dev,
                                 struct ps2_gpio_data *drvdata)
 {
-       drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN);
+       enum gpiod_flags gflags;
+
+       /* Enforce open drain, since this is required by the PS/2 bus. */
+       gflags = GPIOD_IN | GPIOD_FLAGS_BIT_OPEN_DRAIN;
+
+       drvdata->gpio_data = devm_gpiod_get(dev, "data", gflags);
        if (IS_ERR(drvdata->gpio_data)) {
                dev_err(dev, "failed to request data gpio: %ld",
                        PTR_ERR(drvdata->gpio_data));
                return PTR_ERR(drvdata->gpio_data);
        }
 
-       drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN);
+       drvdata->gpio_clk = devm_gpiod_get(dev, "clk", gflags);
        if (IS_ERR(drvdata->gpio_clk)) {
                dev_err(dev, "failed to request clock gpio: %ld",
                        PTR_ERR(drvdata->gpio_clk));
@@ -387,7 +442,8 @@ static int ps2_gpio_probe(struct platform_device *pdev)
        serio->id.type = SERIO_8042;
        serio->open = ps2_gpio_open;
        serio->close = ps2_gpio_close;
-       /* Write can be enabled in platform/dt data, but possibly it will not
+       /*
+        * Write can be enabled in platform/dt data, but possibly it will not
         * work because of the tough timings.
         */
        serio->write = drvdata->write_enable ? ps2_gpio_write : NULL;
@@ -400,14 +456,15 @@ static int ps2_gpio_probe(struct platform_device *pdev)
        drvdata->dev = dev;
        drvdata->mode = PS2_MODE_RX;
 
-       /* Tx count always starts at 1, as the start bit is sent implicitly by
+       /*
+        * Tx count always starts at 1, as the start bit is sent implicitly by
         * host-to-device communication initialization.
         */
-       drvdata->tx_cnt = 1;
+       drvdata->tx.cnt = 1;
 
-       INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn);
-       init_completion(&drvdata->tx_done);
-       mutex_init(&drvdata->tx_mutex);
+       INIT_DELAYED_WORK(&drvdata->tx.work, ps2_gpio_tx_work_fn);
+       init_completion(&drvdata->tx.complete);
+       mutex_init(&drvdata->tx.mutex);
 
        serio_register_port(serio);
        platform_set_drvdata(pdev, drvdata);
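
The rewritten RX/TX paths replace the jiffies-based timeout with microsecond deltas derived from the PS/2 clock range. The derived constants can be sanity-checked with a small standalone program (integer division, as in the driver):

#include <stdio.h>

#define PS2_CLK_FREQ_MIN_HZ		10000
#define PS2_CLK_FREQ_MAX_HZ		16700
#define PS2_CLK_MIN_INTERVAL_US		((1000 * 1000) / PS2_CLK_FREQ_MAX_HZ)
#define PS2_CLK_MAX_INTERVAL_US		((1000 * 1000) / PS2_CLK_FREQ_MIN_HZ)
#define PS2_IRQ_MIN_INTERVAL_US		(PS2_CLK_MIN_INTERVAL_US - 20)
#define PS2_IRQ_MAX_INTERVAL_US		(PS2_CLK_MAX_INTERVAL_US + 20)

int main(void)
{
	/* 1000000/16700 = 59us, 1000000/10000 = 100us */
	printf("clock period: %d..%d us\n",
	       PS2_CLK_MIN_INTERVAL_US, PS2_CLK_MAX_INTERVAL_US);
	/*
	 * With the +/-20us slack: edges closer than 39us apart are treated
	 * as spurious, gaps above 120us between bits as a missed interrupt.
	 */
	printf("accepted IRQ interval: %d..%d us\n",
	       PS2_IRQ_MIN_INTERVAL_US, PS2_IRQ_MAX_INTERVAL_US);
	return 0;
}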
index ff7794c..43c7d6e 100644 (file)
@@ -638,6 +638,16 @@ config TOUCHSCREEN_MTOUCH
          To compile this driver as a module, choose M here: the
          module will be called mtouch.
 
+config TOUCHSCREEN_IMAGIS
+       tristate "Imagis touchscreen support"
+       depends on I2C
+       help
+         Say Y here if you have an Imagis IST30xxC touchscreen.
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called imagis.
+
 config TOUCHSCREEN_IMX6UL_TSC
        tristate "Freescale i.MX6UL touchscreen controller"
        depends on ((OF && GPIOLIB) || COMPILE_TEST) && HAS_IOMEM
index 39a8127..557f84f 100644 (file)
@@ -49,6 +49,7 @@ obj-$(CONFIG_TOUCHSCREEN_GOODIX)      += goodix_ts.o
 obj-$(CONFIG_TOUCHSCREEN_HIDEEP)       += hideep.o
 obj-$(CONFIG_TOUCHSCREEN_ILI210X)      += ili210x.o
 obj-$(CONFIG_TOUCHSCREEN_ILITEK)       += ilitek_ts_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_IMAGIS)       += imagis.o
 obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC)   += imx6ul_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)       += inexio.o
 obj-$(CONFIG_TOUCHSCREEN_IPROC)                += bcm_iproc_tsc.o
index 752e8ba..3ad9870 100644 (file)
@@ -298,32 +298,17 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
        return -ENOMSG;
 }
 
-static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
+static int goodix_create_pen_input(struct goodix_ts_data *ts)
 {
        struct device *dev = &ts->client->dev;
        struct input_dev *input;
 
        input = devm_input_allocate_device(dev);
        if (!input)
-               return NULL;
-
-       input_alloc_absinfo(input);
-       if (!input->absinfo) {
-               input_free_device(input);
-               return NULL;
-       }
-
-       input->absinfo[ABS_X] = ts->input_dev->absinfo[ABS_MT_POSITION_X];
-       input->absinfo[ABS_Y] = ts->input_dev->absinfo[ABS_MT_POSITION_Y];
-       __set_bit(ABS_X, input->absbit);
-       __set_bit(ABS_Y, input->absbit);
-       input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+               return -ENOMEM;
 
-       input_set_capability(input, EV_KEY, BTN_TOUCH);
-       input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
-       input_set_capability(input, EV_KEY, BTN_STYLUS);
-       input_set_capability(input, EV_KEY, BTN_STYLUS2);
-       __set_bit(INPUT_PROP_DIRECT, input->propbit);
+       input_copy_abs(input, ABS_X, ts->input_dev, ABS_MT_POSITION_X);
+       input_copy_abs(input, ABS_Y, ts->input_dev, ABS_MT_POSITION_Y);
        /*
         * The resolution of these touchscreens is about 10 units/mm, the actual
         * resolution does not matter much since we set INPUT_PROP_DIRECT.
@@ -331,6 +316,13 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
         */
        input_abs_set_res(input, ABS_X, 10);
        input_abs_set_res(input, ABS_Y, 10);
+       input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+
+       input_set_capability(input, EV_KEY, BTN_TOUCH);
+       input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+       input_set_capability(input, EV_KEY, BTN_STYLUS);
+       input_set_capability(input, EV_KEY, BTN_STYLUS2);
+       __set_bit(INPUT_PROP_DIRECT, input->propbit);
 
        input->name = "Goodix Active Pen";
        input->phys = "input/pen";
@@ -340,25 +332,23 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
                input->id.product = 0x1001;
        input->id.version = ts->version;
 
-       if (input_register_device(input) != 0) {
-               input_free_device(input);
-               return NULL;
-       }
-
-       return input;
+       ts->input_pen = input;
+       return 0;
 }
 
 static void goodix_ts_report_pen_down(struct goodix_ts_data *ts, u8 *data)
 {
-       int input_x, input_y, input_w;
+       int input_x, input_y, input_w, error;
        u8 key_value;
 
-       if (!ts->input_pen) {
-               ts->input_pen = goodix_create_pen_input(ts);
-               if (!ts->input_pen)
-                       return;
+       if (!ts->pen_input_registered) {
+               error = input_register_device(ts->input_pen);
+               ts->pen_input_registered = (error == 0) ? 1 : error;
        }
 
+       if (ts->pen_input_registered < 0)
+               return;
+
        if (ts->contact_size == 9) {
                input_x = get_unaligned_le16(&data[4]);
                input_y = get_unaligned_le16(&data[6]);
@@ -1215,6 +1205,17 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
                return error;
        }
 
+       /*
+        * Create the input_pen device before goodix_request_irq() calls
+        * devm_request_threaded_irq() so that the devm framework frees
+        * it after disabling the irq.
+        * Unfortunately there is no way to detect if the touchscreen has pen
+        * support, so registering the dev is delayed till the first pen event.
+        */
+       error = goodix_create_pen_input(ts);
+       if (error)
+               return error;
+
        ts->irq_flags = goodix_irq_flags[ts->int_trigger_type] | IRQF_ONESHOT;
        error = goodix_request_irq(ts);
        if (error) {
index fa8602e..87797cc 100644 (file)
@@ -94,6 +94,7 @@ struct goodix_ts_data {
        u16 version;
        bool reset_controller_at_probe;
        bool load_cfg_from_disk;
+       int pen_input_registered;
        struct completion firmware_loading_complete;
        unsigned long irq_flags;
        enum goodix_irq_pin_access_method irq_pin_access_method;
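
pen_input_registered is effectively a tri-state: 0 until the first pen event arrives, 1 once input_register_device() succeeds, and a negative errno if registration failed. A condensed sketch of that pattern, not the driver's literal code:

/* Sketch of the lazy pen registration used in goodix_ts_report_pen_down(). */
static void example_report_pen_down(struct goodix_ts_data *ts)
{
	int error;

	if (!ts->pen_input_registered) {
		/* ts->input_pen was allocated earlier in goodix_configure_dev() */
		error = input_register_device(ts->input_pen);
		ts->pen_input_registered = (error == 0) ? 1 : error;
	}

	if (ts->pen_input_registered < 0)
		return;	/* registration failed once, keep dropping pen events */

	/* ... decode coordinates and report them via ts->input_pen ... */
}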
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
new file mode 100644 (file)
index 0000000..e2697e6
--- /dev/null
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#define IST3038C_HIB_ACCESS            (0x800B << 16)
+#define IST3038C_DIRECT_ACCESS         BIT(31)
+#define IST3038C_REG_CHIPID            0x40001000
+#define IST3038C_REG_HIB_BASE          0x30000100
+#define IST3038C_REG_TOUCH_STATUS      (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS)
+#define IST3038C_REG_TOUCH_COORD       (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x8)
+#define IST3038C_REG_INTR_MESSAGE      (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x4)
+#define IST3038C_WHOAMI                        0x38c
+#define IST3038C_CHIP_ON_DELAY_MS      60
+#define IST3038C_I2C_RETRY_COUNT       3
+#define IST3038C_MAX_FINGER_NUM                10
+#define IST3038C_X_MASK                        GENMASK(23, 12)
+#define IST3038C_X_SHIFT               12
+#define IST3038C_Y_MASK                        GENMASK(11, 0)
+#define IST3038C_AREA_MASK             GENMASK(27, 24)
+#define IST3038C_AREA_SHIFT            24
+#define IST3038C_FINGER_COUNT_MASK     GENMASK(15, 12)
+#define IST3038C_FINGER_COUNT_SHIFT    12
+#define IST3038C_FINGER_STATUS_MASK    GENMASK(9, 0)
+
+struct imagis_ts {
+       struct i2c_client *client;
+       struct input_dev *input_dev;
+       struct touchscreen_properties prop;
+       struct regulator_bulk_data supplies[2];
+};
+
+static int imagis_i2c_read_reg(struct imagis_ts *ts,
+                              unsigned int reg, u32 *data)
+{
+       __be32 ret_be;
+       __be32 reg_be = cpu_to_be32(reg);
+       struct i2c_msg msg[] = {
+               {
+                       .addr = ts->client->addr,
+                       .flags = 0,
+                       .buf = (unsigned char *)&reg_be,
+                       .len = sizeof(reg_be),
+               }, {
+                       .addr = ts->client->addr,
+                       .flags = I2C_M_RD,
+                       .buf = (unsigned char *)&ret_be,
+                       .len = sizeof(ret_be),
+               },
+       };
+       int ret, error;
+       int retry = IST3038C_I2C_RETRY_COUNT;
+
+       /* Retry in case the controller fails to respond */
+       do {
+               ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+               if (ret == ARRAY_SIZE(msg)) {
+                       *data = be32_to_cpu(ret_be);
+                       return 0;
+               }
+
+               error = ret < 0 ? ret : -EIO;
+               dev_err(&ts->client->dev,
+                       "%s - i2c_transfer failed: %d (%d)\n",
+                       __func__, error, ret);
+       } while (--retry);
+
+       return error;
+}
+
+static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+{
+       struct imagis_ts *ts = dev_id;
+       u32 intr_message, finger_status;
+       unsigned int finger_count, finger_pressed;
+       int i;
+       int error;
+
+       error = imagis_i2c_read_reg(ts, IST3038C_REG_INTR_MESSAGE,
+                                   &intr_message);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "failed to read the interrupt message: %d\n", error);
+               goto out;
+       }
+
+       finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
+                               IST3038C_FINGER_COUNT_SHIFT;
+       if (finger_count > IST3038C_MAX_FINGER_NUM) {
+               dev_err(&ts->client->dev,
+                       "finger count %d is more than maximum supported\n",
+                       finger_count);
+               goto out;
+       }
+
+       finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
+
+       for (i = 0; i < finger_count; i++) {
+               error = imagis_i2c_read_reg(ts,
+                                           IST3038C_REG_TOUCH_COORD + (i * 4),
+                                           &finger_status);
+               if (error) {
+                       dev_err(&ts->client->dev,
+                               "failed to read coordinates for finger %d: %d\n",
+                               i, error);
+                       goto out;
+               }
+
+               input_mt_slot(ts->input_dev, i);
+               input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+                                          finger_pressed & BIT(i));
+               touchscreen_report_pos(ts->input_dev, &ts->prop,
+                                      (finger_status & IST3038C_X_MASK) >>
+                                               IST3038C_X_SHIFT,
+                                      finger_status & IST3038C_Y_MASK, 1);
+               input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+                                (finger_status & IST3038C_AREA_MASK) >>
+                                       IST3038C_AREA_SHIFT);
+       }
+
+       input_mt_sync_frame(ts->input_dev);
+       input_sync(ts->input_dev);
+
+out:
+       return IRQ_HANDLED;
+}
+
+static void imagis_power_off(void *_ts)
+{
+       struct imagis_ts *ts = _ts;
+
+       regulator_bulk_disable(ARRAY_SIZE(ts->supplies), ts->supplies);
+}
+
+static int imagis_power_on(struct imagis_ts *ts)
+{
+       int error;
+
+       error = regulator_bulk_enable(ARRAY_SIZE(ts->supplies), ts->supplies);
+       if (error)
+               return error;
+
+       msleep(IST3038C_CHIP_ON_DELAY_MS);
+
+       return 0;
+}
+
+static int imagis_start(struct imagis_ts *ts)
+{
+       int error;
+
+       error = imagis_power_on(ts);
+       if (error)
+               return error;
+
+       enable_irq(ts->client->irq);
+
+       return 0;
+}
+
+static int imagis_stop(struct imagis_ts *ts)
+{
+       disable_irq(ts->client->irq);
+
+       imagis_power_off(ts);
+
+       return 0;
+}
+
+static int imagis_input_open(struct input_dev *dev)
+{
+       struct imagis_ts *ts = input_get_drvdata(dev);
+
+       return imagis_start(ts);
+}
+
+static void imagis_input_close(struct input_dev *dev)
+{
+       struct imagis_ts *ts = input_get_drvdata(dev);
+
+       imagis_stop(ts);
+}
+
+static int imagis_init_input_dev(struct imagis_ts *ts)
+{
+       struct input_dev *input_dev;
+       int error;
+
+       input_dev = devm_input_allocate_device(&ts->client->dev);
+       if (!input_dev)
+               return -ENOMEM;
+
+       ts->input_dev = input_dev;
+
+       input_dev->name = "Imagis capacitive touchscreen";
+       input_dev->phys = "input/ts";
+       input_dev->id.bustype = BUS_I2C;
+       input_dev->open = imagis_input_open;
+       input_dev->close = imagis_input_close;
+
+       input_set_drvdata(input_dev, ts);
+
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+       touchscreen_parse_properties(input_dev, true, &ts->prop);
+       if (!ts->prop.max_x || !ts->prop.max_y) {
+               dev_err(&ts->client->dev,
+                       "Touchscreen-size-x and/or touchscreen-size-y not set in dts\n");
+               return -EINVAL;
+       }
+
+       error = input_mt_init_slots(input_dev,
+                                   IST3038C_MAX_FINGER_NUM,
+                                   INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "Failed to initialize MT slots: %d", error);
+               return error;
+       }
+
+       error = input_register_device(input_dev);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "Failed to register input device: %d", error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int imagis_init_regulators(struct imagis_ts *ts)
+{
+       struct i2c_client *client = ts->client;
+
+       ts->supplies[0].supply = "vdd";
+       ts->supplies[1].supply = "vddio";
+       return devm_regulator_bulk_get(&client->dev,
+                                      ARRAY_SIZE(ts->supplies),
+                                      ts->supplies);
+}
+
+static int imagis_probe(struct i2c_client *i2c)
+{
+       struct device *dev = &i2c->dev;
+       struct imagis_ts *ts;
+       int chip_id, error;
+
+       ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+       if (!ts)
+               return -ENOMEM;
+
+       ts->client = i2c;
+
+       error = imagis_init_regulators(ts);
+       if (error) {
+               dev_err(dev, "regulator init error: %d\n", error);
+               return error;
+       }
+
+       error = imagis_power_on(ts);
+       if (error) {
+               dev_err(dev, "failed to enable regulators: %d\n", error);
+               return error;
+       }
+
+       error = devm_add_action_or_reset(dev, imagis_power_off, ts);
+       if (error) {
+               dev_err(dev, "failed to install poweroff action: %d\n", error);
+               return error;
+       }
+
+       error = imagis_i2c_read_reg(ts,
+                       IST3038C_REG_CHIPID | IST3038C_DIRECT_ACCESS,
+                       &chip_id);
+       if (error) {
+               dev_err(dev, "chip ID read failure: %d\n", error);
+               return error;
+       }
+
+       if (chip_id != IST3038C_WHOAMI) {
+               dev_err(dev, "unknown chip ID: 0x%x\n", chip_id);
+               return -EINVAL;
+       }
+
+       error = devm_request_threaded_irq(dev, i2c->irq,
+                                         NULL, imagis_interrupt,
+                                         IRQF_ONESHOT | IRQF_NO_AUTOEN,
+                                         "imagis-touchscreen", ts);
+       if (error) {
+               dev_err(dev, "IRQ %d allocation failure: %d\n",
+                       i2c->irq, error);
+               return error;
+       }
+
+       error = imagis_init_input_dev(ts);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+static int __maybe_unused imagis_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct imagis_ts *ts = i2c_get_clientdata(client);
+       int retval = 0;
+
+       mutex_lock(&ts->input_dev->mutex);
+
+       if (input_device_enabled(ts->input_dev))
+               retval = imagis_stop(ts);
+
+       mutex_unlock(&ts->input_dev->mutex);
+
+       return retval;
+}
+
+static int __maybe_unused imagis_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct imagis_ts *ts = i2c_get_clientdata(client);
+       int retval = 0;
+
+       mutex_lock(&ts->input_dev->mutex);
+
+       if (input_device_enabled(ts->input_dev))
+               retval = imagis_start(ts);
+
+       mutex_unlock(&ts->input_dev->mutex);
+
+       return retval;
+}
+
+static SIMPLE_DEV_PM_OPS(imagis_pm_ops, imagis_suspend, imagis_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id imagis_of_match[] = {
+       { .compatible = "imagis,ist3038c", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, imagis_of_match);
+#endif
+
+static struct i2c_driver imagis_ts_driver = {
+       .driver = {
+               .name = "imagis-touchscreen",
+               .pm = &imagis_pm_ops,
+               .of_match_table = of_match_ptr(imagis_of_match),
+       },
+       .probe_new = imagis_probe,
+};
+
+module_i2c_driver(imagis_ts_driver);
+
+MODULE_DESCRIPTION("Imagis IST3038C Touchscreen Driver");
+MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
+MODULE_LICENSE("GPL");
index b3fa712..34c4cca 100644 (file)
@@ -486,11 +486,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
 {
        struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
        struct touchscreen_properties *prop = &iqs5xx->prop;
-       struct input_dev *input;
+       struct input_dev *input = iqs5xx->input;
        u16 max_x, max_y;
        int error;
 
-       if (!iqs5xx->input) {
+       if (!input) {
                input = devm_input_allocate_device(&client->dev);
                if (!input)
                        return -ENOMEM;
@@ -512,11 +512,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
        if (error)
                return error;
 
-       input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
-       input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
-       input_set_abs_params(iqs5xx->input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+       input_set_abs_params(input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
 
-       touchscreen_parse_properties(iqs5xx->input, true, prop);
+       touchscreen_parse_properties(input, true, prop);
 
        /*
         * The device reserves 0xFFFF for coordinates that correspond to slots
@@ -540,7 +540,7 @@ static int iqs5xx_axis_init(struct i2c_client *client)
                        return error;
        }
 
-       error = input_mt_init_slots(iqs5xx->input, IQS5XX_NUM_CONTACTS,
+       error = input_mt_init_slots(input, IQS5XX_NUM_CONTACTS,
                                    INPUT_MT_DIRECT);
        if (error)
                dev_err(&client->dev, "Failed to initialize slots: %d\n",
@@ -674,7 +674,7 @@ static irqreturn_t iqs5xx_irq(int irq, void *data)
                input_mt_slot(input, i);
                if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
                                               pressure != 0)) {
-                       touchscreen_report_pos(iqs5xx->input, &iqs5xx->prop,
+                       touchscreen_report_pos(input, &iqs5xx->prop,
                                               be16_to_cpu(touch_data->abs_x),
                                               be16_to_cpu(touch_data->abs_y),
                                               true);
index bc11203..72e0b76 100644 (file)
@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
 
        err = pm_runtime_get_sync(&sdata->client->dev);
        if (err < 0)
-               return err;
+               goto out;
 
        err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
        if (err)
-               return err;
+               goto out;
 
        mutex_lock(&sdata->mutex);
        sdata->running = true;
@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
                                 "failed to enable touchkey\n");
        }
 
-       return 0;
+out:
+       pm_runtime_put_noidle(&sdata->client->dev);
+       return err;
 }
 
 static void stmfts_input_close(struct input_dev *dev)
index 27810f6..72c7258 100644 (file)
@@ -88,6 +88,8 @@ struct tsc200x {
        int                     in_z1;
        int                     in_z2;
 
+       struct touchscreen_properties prop;
+
        spinlock_t              lock;
        struct timer_list       penup_timer;
 
@@ -113,8 +115,7 @@ static void tsc200x_update_pen_state(struct tsc200x *ts,
                                     int x, int y, int pressure)
 {
        if (pressure) {
-               input_report_abs(ts->idev, ABS_X, x);
-               input_report_abs(ts->idev, ABS_Y, y);
+               touchscreen_report_pos(ts->idev, &ts->prop, x, y, false);
                input_report_abs(ts->idev, ABS_PRESSURE, pressure);
                if (!ts->pen_down) {
                        input_report_key(ts->idev, BTN_TOUCH, !!pressure);
@@ -533,7 +534,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
        input_set_abs_params(input_dev, ABS_PRESSURE,
                             0, MAX_12BIT, TSC200X_DEF_P_FUZZ, 0);
 
-       touchscreen_parse_properties(input_dev, false, NULL);
+       touchscreen_parse_properties(input_dev, false, &ts->prop);
 
        /* Ensure the touchscreen is off */
        tsc200x_stop_scan(ts);
diff --git a/drivers/input/vivaldi-fmap.c b/drivers/input/vivaldi-fmap.c
new file mode 100644 (file)
index 0000000..6dae83d
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS Vivaldi keyboard function row mapping
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+/**
+ * vivaldi_function_row_physmap_show - Print vivaldi function row physmap attribute
+ * @data: The vivaldi function row map
+ * @buf: Buffer to print the function row physmap to
+ */
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+                                         char *buf)
+{
+       ssize_t size = 0;
+       int i;
+       const u32 *physmap = data->function_row_physmap;
+
+       if (!data->num_function_row_keys)
+               return 0;
+
+       for (i = 0; i < data->num_function_row_keys; i++)
+               size += scnprintf(buf + size, PAGE_SIZE - size,
+                                 "%s%02X", size ? " " : "", physmap[i]);
+       if (size)
+               size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+       return size;
+}
+EXPORT_SYMBOL_GPL(vivaldi_function_row_physmap_show);
+
+MODULE_LICENSE("GPL");
index 4081cb6..4277853 100644 (file)
@@ -210,7 +210,7 @@ struct dm_table {
 #define DM_TIO_MAGIC 28714
 struct dm_target_io {
        unsigned short magic;
-       unsigned short flags;
+       blk_short_t flags;
        unsigned int target_bio_nr;
        struct dm_io *io;
        struct dm_target *ti;
@@ -244,7 +244,7 @@ static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
 #define DM_IO_MAGIC 19577
 struct dm_io {
        unsigned short magic;
-       unsigned short flags;
+       blk_short_t flags;
        atomic_t io_count;
        struct mapped_device *md;
        struct bio *orig_bio;
index c58a511..ad2d5fa 100644 (file)
@@ -2472,9 +2472,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
                                        dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
                                        sec &= ~(sector_t)(ic->sectors_per_block - 1);
                                }
+                               if (unlikely(sec >= ic->provided_data_sectors)) {
+                                       journal_entry_set_unused(je);
+                                       continue;
+                               }
                        }
-                       if (unlikely(sec >= ic->provided_data_sectors))
-                               continue;
                        get_area_and_offset(ic, sec, &area, &offset);
                        restore_last_bytes(ic, access_journal_data(ic, i, j), je);
                        for (k = j + 1; k < ic->journal_section_entries; k++) {
index 901abd6..87310fc 100644 (file)
@@ -891,15 +891,21 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
        struct hash_cell *hc = NULL;
 
        if (*param->uuid) {
-               if (*param->name || param->dev)
+               if (*param->name || param->dev) {
+                       DMERR("Invalid ioctl structure: uuid %s, name %s, dev %llx",
+                             param->uuid, param->name, (unsigned long long)param->dev);
                        return NULL;
+               }
 
                hc = __get_uuid_cell(param->uuid);
                if (!hc)
                        return NULL;
        } else if (*param->name) {
-               if (param->dev)
+               if (param->dev) {
+                       DMERR("Invalid ioctl structure: name %s, dev %llx",
+                             param->name, (unsigned long long)param->dev);
                        return NULL;
+               }
 
                hc = __get_name_cell(param->name);
                if (!hc)
@@ -1851,8 +1857,11 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
        if (copy_from_user(param_kernel, user, minimum_data_size))
                return -EFAULT;
 
-       if (param_kernel->data_size < minimum_data_size)
+       if (param_kernel->data_size < minimum_data_size) {
+               DMERR("Invalid data size in the ioctl structure: %u",
+                     param_kernel->data_size);
                return -EINVAL;
+       }
 
        secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
 
index ad2e0bb..3c5fad7 100644 (file)
@@ -892,13 +892,19 @@ static void dm_io_complete(struct dm_io *io)
        if (unlikely(wq_has_sleeper(&md->wait)))
                wake_up(&md->wait);
 
-       if (io_error == BLK_STS_DM_REQUEUE) {
-               /*
-                * Upper layer won't help us poll split bio, io->orig_bio
-                * may only reflect a subset of the pre-split original,
-                * so clear REQ_POLLED in case of requeue
-                */
-               bio->bi_opf &= ~REQ_POLLED;
+       if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) {
+               if (bio->bi_opf & REQ_POLLED) {
+                       /*
+                        * Upper layer won't help us poll split bio (io->orig_bio
+                        * may only reflect a subset of the pre-split original)
+                        * so clear REQ_POLLED in case of requeue.
+                        */
+                       bio->bi_opf &= ~REQ_POLLED;
+                       if (io_error == BLK_STS_AGAIN) {
+                               /* io_uring doesn't handle BLK_STS_AGAIN (yet) */
+                               queue_io(md, bio);
+                       }
+               }
                return;
        }
 
index a7e3eb9..a32050f 100644 (file)
@@ -351,9 +351,6 @@ static ssize_t dev_attribute_show(struct device *dev,
         * we still can use 'ubi->ubi_num'.
         */
        ubi = container_of(dev, struct ubi_device, dev);
-       ubi = ubi_get_device(ubi->ubi_num);
-       if (!ubi)
-               return -ENODEV;
 
        if (attr == &dev_eraseblock_size)
                ret = sprintf(buf, "%d\n", ubi->leb_size);
@@ -382,7 +379,6 @@ static ssize_t dev_attribute_show(struct device *dev,
        else
                ret = -EINVAL;
 
-       ubi_put_device(ubi);
        return ret;
 }
 
@@ -979,9 +975,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
                        goto out_detach;
        }
 
-       /* Make device "available" before it becomes accessible via sysfs */
-       ubi_devices[ubi_num] = ubi;
-
        err = uif_init(ubi);
        if (err)
                goto out_detach;
@@ -1026,6 +1019,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
        wake_up_process(ubi->bgt_thread);
        spin_unlock(&ubi->wl_lock);
 
+       ubi_devices[ubi_num] = ubi;
        ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
        return ubi_num;
 
@@ -1034,7 +1028,6 @@ out_debugfs:
 out_uif:
        uif_close(ubi);
 out_detach:
-       ubi_devices[ubi_num] = NULL;
        ubi_wl_close(ubi);
        ubi_free_all_volumes(ubi);
        vfree(ubi->vtbl);
index 022af59..6b5f1ff 100644 (file)
@@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                        if (err == UBI_IO_FF_BITFLIPS)
                                scrub = 1;
 
-                       add_aeb(ai, free, pnum, ec, scrub);
+                       ret = add_aeb(ai, free, pnum, ec, scrub);
+                       if (ret)
+                               goto out;
                        continue;
                } else if (err == 0 || err == UBI_IO_BITFLIPS) {
                        dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 0);
+               ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 0);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from used list */
@@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 0);
+               ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 0);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from scrub list */
@@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 1);
+               ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 1);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from erase list */
@@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 1);
+               ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 1);
+               if (ret)
+                       goto fail;
        }
 
        ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
index 139ee13..1bc7b3a 100644 (file)
@@ -56,16 +56,11 @@ static ssize_t vol_attribute_show(struct device *dev,
 {
        int ret;
        struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
-       struct ubi_device *ubi;
-
-       ubi = ubi_get_device(vol->ubi->ubi_num);
-       if (!ubi)
-               return -ENODEV;
+       struct ubi_device *ubi = vol->ubi;
 
        spin_lock(&ubi->volumes_lock);
        if (!ubi->volumes[vol->vol_id]) {
                spin_unlock(&ubi->volumes_lock);
-               ubi_put_device(ubi);
                return -ENODEV;
        }
        /* Take a reference to prevent volume removal */
@@ -103,7 +98,6 @@ static ssize_t vol_attribute_show(struct device *dev,
        vol->ref_count -= 1;
        ubi_assert(vol->ref_count >= 0);
        spin_unlock(&ubi->volumes_lock);
-       ubi_put_device(ubi);
        return ret;
 }
 
index 11f26b0..87838cb 100644 (file)
@@ -169,6 +169,24 @@ struct receive_queue {
        struct xdp_rxq_info xdp_rxq;
 };
 
+/* This structure can hold an RSS message with the maximum settings for the
+ * indirection table and key size. Note that the default structure describing
+ * the RSS configuration, virtio_net_rss_config, carries the same information
+ * but cannot hold the table values. In any case, the structure is passed to
+ * the virtio hw through sg_buf, split into parts because the table sizes may
+ * differ according to the device configuration. */
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
+struct virtio_net_ctrl_rss {
+       u32 hash_types;
+       u16 indirection_table_mask;
+       u16 unclassified_queue;
+       u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
+       u16 max_tx_vq;
+       u8 hash_key_length;
+       u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+};
+
 /* Control VQ buffers: protected by the rtnl lock */
 struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
@@ -178,6 +196,7 @@ struct control_buf {
        u8 allmulti;
        __virtio16 vid;
        __virtio64 offloads;
+       struct virtio_net_ctrl_rss rss;
 };
 
 struct virtnet_info {
@@ -206,6 +225,14 @@ struct virtnet_info {
        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;
 
+       /* Host supports rss and/or hash report */
+       bool has_rss;
+       bool has_rss_hash_report;
+       u8 rss_key_size;
+       u16 rss_indir_table_size;
+       u32 rss_hash_types_supported;
+       u32 rss_hash_types_saved;
+
        /* Has control virtqueue */
        bool has_cvq;
 
@@ -242,13 +269,13 @@ struct virtnet_info {
 };
 
 struct padded_vnet_hdr {
-       struct virtio_net_hdr_mrg_rxbuf hdr;
+       struct virtio_net_hdr_v1_hash hdr;
        /*
         * hdr is in a separate sg buffer, and data sg buffer shares same page
         * with this header sg. This padding makes next sg 16 byte aligned
         * after the header.
         */
-       char padding[4];
+       char padding[12];
 };
 
 static bool is_xdp_frame(void *ptr)
@@ -396,7 +423,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
-               hdr_padded_len = sizeof(*hdr);
+               hdr_padded_len = hdr_len;
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
@@ -1123,6 +1150,35 @@ xdp_xmit:
        return NULL;
 }
 
+static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
+                               struct sk_buff *skb)
+{
+       enum pkt_hash_types rss_hash_type;
+
+       if (!hdr_hash || !skb)
+               return;
+
+       switch ((int)hdr_hash->hash_report) {
+       case VIRTIO_NET_HASH_REPORT_TCPv4:
+       case VIRTIO_NET_HASH_REPORT_UDPv4:
+       case VIRTIO_NET_HASH_REPORT_TCPv6:
+       case VIRTIO_NET_HASH_REPORT_UDPv6:
+       case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
+       case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
+               rss_hash_type = PKT_HASH_TYPE_L4;
+               break;
+       case VIRTIO_NET_HASH_REPORT_IPv4:
+       case VIRTIO_NET_HASH_REPORT_IPv6:
+       case VIRTIO_NET_HASH_REPORT_IPv6_EX:
+               rss_hash_type = PKT_HASH_TYPE_L3;
+               break;
+       case VIRTIO_NET_HASH_REPORT_NONE:
+       default:
+               rss_hash_type = PKT_HASH_TYPE_NONE;
+       }
+       skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+}
+
 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        void *buf, unsigned int len, void **ctx,
                        unsigned int *xdp_xmit,
@@ -1157,6 +1213,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                return;
 
        hdr = skb_vnet_hdr(skb);
+       if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+               virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
 
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1266,7 +1324,8 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
                                          struct ewma_pkt_len *avg_pkt_len,
                                          unsigned int room)
 {
-       const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+       struct virtnet_info *vi = rq->vq->vdev->priv;
+       const size_t hdr_len = vi->hdr_len;
        unsigned int len;
 
        if (room)
@@ -2183,6 +2242,174 @@ static void virtnet_get_ringparam(struct net_device *dev,
        ring->tx_pending = ring->tx_max_pending;
 }
 
+static bool virtnet_commit_rss_command(struct virtnet_info *vi)
+{
+       struct net_device *dev = vi->dev;
+       struct scatterlist sgs[4];
+       unsigned int sg_buf_size;
+
+       /* prepare sgs */
+       sg_init_table(sgs, 4);
+
+       sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
+       sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+
+       sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
+       sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+
+       sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
+                       - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
+       sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+
+       sg_buf_size = vi->rss_key_size;
+       sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+
+       if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+                                 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
+                                 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
+               dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+               return false;
+       }
+       return true;
+}
+
+static void virtnet_init_default_rss(struct virtnet_info *vi)
+{
+       u32 indir_val = 0;
+       int i = 0;
+
+       vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+       vi->rss_hash_types_saved = vi->rss_hash_types_supported;
+       vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+                                               ? vi->rss_indir_table_size - 1 : 0;
+       vi->ctrl->rss.unclassified_queue = 0;
+
+       for (; i < vi->rss_indir_table_size; ++i) {
+               indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
+               vi->ctrl->rss.indirection_table[i] = indir_val;
+       }
+
+       vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
+       vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+
+       netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+}
+
+static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+       info->data = 0;
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case TCP_V6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case UDP_V4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case UDP_V6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case IPV4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+               break;
+       case IPV6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+               break;
+       default:
+               info->data = 0;
+               break;
+       }
+}
+
+static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+       u32 new_hashtypes = vi->rss_hash_types_saved;
+       bool is_disable = info->data & RXH_DISCARD;
+       bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
+
+       /* supports only 'sd', 'sdfn' and 'r' */
+       if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
+               return false;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
+               break;
+       case UDP_V4_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
+               break;
+       case IPV4_FLOW:
+               new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+               if (!is_disable)
+                       new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+               break;
+       case TCP_V6_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
+               break;
+       case UDP_V6_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
+               break;
+       case IPV6_FLOW:
+               new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+               if (!is_disable)
+                       new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+               break;
+       default:
+               /* unsupported flow */
+               return false;
+       }
+
+       /* if unsupported hashtype was set */
+       if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
+               return false;
+
+       if (new_hashtypes != vi->rss_hash_types_saved) {
+               vi->rss_hash_types_saved = new_hashtypes;
+               vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+               if (vi->dev->features & NETIF_F_RXHASH)
+                       return virtnet_commit_rss_command(vi);
+       }
+
+       return true;
+}
 
 static void virtnet_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
@@ -2411,6 +2638,92 @@ static void virtnet_update_settings(struct virtnet_info *vi)
                vi->duplex = duplex;
 }
 
+static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
+{
+       return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
+}
+
+static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
+{
+       return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
+}
+
+static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int i;
+
+       if (indir) {
+               for (i = 0; i < vi->rss_indir_table_size; ++i)
+                       indir[i] = vi->ctrl->rss.indirection_table[i];
+       }
+
+       if (key)
+               memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       return 0;
+}
+
+static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int i;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (indir) {
+               for (i = 0; i < vi->rss_indir_table_size; ++i)
+                       vi->ctrl->rss.indirection_table[i] = indir[i];
+       }
+       if (key)
+               memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
+
+       virtnet_commit_rss_command(vi);
+
+       return 0;
+}
+
+static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int rc = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = vi->curr_queue_pairs;
+               break;
+       case ETHTOOL_GRXFH:
+               virtnet_get_hashflow(vi, info);
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       return rc;
+}
+
+static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int rc = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXFH:
+               if (!virtnet_set_hashflow(vi, info))
+                       rc = -EINVAL;
+
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       return rc;
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
        .get_drvinfo = virtnet_get_drvinfo,
@@ -2426,6 +2739,12 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_link_ksettings = virtnet_set_link_ksettings,
        .set_coalesce = virtnet_set_coalesce,
        .get_coalesce = virtnet_get_coalesce,
+       .get_rxfh_key_size = virtnet_get_rxfh_key_size,
+       .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
+       .get_rxfh = virtnet_get_rxfh,
+       .set_rxfh = virtnet_set_rxfh,
+       .get_rxnfc = virtnet_get_rxnfc,
+       .set_rxnfc = virtnet_set_rxnfc,
 };
 
 static void virtnet_freeze_down(struct virtio_device *vdev)
@@ -2678,6 +2997,16 @@ static int virtnet_set_features(struct net_device *dev,
                vi->guest_offloads = offloads;
        }
 
+       if ((dev->features ^ features) & NETIF_F_RXHASH) {
+               if (features & NETIF_F_RXHASH)
+                       vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+               else
+                       vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+
+               if (!virtnet_commit_rss_command(vi))
+                       return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -2851,7 +3180,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
  */
 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
 {
-       const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+       const unsigned int hdr_len = vi->hdr_len;
        unsigned int rq_size = virtqueue_get_vring_size(vq);
        unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
        unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
@@ -3072,6 +3401,10 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
                             "VIRTIO_NET_F_CTRL_VQ") ||
             VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
             VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
                             "VIRTIO_NET_F_CTRL_VQ"))) {
                return false;
        }
@@ -3112,13 +3445,14 @@ static int virtnet_probe(struct virtio_device *vdev)
        u16 max_queue_pairs;
        int mtu;
 
-       /* Find if host supports multiqueue virtio_net device */
-       err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
-                                  struct virtio_net_config,
-                                  max_virtqueue_pairs, &max_queue_pairs);
+       /* Find if host supports multiqueue/rss virtio_net device */
+       max_queue_pairs = 1;
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+               max_queue_pairs =
+                    virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
 
        /* We need at least 2 queue's */
-       if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+       if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
            max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
            !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
                max_queue_pairs = 1;
@@ -3206,8 +3540,33 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;
 
-       if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
-           virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+               vi->has_rss_hash_report = true;
+
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+               vi->has_rss = true;
+
+       if (vi->has_rss || vi->has_rss_hash_report) {
+               vi->rss_indir_table_size =
+                       virtio_cread16(vdev, offsetof(struct virtio_net_config,
+                               rss_max_indirection_table_length));
+               vi->rss_key_size =
+                       virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+
+               vi->rss_hash_types_supported =
+                   virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
+               vi->rss_hash_types_supported &=
+                               ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
+                                 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
+                                 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
+
+               dev->hw_features |= NETIF_F_RXHASH;
+       }
+
+       if (vi->has_rss_hash_report)
+               vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
+       else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+                virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        else
                vi->hdr_len = sizeof(struct virtio_net_hdr);
@@ -3274,6 +3633,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                }
        }
 
+       if (vi->has_rss || vi->has_rss_hash_report)
+               virtnet_init_default_rss(vi);
+
        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
@@ -3405,7 +3767,8 @@ static struct virtio_device_id id_table[] = {
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
        VIRTIO_NET_F_CTRL_MAC_ADDR, \
        VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
-       VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
+       VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
+       VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
 
 static unsigned int features[] = {
        VIRTNET_FEATURES,
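
The new get/set_rxnfc and rxfh hooks added above are reached through the
standard ethtool ioctl path. As a rough userspace sketch (the interface name
and error handling are assumptions for illustration, not part of this series),
enabling the TCP/IPv4 L3+L4 hash could look like this:

/* Userspace sketch: request the TCP/IPv4 L3+L4 hash via ETHTOOL_SRXFH.
 * "eth0" is an assumed interface name for illustration only.
 */
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = TCP_V4_FLOW,
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXFH");

	close(fd);
	return 0;
}

Setting .data to RXH_DISCARD instead corresponds to the 'r' case handled by
virtnet_set_hashflow() above.
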
index 677fa4b..efb85c6 100644 (file)
@@ -1830,9 +1830,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
        nvme_config_discard(disk, ns);
        blk_queue_max_write_zeroes_sectors(disk->queue,
                                           ns->ctrl->max_zeroes_sectors);
-
-       set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
-               test_bit(NVME_NS_FORCE_RO, &ns->flags));
 }
 
 static inline bool nvme_first_scan(struct gendisk *disk)
@@ -1891,6 +1888,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
                        goto out_unfreeze;
        }
 
+       set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
+               test_bit(NVME_NS_FORCE_RO, &ns->flags));
        set_bit(NVME_NS_READY, &ns->flags);
        blk_mq_unfreeze_queue(ns->disk->queue);
 
@@ -1903,6 +1902,9 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
        if (nvme_ns_head_multipath(ns->head)) {
                blk_mq_freeze_queue(ns->head->disk->queue);
                nvme_update_disk_info(ns->head->disk, ns, id);
+               set_disk_ro(ns->head->disk,
+                           (id->nsattr & NVME_NS_ATTR_RO) ||
+                                   test_bit(NVME_NS_FORCE_RO, &ns->flags));
                nvme_mpath_revalidate_paths(ns);
                blk_stack_limits(&ns->head->disk->queue->limits,
                                 &ns->queue->limits, 0);
@@ -3589,15 +3591,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
        NULL,
 };
 
-static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
                unsigned nsid)
 {
        struct nvme_ns_head *h;
 
-       lockdep_assert_held(&subsys->lock);
+       lockdep_assert_held(&ctrl->subsys->lock);
 
-       list_for_each_entry(h, &subsys->nsheads, entry) {
-               if (h->ns_id != nsid)
+       list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
+               /*
+                * Private namespaces can share NSIDs under some conditions.
+                * In that case we can't use the same ns_head for namespaces
+                * with the same NSID.
+                */
+               if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
                        continue;
                if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
                        return h;
@@ -3791,7 +3798,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
        }
 
        mutex_lock(&ctrl->subsys->lock);
-       head = nvme_find_ns_head(ctrl->subsys, nsid);
+       head = nvme_find_ns_head(ctrl, nsid);
        if (!head) {
                ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
                if (ret) {
@@ -3988,6 +3995,16 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        set_capacity(ns->disk, 0);
        nvme_fault_inject_fini(&ns->fault_inject);
 
+       /*
+        * Ensure that !NVME_NS_READY is seen by other threads to prevent
+        * this ns going back into current_path.
+        */
+       synchronize_srcu(&ns->head->srcu);
+
+       /* wait for concurrent submissions */
+       if (nvme_mpath_clear_current_path(ns))
+               synchronize_srcu(&ns->head->srcu);
+
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
        if (list_empty(&ns->head->list)) {
@@ -3999,10 +4016,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        /* guarantee not available in head->list */
        synchronize_rcu();
 
-       /* wait for concurrent submissions */
-       if (nvme_mpath_clear_current_path(ns))
-               synchronize_srcu(&ns->head->srcu);
-
        if (!nvme_ns_head_multipath(ns->head))
                nvme_cdev_del(&ns->cdev, &ns->cdev_device);
        del_gendisk(ns->disk);
@@ -4480,6 +4493,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
        if (ctrl->queue_count > 1) {
                nvme_queue_scan(ctrl);
                nvme_start_queues(ctrl);
+               nvme_mpath_update(ctrl);
        }
 
        nvme_change_uevent(ctrl, "NVME_EVENT=connected");
index 1b31f19..d464fdf 100644 (file)
@@ -482,10 +482,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 
        /*
         * Add a multipath node if the subsystems supports multiple controllers.
-        * We also do this for private namespaces as the namespace sharing data could
-        * change after a rescan.
+        * We also do this for private namespaces as the namespace sharing flag
+        * could change after a rescan.
         */
-       if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
+       if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+           !nvme_is_unique_nsid(ctrl, head) || !multipath)
                return 0;
 
        head->disk = blk_alloc_disk(ctrl->numa_node);
@@ -612,8 +613,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
-       if (nvme_state_is_live(ns->ana_state))
+       /*
+        * nvme_mpath_set_live() will trigger I/O to the multipath path device
+        * and in turn to this path device.  However we cannot accept this I/O
+        * if the controller is not live.  This may deadlock if called from
+        * nvme_mpath_init_identify() and the ctrl will never complete
+        * initialization, preventing I/O from completing.  For this case we
+        * will reprocess the ANA log page in nvme_mpath_update() once the
+        * controller is ready.
+        */
+       if (nvme_state_is_live(ns->ana_state) &&
+           ns->ctrl->state == NVME_CTRL_LIVE)
                nvme_mpath_set_live(ns);
 }
 
@@ -700,6 +710,18 @@ static void nvme_ana_work(struct work_struct *work)
        nvme_read_ana_log(ctrl);
 }
 
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+       u32 nr_change_groups = 0;
+
+       if (!ctrl->ana_log_buf)
+               return;
+
+       mutex_lock(&ctrl->ana_lock);
+       nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+       mutex_unlock(&ctrl->ana_lock);
+}
+
 static void nvme_anatt_timeout(struct timer_list *t)
 {
        struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
index f4b674a..1393bbf 100644 (file)
@@ -723,6 +723,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                return queue_live;
        return __nvme_check_ready(ctrl, rq, queue_live);
 }
+
+/*
+ * The NSID shall be unique for all shared namespaces, or if at least one of
+ * the following conditions is met:
+ *   1. Namespace Management is supported by the controller
+ *   2. ANA is supported by the controller
+ *   3. NVM Sets are supported by the controller
+ *
+ * Otherwise, private namespaces are not required to report a unique NSID.
+ */
+static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
+               struct nvme_ns_head *head)
+{
+       return head->shared ||
+               (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
+               (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
+               (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
+}
+
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -782,6 +801,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -853,6 +873,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
 "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
        return 0;
 }
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 }
index 2e98ac3..d817ca1 100644 (file)
@@ -45,7 +45,7 @@
 #define NVME_MAX_SEGS  127
 
 static int use_threaded_interrupts;
-module_param(use_threaded_interrupts, int, 0);
+module_param(use_threaded_interrupts, int, 0444);
 
 static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0444);
@@ -3467,7 +3467,10 @@ static const struct pci_device_id nvme_id_table[] = {
                                NVME_QUIRK_128_BYTES_SQES |
                                NVME_QUIRK_SHARED_TAGS |
                                NVME_QUIRK_SKIP_CID_GEN },
-
+       { PCI_DEVICE(0x144d, 0xa808),   /* Samsung X5 */
+               .driver_data =  NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+                               NVME_QUIRK_NO_DEEPEST_PS |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
index 46d0dab..397daaf 100644 (file)
@@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);
 
-       schedule_work(&ctrl->async_event_work);
+       queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 void nvmet_execute_keep_alive(struct nvmet_req *req)
index 8fedd1e..e44b298 100644 (file)
@@ -1555,7 +1555,7 @@ static void nvmet_port_release(struct config_item *item)
        struct nvmet_port *port = to_nvmet_port(item);
 
        /* Let inflight controllers teardown complete */
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
        list_del(&port->global_entry);
 
        kfree(port->ana_state);
index 64c2d2f..90e7532 100644 (file)
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
 /*
  * This read/write semaphore is used to synchronize access to configuration
  * information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
        list_add_tail(&aen->entry, &ctrl->async_events);
        mutex_unlock(&ctrl->lock);
 
-       schedule_work(&ctrl->async_event_work);
+       queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
        if (reset_tbkas) {
                pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                        ctrl->cntlid);
-               schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+               queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
                return;
        }
 
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);
 
-       schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+       queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1120,7 +1123,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
 
 static inline bool nvmet_css_supported(u8 cc_css)
 {
-       switch (cc_css <<= NVME_CC_CSS_SHIFT) {
+       switch (cc_css << NVME_CC_CSS_SHIFT) {
        case NVME_CC_CSS_NVM:
        case NVME_CC_CSS_CSI:
                return true;
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
        mutex_lock(&ctrl->lock);
        if (!(ctrl->csts & NVME_CSTS_CFS)) {
                ctrl->csts |= NVME_CSTS_CFS;
-               schedule_work(&ctrl->fatal_err_work);
+               queue_work(nvmet_wq, &ctrl->fatal_err_work);
        }
        mutex_unlock(&ctrl->lock);
 }
@@ -1619,9 +1622,15 @@ static int __init nvmet_init(void)
                goto out_free_zbd_work_queue;
        }
 
+       nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+       if (!nvmet_wq) {
+               error = -ENOMEM;
+               goto out_free_buffered_work_queue;
+       }
+
        error = nvmet_init_discovery();
        if (error)
-               goto out_free_work_queue;
+               goto out_free_nvmet_work_queue;
 
        error = nvmet_init_configfs();
        if (error)
@@ -1630,7 +1639,9 @@ static int __init nvmet_init(void)
 
 out_exit_discovery:
        nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+       destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
        destroy_workqueue(buffered_io_wq);
 out_free_zbd_work_queue:
        destroy_workqueue(zbd_wq);
@@ -1642,6 +1653,7 @@ static void __exit nvmet_exit(void)
        nvmet_exit_configfs();
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);
+       destroy_workqueue(nvmet_wq);
        destroy_workqueue(buffered_io_wq);
        destroy_workqueue(zbd_wq);
 
index de90001..ab2627e 100644 (file)
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
        list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                if (!nvmet_fc_tgt_a_get(assoc))
                        continue;
-               if (!schedule_work(&assoc->del_work))
+               if (!queue_work(nvmet_wq, &assoc->del_work))
                        /* already deleting - release local reference */
                        nvmet_fc_tgt_a_put(assoc);
        }
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
                        continue;
                assoc->hostport->invalid = 1;
                noassoc = false;
-               if (!schedule_work(&assoc->del_work))
+               if (!queue_work(nvmet_wq, &assoc->del_work))
                        /* already deleting - release local reference */
                        nvmet_fc_tgt_a_put(assoc);
        }
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
                nvmet_fc_tgtport_put(tgtport);
 
                if (found_ctrl) {
-                       if (!schedule_work(&assoc->del_work))
+                       if (!queue_work(nvmet_wq, &assoc->del_work))
                                /* already deleting - release local reference */
                                nvmet_fc_tgt_a_put(assoc);
                        return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
        iod->rqstdatalen = lsreqbuf_len;
        iod->hosthandle = hosthandle;
 
-       schedule_work(&iod->work);
+       queue_work(nvmet_wq, &iod->work);
 
        return 0;
 }
index 54606f1..5c16372 100644 (file)
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
                spin_lock(&rport->lock);
                list_add_tail(&rport->ls_list, &tls_req->ls_list);
                spin_unlock(&rport->lock);
-               schedule_work(&rport->ls_work);
+               queue_work(nvmet_wq, &rport->ls_work);
                return ret;
        }
 
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
                spin_lock(&rport->lock);
                list_add_tail(&rport->ls_list, &tls_req->ls_list);
                spin_unlock(&rport->lock);
-               schedule_work(&rport->ls_work);
+               queue_work(nvmet_wq, &rport->ls_work);
        }
 
        return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
                spin_lock(&tport->lock);
                list_add_tail(&tport->ls_list, &tls_req->ls_list);
                spin_unlock(&tport->lock);
-               schedule_work(&tport->ls_work);
+               queue_work(nvmet_wq, &tport->ls_work);
                return ret;
        }
 
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
                spin_lock(&tport->lock);
                list_add_tail(&tport->ls_list, &tls_req->ls_list);
                spin_unlock(&tport->lock);
-               schedule_work(&tport->ls_work);
+               queue_work(nvmet_wq, &tport->ls_work);
        }
 
        return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
        tgt_rscn->tport = tgtport->private;
        INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
 
-       schedule_work(&tgt_rscn->work);
+       queue_work(nvmet_wq, &tgt_rscn->work);
 }
 
 static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
        INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
        kref_init(&tfcp_req->ref);
 
-       schedule_work(&tfcp_req->fcp_rcv_work);
+       queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
 
        return 0;
 }
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-       schedule_work(&tfcp_req->tio_done_work);
+       queue_work(nvmet_wq, &tfcp_req->tio_done_work);
 }
 
 static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 
        if (abortio)
                /* leave the reference while the work item is scheduled */
-               WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+               WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
        else  {
                /*
                 * as the io has already had the done callback made,
index 6485dc8..f3d58ab 100644 (file)
@@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
        if (!nvmet_check_transfer_len(req, 0))
                return;
        INIT_WORK(&req->f.work, nvmet_file_flush_work);
-       schedule_work(&req->f.work);
+       queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                return;
        INIT_WORK(&req->f.work, nvmet_file_dsm_work);
-       schedule_work(&req->f.work);
+       queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
        if (!nvmet_check_transfer_len(req, 0))
                return;
        INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
-       schedule_work(&req->f.work);
+       queue_work(nvmet_wq, &req->f.work);
 }
 
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
index 23f9d6f..59024af 100644 (file)
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                iod->req.transfer_len = blk_rq_payload_bytes(req);
        }
 
-       schedule_work(&iod->work);
+       queue_work(nvmet_wq, &iod->work);
        return BLK_STS_OK;
 }
 
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
                return;
        }
 
-       schedule_work(&iod->work);
+       queue_work(nvmet_wq, &iod->work);
 }
 
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
index d910c6a..6981875 100644 (file)
@@ -366,6 +366,7 @@ struct nvmet_req {
 
 extern struct workqueue_struct *buffered_io_wq;
 extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
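
All of the nvmet hunks above and below apply one conversion: work items that previously rode on the kernel's shared system workqueue via schedule_work()/flush_scheduled_work() are queued on a driver-owned nvmet_wq instead. A minimal sketch of that pattern, assuming the queue is created at module init (the WQ_MEM_RECLAIM flag here is an illustrative assumption, not taken from this diff):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *nvmet_wq;

static int __init example_init(void)
{
        /* One dedicated queue instead of the shared system workqueue. */
        nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
        if (!nvmet_wq)
                return -ENOMEM;
        return 0;
}

static void example_defer(struct work_struct *work)
{
        queue_work(nvmet_wq, work);             /* was: schedule_work(work) */
}

static void __exit example_exit(void)
{
        flush_workqueue(nvmet_wq);              /* was: flush_scheduled_work() */
        destroy_workqueue(nvmet_wq);
}

module_init(example_init);
module_exit(example_exit);
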
index a4de1e0..5247c24 100644 (file)
@@ -283,7 +283,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
        if (req->p.use_workqueue || effects) {
                INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
                req->p.rq = rq;
-               schedule_work(&req->p.work);
+               queue_work(nvmet_wq, &req->p.work);
        } else {
                rq->end_io_data = req;
                blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
index 2446d09..2fab0b2 100644 (file)
@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
        if (queue->host_qid == 0) {
                /* Let inflight controller teardown complete */
-               flush_scheduled_work();
+               flush_workqueue(nvmet_wq);
        }
 
        ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
        if (disconnect) {
                rdma_disconnect(queue->cm_id);
-               schedule_work(&queue->release_work);
+               queue_work(nvmet_wq, &queue->release_work);
        }
 }
 
@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        mutex_unlock(&nvmet_rdma_queue_mutex);
 
        pr_err("failed to connect queue %d\n", queue->idx);
-       schedule_work(&queue->release_work);
+       queue_work(nvmet_wq, &queue->release_work);
 }
 
 /**
@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                if (!queue) {
                        struct nvmet_rdma_port *port = cm_id->context;
 
-                       schedule_delayed_work(&port->repair_work, 0);
+                       queue_delayed_work(nvmet_wq, &port->repair_work, 0);
                        break;
                }
                fallthrough;
@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
        nvmet_rdma_disable_port(port);
        ret = nvmet_rdma_enable_port(port);
        if (ret)
-               schedule_delayed_work(&port->repair_work, 5 * HZ);
+               queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
 }
 
 static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
        }
        mutex_unlock(&nvmet_rdma_queue_mutex);
 
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
 }
 
 static struct ib_client nvmet_rdma_ib_client = {
index 83ca577..2793554 100644 (file)
@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
        spin_lock(&queue->state_lock);
        if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
                queue->state = NVMET_TCP_Q_DISCONNECTING;
-               schedule_work(&queue->release_work);
+               queue_work(nvmet_wq, &queue->release_work);
        }
        spin_unlock(&queue->state_lock);
 }
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
                goto out;
 
        if (sk->sk_state == TCP_LISTEN)
-               schedule_work(&port->accept_work);
+               queue_work(nvmet_wq, &port->accept_work);
 out:
        read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 
        if (sq->qid == 0) {
                /* Let inflight controller teardown complete */
-               flush_scheduled_work();
+               flush_workqueue(nvmet_wq);
        }
 
        queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
 
        nvmet_unregister_transport(&nvmet_tcp_ops);
 
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
 
        destroy_workqueue(nvmet_tcp_wq);
 }
index df84d22..d270a20 100644 (file)
@@ -766,14 +766,6 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
        return irqd->parent_data->hwirq;
 }
 
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
-                                      struct msi_desc *msi_desc)
-{
-       msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
-                             msi_desc->msg.address_lo;
-       msi_entry->data = msi_desc->msg.data;
-}
-
 /*
  * @nr_bm_irqs:                Indicates the number of IRQs that were allocated from
  *                     the bitmap.
@@ -3415,6 +3407,15 @@ static int hv_pci_probe(struct hv_device *hdev,
        hbus->bridge->domain_nr = dom;
 #ifdef CONFIG_X86
        hbus->sysdata.domain = dom;
+#elif defined(CONFIG_ARM64)
+       /*
+        * Set the PCI bus parent to be the corresponding VMbus
+        * device. Then the VMbus device will be assigned as the
+        * ACPI companion in pcibios_root_bridge_prepare() and
+        * pci_dma_configure() will propagate device coherence
+        * information to devices created on the bus.
+        */
+       hbus->sysdata.parent = hdev->device.parent;
 #endif
 
        hbus->hdev = hdev;
index 5d4be97..6420ca1 100644 (file)
@@ -2,6 +2,7 @@
 
 # tell define_trace.h where to find the cros ec trace header
 CFLAGS_cros_ec_trace.o:=               -I$(src)
+CFLAGS_cros_ec_sensorhub_ring.o:=      -I$(src)
 
 obj-$(CONFIG_CHROMEOS_LAPTOP)          += chromeos_laptop.o
 obj-$(CONFIG_CHROMEOS_PRIVACY_SCREEN)  += chromeos_privacy_screen.o
@@ -21,7 +22,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV)         += cros_ec_chardev.o
 obj-$(CONFIG_CROS_EC_LIGHTBAR)         += cros_ec_lightbar.o
 obj-$(CONFIG_CROS_EC_VBC)              += cros_ec_vbc.o
 obj-$(CONFIG_CROS_EC_DEBUGFS)          += cros_ec_debugfs.o
-cros-ec-sensorhub-objs                 := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
+cros-ec-sensorhub-objs                 := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
 obj-$(CONFIG_CROS_EC_SENSORHUB)                += cros-ec-sensorhub.o
 obj-$(CONFIG_CROS_EC_SYSFS)            += cros_ec_sysfs.o
 obj-$(CONFIG_CROS_USBPD_LOGGER)                += cros_usbpd_logger.o
index 272c898..0dbceee 100644 (file)
@@ -25,6 +25,9 @@
 
 #define CIRC_ADD(idx, size, value)     (((idx) + (value)) & ((size) - 1))
 
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
 /**
  * struct cros_ec_debugfs - EC debugging information.
  *
@@ -33,7 +36,6 @@
  * @log_buffer: circular buffer for console log information
  * @read_msg: preallocated EC command and buffer to read console log
  * @log_mutex: mutex to protect circular buffer
- * @log_wq: waitqueue for log readers
  * @log_poll_work: recurring task to poll EC for new console log data
  * @panicinfo_blob: panicinfo debugfs blob
  */
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
        struct circ_buf log_buffer;
        struct cros_ec_command *read_msg;
        struct mutex log_mutex;
-       wait_queue_head_t log_wq;
        struct delayed_work log_poll_work;
        /* EC panicinfo */
        struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
                        buf_space--;
                }
 
-               wake_up(&debug_info->log_wq);
+               wake_up(&cros_ec_debugfs_log_wq);
        }
 
        mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
 
                mutex_unlock(&debug_info->log_mutex);
 
-               ret = wait_event_interruptible(debug_info->log_wq,
+               ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
                                        CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
                if (ret < 0)
                        return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
        struct cros_ec_debugfs *debug_info = file->private_data;
        __poll_t mask = 0;
 
-       poll_wait(file, &debug_info->log_wq, wait);
+       poll_wait(file, &cros_ec_debugfs_log_wq, wait);
 
        mutex_lock(&debug_info->log_mutex);
        if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
        debug_info->log_buffer.tail = 0;
 
        mutex_init(&debug_info->log_mutex);
-       init_waitqueue_head(&debug_info->log_wq);
 
        debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
                            debug_info, &cros_ec_console_log_fops);
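
The debugfs change above moves the readers' wait queue out of the per-device cros_ec_debugfs structure into a file-scope DECLARE_WAIT_QUEUE_HEAD, so a task sleeping in a read cannot end up waiting on memory that is freed when the device goes away. A reduced sketch of that producer/consumer pattern (names here are placeholders, not from the driver):

#include <linux/wait.h>

/* File-scope: the wait queue outlives any particular device instance. */
static DECLARE_WAIT_QUEUE_HEAD(example_log_wq);
static bool example_data_ready;

static int example_reader(void)
{
        /* Sleeps until example_writer() signals new data, or a signal arrives. */
        return wait_event_interruptible(example_log_wq, example_data_ready);
}

static void example_writer(void)
{
        example_data_ready = true;
        wake_up(&example_log_wq);
}
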
index 98e3708..71948da 100644 (file)
@@ -17,7 +17,8 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
 
-#include "cros_ec_trace.h"
+#define CREATE_TRACE_POINTS
+#include "cros_ec_sensorhub_trace.h"
 
 /* Precision of fixed point for the m values from the filter */
 #define M_PRECISION BIT(23)
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
new file mode 100644 (file)
index 0000000..57d9b47
--- /dev/null
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Sensorhub kernel module
+ *
+ * Copyright 2021 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_SENSORHUB_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+           TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+                    s64 current_timestamp, s64 current_time),
+       TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+               current_time),
+       TP_STRUCT__entry(
+               __field(u32, ec_sample_timestamp)
+               __field(u32, ec_fifo_timestamp)
+               __field(s64, fifo_timestamp)
+               __field(s64, current_timestamp)
+               __field(s64, current_time)
+               __field(s64, delta)
+       ),
+       TP_fast_assign(
+               __entry->ec_sample_timestamp = ec_sample_timestamp;
+               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+               __entry->fifo_timestamp = fifo_timestamp;
+               __entry->current_timestamp = current_timestamp;
+               __entry->current_time = current_time;
+               __entry->delta = current_timestamp - current_time;
+       ),
+       TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+                 __entry->ec_sample_timestamp,
+               __entry->ec_fifo_timestamp,
+               __entry->fifo_timestamp,
+               __entry->current_timestamp,
+               __entry->current_time,
+               __entry->delta
+       )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+           TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+                    s64 current_timestamp, s64 current_time),
+       TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+       TP_STRUCT__entry(
+               __field(u32, ec_sensor_num)
+               __field(u32, ec_fifo_timestamp)
+               __field(s64, fifo_timestamp)
+               __field(s64, current_timestamp)
+               __field(s64, current_time)
+               __field(s64, delta)
+       ),
+       TP_fast_assign(
+               __entry->ec_sensor_num = ec_sensor_num;
+               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+               __entry->fifo_timestamp = fifo_timestamp;
+               __entry->current_timestamp = current_timestamp;
+               __entry->current_time = current_time;
+               __entry->delta = current_timestamp - current_time;
+       ),
+       TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+                 __entry->ec_sensor_num,
+               __entry->ec_fifo_timestamp,
+               __entry->fifo_timestamp,
+               __entry->current_timestamp,
+               __entry->current_time,
+               __entry->delta
+       )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+           TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+       TP_ARGS(state, dx, dy),
+       TP_STRUCT__entry(
+               __field(s64, dx)
+               __field(s64, dy)
+               __field(s64, median_m)
+               __field(s64, median_error)
+               __field(s64, history_len)
+               __field(s64, x)
+               __field(s64, y)
+       ),
+       TP_fast_assign(
+               __entry->dx = dx;
+               __entry->dy = dy;
+               __entry->median_m = state->median_m;
+               __entry->median_error = state->median_error;
+               __entry->history_len = state->history_len;
+               __entry->x = state->x_offset;
+               __entry->y = state->y_offset;
+       ),
+       TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+                 __entry->dx,
+               __entry->dy,
+               __entry->median_m,
+               __entry->median_error,
+               __entry->history_len,
+               __entry->x,
+               __entry->y
+       )
+);
+
+
+#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace
+
+#include <trace/define_trace.h>
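
Each TRACE_EVENT(name, ...) in the new header expands to a trace_name() helper; the single translation unit that defines CREATE_TRACE_POINTS before including the header (cros_ec_sensorhub_ring.c in the hunk above) also emits the event definitions. A hedged sketch of how one of these events is fired, with purely illustrative argument values:

/* In exactly one .c file, emit the event bodies before using them. */
#define CREATE_TRACE_POINTS
#include "cros_ec_sensorhub_trace.h"

static void example_emit(u32 ec_sample_ts, u32 ec_fifo_ts,
                         s64 fifo_ts, s64 current_ts, s64 now)
{
        /* Generated from TRACE_EVENT(cros_ec_sensorhub_timestamp, ...) above. */
        trace_cros_ec_sensorhub_timestamp(ec_sample_ts, ec_fifo_ts,
                                          fifo_ts, current_ts, now);
}
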
index 7e7cfc9..9bb5cd2 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
-#include <linux/platform_data/cros_ec_sensorhub.h>
 
 #include <linux/tracepoint.h>
 
@@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done,
                  __entry->retval)
 );
 
-TRACE_EVENT(cros_ec_sensorhub_timestamp,
-           TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
-                    s64 current_timestamp, s64 current_time),
-       TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
-               current_time),
-       TP_STRUCT__entry(
-               __field(u32, ec_sample_timestamp)
-               __field(u32, ec_fifo_timestamp)
-               __field(s64, fifo_timestamp)
-               __field(s64, current_timestamp)
-               __field(s64, current_time)
-               __field(s64, delta)
-       ),
-       TP_fast_assign(
-               __entry->ec_sample_timestamp = ec_sample_timestamp;
-               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
-               __entry->fifo_timestamp = fifo_timestamp;
-               __entry->current_timestamp = current_timestamp;
-               __entry->current_time = current_time;
-               __entry->delta = current_timestamp - current_time;
-       ),
-       TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
-                 __entry->ec_sample_timestamp,
-               __entry->ec_fifo_timestamp,
-               __entry->fifo_timestamp,
-               __entry->current_timestamp,
-               __entry->current_time,
-               __entry->delta
-       )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_data,
-           TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
-                    s64 current_timestamp, s64 current_time),
-       TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
-       TP_STRUCT__entry(
-               __field(u32, ec_sensor_num)
-               __field(u32, ec_fifo_timestamp)
-               __field(s64, fifo_timestamp)
-               __field(s64, current_timestamp)
-               __field(s64, current_time)
-               __field(s64, delta)
-       ),
-       TP_fast_assign(
-               __entry->ec_sensor_num = ec_sensor_num;
-               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
-               __entry->fifo_timestamp = fifo_timestamp;
-               __entry->current_timestamp = current_timestamp;
-               __entry->current_time = current_time;
-               __entry->delta = current_timestamp - current_time;
-       ),
-       TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
-                 __entry->ec_sensor_num,
-               __entry->ec_fifo_timestamp,
-               __entry->fifo_timestamp,
-               __entry->current_timestamp,
-               __entry->current_time,
-               __entry->delta
-       )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_filter,
-           TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
-       TP_ARGS(state, dx, dy),
-       TP_STRUCT__entry(
-               __field(s64, dx)
-               __field(s64, dy)
-               __field(s64, median_m)
-               __field(s64, median_error)
-               __field(s64, history_len)
-               __field(s64, x)
-               __field(s64, y)
-       ),
-       TP_fast_assign(
-               __entry->dx = dx;
-               __entry->dy = dy;
-               __entry->median_m = state->median_m;
-               __entry->median_error = state->median_error;
-               __entry->history_len = state->history_len;
-               __entry->x = state->x_offset;
-               __entry->y = state->y_offset;
-       ),
-       TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
-                 __entry->dx,
-               __entry->dy,
-               __entry->median_m,
-               __entry->median_error,
-               __entry->history_len,
-               __entry->x,
-               __entry->y
-       )
-);
-
-
 #endif /* _CROS_EC_TRACE_H_ */
 
 /* this part must be outside header guard */
index 5de0bfb..4bd2752 100644 (file)
@@ -115,17 +115,18 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
                return ret;
        cap->data = ret;
 
+       /* Try-power-role is optional. */
        ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
        if (ret) {
-               dev_err(dev, "try-power-role not found: %d\n", ret);
-               return ret;
+               dev_warn(dev, "try-power-role not found: %d\n", ret);
+               cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
+       } else {
+               ret = typec_find_power_role(buf);
+               if (ret < 0)
+                       return ret;
+               cap->prefer_role = ret;
        }
 
-       ret = typec_find_power_role(buf);
-       if (ret < 0)
-               return ret;
-       cap->prefer_role = ret;
-
        cap->fwnode = fwnode;
 
        return 0;
@@ -227,6 +228,7 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec,
        cros_typec_unregister_altmodes(typec, port_num, true);
 
        cros_typec_usb_disconnect_state(port);
+       port->mux_flags = USB_PD_MUX_NONE;
 
        typec_unregister_partner(port->partner);
        port->partner = NULL;
@@ -512,20 +514,38 @@ static int cros_typec_enable_usb4(struct cros_typec_data *typec,
 }
 
 static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
-                               uint8_t mux_flags,
                                struct ec_response_usb_pd_control_v2 *pd_ctrl)
 {
        struct cros_typec_port *port = typec->ports[port_num];
+       struct ec_response_usb_pd_mux_info resp;
+       struct ec_params_usb_pd_mux_info req = {
+               .port = port_num,
+       };
        struct ec_params_usb_pd_mux_ack mux_ack;
        enum typec_orientation orientation;
        int ret;
 
-       if (mux_flags == USB_PD_MUX_NONE) {
+       ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
+                             &req, sizeof(req), &resp, sizeof(resp));
+       if (ret < 0) {
+               dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
+                        port_num, ret);
+               return ret;
+       }
+
+       /* No change needs to be made, let's exit early. */
+       if (port->mux_flags == resp.flags && port->role == pd_ctrl->role)
+               return 0;
+
+       port->mux_flags = resp.flags;
+       port->role = pd_ctrl->role;
+
+       if (port->mux_flags == USB_PD_MUX_NONE) {
                ret = cros_typec_usb_disconnect_state(port);
                goto mux_ack;
        }
 
-       if (mux_flags & USB_PD_MUX_POLARITY_INVERTED)
+       if (port->mux_flags & USB_PD_MUX_POLARITY_INVERTED)
                orientation = TYPEC_ORIENTATION_REVERSE;
        else
                orientation = TYPEC_ORIENTATION_NORMAL;
@@ -540,22 +560,22 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
        if (ret)
                return ret;
 
-       if (mux_flags & USB_PD_MUX_USB4_ENABLED) {
+       if (port->mux_flags & USB_PD_MUX_USB4_ENABLED) {
                ret = cros_typec_enable_usb4(typec, port_num, pd_ctrl);
-       } else if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+       } else if (port->mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
                ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
-       } else if (mux_flags & USB_PD_MUX_DP_ENABLED) {
+       } else if (port->mux_flags & USB_PD_MUX_DP_ENABLED) {
                ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
-       } else if (mux_flags & USB_PD_MUX_SAFE_MODE) {
+       } else if (port->mux_flags & USB_PD_MUX_SAFE_MODE) {
                ret = cros_typec_usb_safe_state(port);
-       } else if (mux_flags & USB_PD_MUX_USB_ENABLED) {
+       } else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) {
                port->state.alt = NULL;
                port->state.mode = TYPEC_STATE_USB;
                ret = typec_mux_set(port->mux, &port->state);
        } else {
                dev_dbg(typec->dev,
                        "Unrecognized mode requested, mux flags: %x\n",
-                       mux_flags);
+                       port->mux_flags);
        }
 
 mux_ack:
@@ -630,17 +650,6 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
        }
 }
 
-static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
-                                  struct ec_response_usb_pd_mux_info *resp)
-{
-       struct ec_params_usb_pd_mux_info req = {
-               .port = port_num,
-       };
-
-       return cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
-                              sizeof(req), resp, sizeof(*resp));
-}
-
 /*
  * Helper function to register partner/plug altmodes.
  */
@@ -938,7 +947,6 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
 {
        struct ec_params_usb_pd_control req;
        struct ec_response_usb_pd_control_v2 resp;
-       struct ec_response_usb_pd_mux_info mux_resp;
        int ret;
 
        if (port_num < 0 || port_num >= typec->num_ports) {
@@ -958,6 +966,11 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
        if (ret < 0)
                return ret;
 
+       /* Update the switches if they exist, according to requested state */
+       ret = cros_typec_configure_mux(typec, port_num, &resp);
+       if (ret)
+               dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+
        dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled);
        dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role);
        dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
@@ -973,27 +986,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
        if (typec->typec_cmd_supported)
                cros_typec_handle_status(typec, port_num);
 
-       /* Update the switches if they exist, according to requested state */
-       ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
-       if (ret < 0) {
-               dev_warn(typec->dev,
-                        "Failed to get mux info for port: %d, err = %d\n",
-                        port_num, ret);
-               return 0;
-       }
-
-       /* No change needs to be made, let's exit early. */
-       if (typec->ports[port_num]->mux_flags == mux_resp.flags &&
-           typec->ports[port_num]->role == resp.role)
-               return 0;
-
-       typec->ports[port_num]->mux_flags = mux_resp.flags;
-       typec->ports[port_num]->role = resp.role;
-       ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp);
-       if (ret)
-               dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
-
-       return ret;
+       return 0;
 }
 
 static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
@@ -1075,7 +1068,13 @@ static int cros_typec_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        typec->dev = dev;
+
        typec->ec = dev_get_drvdata(pdev->dev.parent);
+       if (!typec->ec) {
+               dev_err(dev, "couldn't find parent EC device\n");
+               return -ENODEV;
+       }
+
        platform_set_drvdata(pdev, typec);
 
        ret = cros_typec_get_cmd_version(typec);
index f6d6d4c..41c65b4 100644 (file)
@@ -1293,6 +1293,16 @@ config RTC_DRV_OPAL
          This driver can also be built as a module. If so, the module
          will be called rtc-opal.
 
+config RTC_DRV_OPTEE
+       tristate "OP-TEE based RTC driver"
+       depends on OPTEE
+       help
+         Select this to get support for OP-TEE based RTC control on SoCs where
+         the RTC is not accessible to the normal world (Linux).
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-optee.
+
 config RTC_DRV_ZYNQMP
        tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
        depends on OF && HAS_IOMEM
index e92f3e9..2d827d8 100644 (file)
@@ -115,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_GAMECUBE)      += rtc-gamecube.o
 obj-$(CONFIG_RTC_DRV_NTXEC)    += rtc-ntxec.o
 obj-$(CONFIG_RTC_DRV_OMAP)     += rtc-omap.o
 obj-$(CONFIG_RTC_DRV_OPAL)     += rtc-opal.o
+obj-$(CONFIG_RTC_DRV_OPTEE)    += rtc-optee.o
 obj-$(CONFIG_RTC_DRV_PALMAS)   += rtc-palmas.o
 obj-$(CONFIG_RTC_DRV_PCAP)     += rtc-pcap.o
 obj-$(CONFIG_RTC_DRV_PCF2123)  += rtc-pcf2123.o
index 4b460c6..3c8eec2 100644 (file)
@@ -26,6 +26,15 @@ struct class *rtc_class;
 static void rtc_device_release(struct device *dev)
 {
        struct rtc_device *rtc = to_rtc_device(dev);
+       struct timerqueue_head *head = &rtc->timerqueue;
+       struct timerqueue_node *node;
+
+       mutex_lock(&rtc->ops_lock);
+       while ((node = timerqueue_getnext(head)))
+               timerqueue_del(head, node);
+       mutex_unlock(&rtc->ops_lock);
+
+       cancel_work_sync(&rtc->irqwork);
 
        ida_simple_remove(&rtc_ida, rtc->id);
        mutex_destroy(&rtc->ops_lock);
@@ -390,9 +399,6 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
        if (!rtc->ops->set_alarm)
                clear_bit(RTC_FEATURE_ALARM, rtc->features);
 
-       if (rtc->uie_unsupported)
-               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
-
        if (rtc->ops->set_offset)
                set_bit(RTC_FEATURE_CORRECTION, rtc->features);
 
index d8e8357..9edd662 100644 (file)
@@ -804,9 +804,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
        struct rtc_time tm;
        ktime_t now;
+       int err;
+
+       err = __rtc_read_time(rtc, &tm);
+       if (err)
+               return err;
 
        timer->enabled = 1;
-       __rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
 
        /* Skip over expired timers */
@@ -820,7 +824,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        trace_rtc_timer_enqueue(timer);
        if (!next || ktime_before(timer->node.expires, next->expires)) {
                struct rtc_wkalrm alarm;
-               int err;
 
                alarm.time = rtc_ktime_to_tm(timer->node.expires);
                alarm.enabled = 1;
index 336cb9a..d51565b 100644 (file)
@@ -1955,7 +1955,7 @@ static int ds1307_probe(struct i2c_client *client,
                dev_info(ds1307->dev,
                         "'wakeup-source' is set, request for an IRQ is disabled!\n");
                /* We cannot support UIE mode if we do not have an IRQ line */
-               ds1307->rtc->uie_unsupported = 1;
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, ds1307->rtc->features);
        }
 
        if (want_irq) {
index 75db7ab..a24331b 100644 (file)
@@ -1273,7 +1273,7 @@ ds1685_rtc_probe(struct platform_device *pdev)
 
        /* See if the platform doesn't support UIE. */
        if (pdata->uie_unsupported)
-               rtc_dev->uie_unsupported = 1;
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc_dev->features);
 
        rtc->dev = rtc_dev;
 
@@ -1285,13 +1285,10 @@ ds1685_rtc_probe(struct platform_device *pdev)
         * there won't be an automatic way of notifying the kernel about it,
         * unless ctrlc is explicitly polled.
         */
-       if (!pdata->no_irq) {
-               ret = platform_get_irq(pdev, 0);
-               if (ret <= 0)
-                       return ret;
-
-               rtc->irq_num = ret;
-
+       rtc->irq_num = platform_get_irq(pdev, 0);
+       if (rtc->irq_num <= 0) {
+               clear_bit(RTC_FEATURE_ALARM, rtc_dev->features);
+       } else {
                /* Request an IRQ. */
                ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_num,
                                       NULL, ds1685_rtc_irq_handler,
@@ -1305,7 +1302,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
                        rtc->irq_num = 0;
                }
        }
-       rtc->no_irq = pdata->no_irq;
 
        /* Setup complete. */
        ds1685_rtc_switch_to_bank0(rtc);
@@ -1394,7 +1390,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
                 * have been taken care of by the shutdown scripts and this
                 * is the final function call.
                 */
-               if (!rtc->no_irq)
+               if (rtc->irq_num)
                        disable_irq_nosync(rtc->irq_num);
 
                /* Oscillator must be on and the countdown chain enabled. */
index 138c5e0..11850c2 100644 (file)
@@ -261,15 +261,17 @@ static int __init efi_rtc_probe(struct platform_device *dev)
        if (efi.get_time(&eft, &cap) != EFI_SUCCESS)
                return -ENODEV;
 
-       rtc = devm_rtc_device_register(&dev->dev, "rtc-efi", &efi_rtc_ops,
-                                       THIS_MODULE);
+       rtc = devm_rtc_allocate_device(&dev->dev);
        if (IS_ERR(rtc))
                return PTR_ERR(rtc);
 
-       rtc->uie_unsupported = 1;
        platform_set_drvdata(dev, rtc);
 
-       return 0;
+       rtc->ops = &efi_rtc_ops;
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+       set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+
+       return devm_rtc_register_device(rtc);
 }
 
 static struct platform_driver efi_rtc_driver = {
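
Several of the RTC driver hunks in this pull follow the same sequence the rtc-efi change introduces: allocate the rtc_device, adjust its ops and feature bits, then register. A condensed sketch of that sequence (the example_* identifiers are placeholders, not from any of these drivers):

#include <linux/rtc.h>

static const struct rtc_class_ops example_rtc_ops;      /* placeholder ops */

static int example_probe(struct device *dev)
{
        struct rtc_device *rtc;

        rtc = devm_rtc_allocate_device(dev);             /* allocate first */
        if (IS_ERR(rtc))
                return PTR_ERR(rtc);

        rtc->ops = &example_rtc_ops;                     /* then configure */
        clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
        set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);

        return devm_rtc_register_device(rtc);            /* register last */
}
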
index f717b36..18ca3b3 100644 (file)
@@ -235,6 +235,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
        }
 
        ret = of_address_to_resource(np, 0, &res);
+       of_node_put(np);
        if (ret) {
                pr_err("no io memory range found\n");
                return -1;
index 0751cae..90e602e 100644 (file)
@@ -220,24 +220,6 @@ static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        u8 buf[4];
        int ret;
 
-       /*
-        * The alarm has no seconds so deal with it
-        */
-       if (alm_tm->tm_sec) {
-               alm_tm->tm_sec = 0;
-               alm_tm->tm_min++;
-               if (alm_tm->tm_min >= 60) {
-                       alm_tm->tm_min = 0;
-                       alm_tm->tm_hour++;
-                       if (alm_tm->tm_hour >= 24) {
-                               alm_tm->tm_hour = 0;
-                               alm_tm->tm_mday++;
-                               if (alm_tm->tm_mday > 31)
-                                       alm_tm->tm_mday = 0;
-                       }
-               }
-       }
-
        ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
        if (ret < 0)
                return ret;
@@ -523,6 +505,10 @@ static int hym8563_probe(struct i2c_client *client,
        if (!hym8563)
                return -ENOMEM;
 
+       hym8563->rtc = devm_rtc_allocate_device(&client->dev);
+       if (IS_ERR(hym8563->rtc))
+               return PTR_ERR(hym8563->rtc);
+
        hym8563->client = client;
        i2c_set_clientdata(client, hym8563);
 
@@ -557,19 +543,15 @@ static int hym8563_probe(struct i2c_client *client,
        dev_dbg(&client->dev, "rtc information is %s\n",
                (ret & HYM8563_SEC_VL) ? "invalid" : "valid");
 
-       hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
-                                               &hym8563_rtc_ops, THIS_MODULE);
-       if (IS_ERR(hym8563->rtc))
-               return PTR_ERR(hym8563->rtc);
-
-       /* the hym8563 alarm only supports a minute accuracy */
-       hym8563->rtc->uie_unsupported = 1;
+       hym8563->rtc->ops = &hym8563_rtc_ops;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, hym8563->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, hym8563->rtc->features);
 
 #ifdef CONFIG_COMMON_CLK
        hym8563_clkout_register_clk(hym8563);
 #endif
 
-       return 0;
+       return devm_rtc_register_device(hym8563->rtc);
 }
 
 static const struct i2c_device_id hym8563_id[] = {
index 6d383b6..d868458 100644 (file)
@@ -932,10 +932,8 @@ static int m41t80_probe(struct i2c_client *client,
        m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
 
-       if (client->irq <= 0) {
-               /* We cannot support UIE mode if we do not have an IRQ line */
-               m41t80_data->rtc->uie_unsupported = 1;
-       }
+       if (client->irq <= 0)
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, m41t80_data->rtc->features);
 
        /* Make sure HT (Halt Update) bit is cleared */
        rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
index ae9f131..522449b 100644 (file)
@@ -176,6 +176,17 @@ int mc146818_get_time(struct rtc_time *time)
 }
 EXPORT_SYMBOL_GPL(mc146818_get_time);
 
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+               return true;
+#endif
+       return false;
+}
+
 /* Set the current date and time in the real time clock. */
 int mc146818_set_time(struct rtc_time *time)
 {
@@ -232,8 +243,10 @@ int mc146818_set_time(struct rtc_time *time)
        if (yrs >= 100)
                yrs -= 100;
 
-       if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
-           || RTC_ALWAYS_BCD) {
+       spin_lock_irqsave(&rtc_lock, flags);
+       save_control = CMOS_READ(RTC_CONTROL);
+       spin_unlock_irqrestore(&rtc_lock, flags);
+       if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                sec = bin2bcd(sec);
                min = bin2bcd(min);
                hrs = bin2bcd(hrs);
@@ -247,7 +260,10 @@ int mc146818_set_time(struct rtc_time *time)
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+       if (apply_amd_register_a_behavior())
+               CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+       else
+               CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
        CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
index bb2ea9b..6d7656a 100644 (file)
@@ -210,20 +210,6 @@ static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
        struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
        struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
 
-       /*
-        * the alarm has no seconds so deal with it
-        */
-       if (alarm->time.tm_sec) {
-               alarm->time.tm_sec = 0;
-               alarm->time.tm_min++;
-               if (alarm->time.tm_min >= 60) {
-                       alarm->time.tm_min = 0;
-                       alarm->time.tm_hour++;
-                       if (alarm->time.tm_hour >= 24)
-                               alarm->time.tm_hour = 0;
-               }
-       }
-
        alarm->time.tm_mday = -1;
        alarm->time.tm_mon = -1;
        alarm->time.tm_year = -1;
@@ -349,7 +335,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
        }
 
        rtc->rtc->ops = &mpc5200_rtc_ops;
-       rtc->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc->features);
        rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
        rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */
 
index f8f49a9..ad41aaf 100644 (file)
@@ -250,7 +250,7 @@ static int opal_rtc_probe(struct platform_device *pdev)
        rtc->ops = &opal_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
        rtc->range_max = RTC_TIMESTAMP_END_9999;
-       rtc->uie_unsupported = 1;
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
 
        return devm_rtc_register_device(rtc);
 }
diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c
new file mode 100644 (file)
index 0000000..9f8b5d4
--- /dev/null
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Microchip.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/tee_drv.h>
+
+#define RTC_INFO_VERSION       0x1
+
+#define TA_CMD_RTC_GET_INFO            0x0
+#define TA_CMD_RTC_GET_TIME            0x1
+#define TA_CMD_RTC_SET_TIME            0x2
+#define TA_CMD_RTC_GET_OFFSET          0x3
+#define TA_CMD_RTC_SET_OFFSET          0x4
+
+#define TA_RTC_FEATURE_CORRECTION      BIT(0)
+
+struct optee_rtc_time {
+       u32 tm_sec;
+       u32 tm_min;
+       u32 tm_hour;
+       u32 tm_mday;
+       u32 tm_mon;
+       u32 tm_year;
+       u32 tm_wday;
+};
+
+struct optee_rtc_info {
+       u64 version;
+       u64 features;
+       struct optee_rtc_time range_min;
+       struct optee_rtc_time range_max;
+};
+
+/**
+ * struct optee_rtc - OP-TEE RTC private data
+ * @dev:               OP-TEE based RTC device.
+ * @ctx:               OP-TEE context handler.
+ * @session_id:                RTC TA session identifier.
+ * @shm:               Memory pool shared with RTC device.
+ * @features:          Bitfield of RTC features
+ */
+struct optee_rtc {
+       struct device *dev;
+       struct tee_context *ctx;
+       u32 session_id;
+       struct tee_shm *shm;
+       u64 features;
+};
+
+static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct optee_rtc_time *optee_tm;
+       struct tee_param param[4] = {0};
+       int ret;
+
+       inv_arg.func = TA_CMD_RTC_GET_TIME;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       /* Fill invoke cmd params */
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       optee_tm = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(optee_tm))
+               return PTR_ERR(optee_tm);
+
+       if (param[0].u.memref.size != sizeof(*optee_tm))
+               return -EPROTO;
+
+       tm->tm_sec = optee_tm->tm_sec;
+       tm->tm_min = optee_tm->tm_min;
+       tm->tm_hour = optee_tm->tm_hour;
+       tm->tm_mday = optee_tm->tm_mday;
+       tm->tm_mon = optee_tm->tm_mon;
+       tm->tm_year = optee_tm->tm_year - 1900;
+       tm->tm_wday = optee_tm->tm_wday;
+       tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+       return 0;
+}
+
+static int optee_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       struct optee_rtc_time optee_tm;
+       void *rtc_data;
+       int ret;
+
+       optee_tm.tm_sec = tm->tm_sec;
+       optee_tm.tm_min = tm->tm_min;
+       optee_tm.tm_hour = tm->tm_hour;
+       optee_tm.tm_mday = tm->tm_mday;
+       optee_tm.tm_mon = tm->tm_mon;
+       optee_tm.tm_year = tm->tm_year + 1900;
+       optee_tm.tm_wday = tm->tm_wday;
+
+       inv_arg.func = TA_CMD_RTC_SET_TIME;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+       rtc_data = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(rtc_data))
+               return PTR_ERR(rtc_data);
+
+       memcpy(rtc_data, &optee_tm, sizeof(struct optee_rtc_time));
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       return 0;
+}
+
+static int optee_rtc_readoffset(struct device *dev, long *offset)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       int ret;
+
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               return -EOPNOTSUPP;
+
+       inv_arg.func = TA_CMD_RTC_GET_OFFSET;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       *offset = param[0].u.value.a;
+
+       return 0;
+}
+
+static int optee_rtc_setoffset(struct device *dev, long offset)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       int ret;
+
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               return -EOPNOTSUPP;
+
+       inv_arg.func = TA_CMD_RTC_SET_OFFSET;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+       param[0].u.value.a = offset;
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       return 0;
+}
+
+static const struct rtc_class_ops optee_rtc_ops = {
+       .read_time      = optee_rtc_readtime,
+       .set_time       = optee_rtc_settime,
+       .set_offset     = optee_rtc_setoffset,
+       .read_offset    = optee_rtc_readoffset,
+};
+
+static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc,
+                              u64 *features)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       struct optee_rtc_info *info;
+       struct optee_rtc_time *tm;
+       int ret;
+
+       inv_arg.func = TA_CMD_RTC_GET_INFO;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(*info);
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       info = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
+       if (param[0].u.memref.size != sizeof(*info))
+               return -EPROTO;
+
+       if (info->version != RTC_INFO_VERSION)
+               return -EPROTO;
+
+       *features = info->features;
+
+       tm = &info->range_min;
+       rtc->range_min = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+                                 tm->tm_sec);
+       tm = &info->range_max;
+       rtc->range_max = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+                                 tm->tm_sec);
+
+       return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+       if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+               return 1;
+       else
+               return 0;
+}
+
+static int optee_rtc_probe(struct device *dev)
+{
+       struct tee_client_device *rtc_device = to_tee_client_device(dev);
+       struct tee_ioctl_open_session_arg sess_arg;
+       struct optee_rtc *priv;
+       struct rtc_device *rtc;
+       struct tee_shm *shm;
+       int ret, err;
+
+       memset(&sess_arg, 0, sizeof(sess_arg));
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       rtc = devm_rtc_allocate_device(dev);
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       /* Open context with TEE driver */
+       priv->ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+       if (IS_ERR(priv->ctx))
+               return -ENODEV;
+
+       /* Open session with rtc Trusted App */
+       export_uuid(sess_arg.uuid, &rtc_device->id.uuid);
+       sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+       ret = tee_client_open_session(priv->ctx, &sess_arg, NULL);
+       if (ret < 0 || sess_arg.ret != 0) {
+               dev_err(dev, "tee_client_open_session failed, err: %x\n", sess_arg.ret);
+               err = -EINVAL;
+               goto out_ctx;
+       }
+       priv->session_id = sess_arg.session;
+
+       shm = tee_shm_alloc_kernel_buf(priv->ctx, sizeof(struct optee_rtc_info));
+       if (IS_ERR(shm)) {
+               dev_err(priv->dev, "tee_shm_alloc_kernel_buf failed\n");
+               err = PTR_ERR(shm);
+               goto out_sess;
+       }
+
+       priv->shm = shm;
+       priv->dev = dev;
+       dev_set_drvdata(dev, priv);
+
+       rtc->ops = &optee_rtc_ops;
+
+       err = optee_rtc_read_info(dev, rtc, &priv->features);
+       if (err) {
+               dev_err(dev, "Failed to get RTC features from OP-TEE\n");
+               goto out_shm;
+       }
+
+       err = devm_rtc_register_device(rtc);
+       if (err)
+               goto out_shm;
+
+       /*
+        * We must clear this bit after registering because rtc_register_device
+        * will set it if it sees that .set_offset is provided.
+        */
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               clear_bit(RTC_FEATURE_CORRECTION, rtc->features);
+
+       return 0;
+
+out_shm:
+       tee_shm_free(priv->shm);
+out_sess:
+       tee_client_close_session(priv->ctx, priv->session_id);
+out_ctx:
+       tee_client_close_context(priv->ctx);
+
+       return err;
+}
+
+static int optee_rtc_remove(struct device *dev)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+
+       tee_client_close_session(priv->ctx, priv->session_id);
+       tee_client_close_context(priv->ctx);
+
+       return 0;
+}
+
+static const struct tee_client_device_id optee_rtc_id_table[] = {
+       {UUID_INIT(0xf389f8c8, 0x845f, 0x496c,
+                  0x8b, 0xbe, 0xd6, 0x4b, 0xd2, 0x4c, 0x92, 0xfd)},
+       {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rtc_id_table);
+
+static struct tee_client_driver optee_rtc_driver = {
+       .id_table       = optee_rtc_id_table,
+       .driver         = {
+               .name           = "optee_rtc",
+               .bus            = &tee_bus_type,
+               .probe          = optee_rtc_probe,
+               .remove         = optee_rtc_remove,
+       },
+};
+
+static int __init optee_rtc_mod_init(void)
+{
+       return driver_register(&optee_rtc_driver.driver);
+}
+
+static void __exit optee_rtc_mod_exit(void)
+{
+       driver_unregister(&optee_rtc_driver.driver);
+}
+
+module_init(optee_rtc_mod_init);
+module_exit(optee_rtc_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
+MODULE_DESCRIPTION("OP-TEE based RTC driver");
index 7473e6c..e13b5e6 100644 (file)
@@ -427,7 +427,8 @@ static int pcf2123_probe(struct spi_device *spi)
         * support to this driver to generate interrupts more than once
         * per minute.
         */
-       rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
        rtc->ops = &pcf2123_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        rtc->range_max = RTC_TIMESTAMP_END_2099;
index 81a5b1f..63b275b 100644 (file)
@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
 static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
        struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
-       unsigned int buf[5], ctrl2;
+       u8 buf[5];
+       unsigned int ctrl2;
        int ret;
 
        ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
@@ -655,13 +656,25 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
        pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
        pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
-       pcf2127->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
        clear_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
 
        if (alarm_irq > 0) {
+               unsigned long flags;
+
+               /*
+                * If flags = 0, devm_request_threaded_irq() will use IRQ flags
+                * obtained from device tree.
+                */
+               if (dev_fwnode(dev))
+                       flags = 0;
+               else
+                       flags = IRQF_TRIGGER_LOW;
+
                ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
                                                pcf2127_rtc_irq,
-                                               IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                               flags | IRQF_ONESHOT,
                                                dev_name(dev), dev);
                if (ret) {
                        dev_err(dev, "failed to request alarm irq\n");
index df2b072..9760824 100644 (file)
@@ -616,7 +616,8 @@ static int pcf85063_probe(struct i2c_client *client)
        pcf85063->rtc->ops = &pcf85063_rtc_ops;
        pcf85063->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf85063->rtc->range_max = RTC_TIMESTAMP_END_2099;
-       pcf85063->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_2S, pcf85063->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf85063->rtc->features);
        clear_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
 
        if (config->has_alarms && client->irq > 0) {
index c93acad..b1b1943 100644 (file)
@@ -212,14 +212,6 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
        if (err < 0)
                return err;
 
-       /* The alarm has no seconds, round up to nearest minute */
-       if (tm->time.tm_sec) {
-               time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
-               alarm_time += 60 - tm->time.tm_sec;
-               rtc_time64_to_tm(alarm_time, &tm->time);
-       }
-
        regs[0] = bin2bcd(tm->time.tm_min);
        regs[1] = bin2bcd(tm->time.tm_hour);
        regs[2] = bin2bcd(tm->time.tm_mday);
@@ -240,9 +232,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
 {
        struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
        int ret;
+       u32 value;
 
        switch(param->param) {
-               u32 value;
 
        case RTC_PARAM_BACKUP_SWITCH_MODE:
                ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
@@ -279,9 +271,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
 static int pcf8523_param_set(struct device *dev, struct rtc_param *param)
 {
        struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+       u8 mode;
 
        switch(param->param) {
-               u8 mode;
        case RTC_PARAM_BACKUP_SWITCH_MODE:
                switch (param->uvalue) {
                case RTC_BSM_DISABLED:
@@ -450,7 +442,8 @@ static int pcf8523_probe(struct i2c_client *client,
        rtc->ops = &pcf8523_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        rtc->range_max = RTC_TIMESTAMP_END_2099;
-       rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
 
        if (client->irq > 0) {
                err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38);
index c8bddfb..9d06813 100644 (file)
@@ -330,19 +330,6 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
        unsigned char buf[4];
        int err;
 
-       /* The alarm has no seconds, round up to nearest minute */
-       if (tm->time.tm_sec) {
-               time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
-               alarm_time += 60 - tm->time.tm_sec;
-               rtc_time64_to_tm(alarm_time, &tm->time);
-       }
-
-       dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d "
-               "enabled=%d pending=%d\n", __func__,
-               tm->time.tm_min, tm->time.tm_hour, tm->time.tm_wday,
-               tm->time.tm_mday, tm->enabled, tm->pending);
-
        buf[0] = bin2bcd(tm->time.tm_min);
        buf[1] = bin2bcd(tm->time.tm_hour);
        buf[2] = bin2bcd(tm->time.tm_mday);
@@ -565,7 +552,8 @@ static int pcf8563_probe(struct i2c_client *client,
 
        pcf8563->rtc->ops = &pcf8563_rtc_ops;
        /* the pcf8563 alarm only supports a minute accuracy */
-       pcf8563->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, pcf8563->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf8563->rtc->features);
        pcf8563->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf8563->rtc->range_max = RTC_TIMESTAMP_END_2099;
        pcf8563->rtc->set_start_time = true;
index e38ee88..bad6a5d 100644 (file)
@@ -350,9 +350,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
                }
        }
 
-       if (!adev->irq[0])
-               clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
-
        device_init_wakeup(&adev->dev, true);
        ldata->rtc = devm_rtc_allocate_device(&adev->dev);
        if (IS_ERR(ldata->rtc)) {
@@ -360,6 +357,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
                goto out;
        }
 
+       if (!adev->irq[0])
+               clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
+
        ldata->rtc->ops = ops;
        ldata->rtc->range_min = vendor->range_min;
        ldata->rtc->range_max = vendor->range_max;
index 29a1c65..dc6d147 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/rtc.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -83,7 +84,7 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
        if (!rtc_dd->allow_set_time)
-               return -EACCES;
+               return -ENODEV;
 
        secs = rtc_tm_to_time64(tm);
 
@@ -527,40 +528,28 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
                return rc;
        }
 
-       return devm_rtc_register_device(rtc_dd->rtc);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int pm8xxx_rtc_resume(struct device *dev)
-{
-       struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+       rc = devm_rtc_register_device(rtc_dd->rtc);
+       if (rc)
+               return rc;
 
-       if (device_may_wakeup(dev))
-               disable_irq_wake(rtc_dd->rtc_alarm_irq);
+       rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->rtc_alarm_irq);
+       if (rc)
+               return rc;
 
        return 0;
 }
 
-static int pm8xxx_rtc_suspend(struct device *dev)
+static int pm8xxx_remove(struct platform_device *pdev)
 {
-       struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
-
-       if (device_may_wakeup(dev))
-               enable_irq_wake(rtc_dd->rtc_alarm_irq);
-
+       dev_pm_clear_wake_irq(&pdev->dev);
        return 0;
 }
-#endif
-
-static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops,
-                        pm8xxx_rtc_suspend,
-                        pm8xxx_rtc_resume);
 
 static struct platform_driver pm8xxx_rtc_driver = {
        .probe          = pm8xxx_rtc_probe,
+       .remove         = pm8xxx_remove,
        .driver = {
                .name           = "rtc-pm8xxx",
-               .pm             = &pm8xxx_rtc_pm_ops,
                .of_match_table = pm8xxx_id_table,
        },
 };
index b4a5200..d4777b0 100644 (file)
@@ -204,8 +204,10 @@ static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
        /* we don't report wday/yday/isdst ... */
        rtc_wait_not_busy(config);
 
-       time = readl(config->ioaddr + TIME_REG);
-       date = readl(config->ioaddr + DATE_REG);
+       do {
+               time = readl(config->ioaddr + TIME_REG);
+               date = readl(config->ioaddr + DATE_REG);
+       } while (time == readl(config->ioaddr + TIME_REG));
        tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
        tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
        tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
@@ -352,6 +354,10 @@ static int spear_rtc_probe(struct platform_device *pdev)
        if (!config)
                return -ENOMEM;
 
+       config->rtc = devm_rtc_allocate_device(&pdev->dev);
+       if (IS_ERR(config->rtc))
+               return PTR_ERR(config->rtc);
+
        /* alarm irqs */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -380,16 +386,13 @@ static int spear_rtc_probe(struct platform_device *pdev)
        spin_lock_init(&config->lock);
        platform_set_drvdata(pdev, config);
 
-       config->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-                                       &spear_rtc_ops, THIS_MODULE);
-       if (IS_ERR(config->rtc)) {
-               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
-                               PTR_ERR(config->rtc));
-               status = PTR_ERR(config->rtc);
-               goto err_disable_clock;
-       }
+       config->rtc->ops = &spear_rtc_ops;
+       config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+       config->rtc->range_max = RTC_TIMESTAMP_END_9999;
 
-       config->rtc->uie_unsupported = 1;
+       status = devm_rtc_register_device(config->rtc);
+       if (status)
+               goto err_disable_clock;
 
        if (!device_can_wakeup(&pdev->dev))
                device_init_wakeup(&pdev->dev, 1);
index 711832c..5b3e4da 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/clk/sunxi-ng.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -48,7 +49,8 @@
 
 /* Alarm 0 (counter) */
 #define SUN6I_ALRM_COUNTER                     0x0020
-#define SUN6I_ALRM_CUR_VAL                     0x0024
+/* This holds the remaining alarm seconds on older SoCs (current value) */
+#define SUN6I_ALRM_COUNTER_HMS                 0x0024
 #define SUN6I_ALRM_EN                          0x0028
 #define SUN6I_ALRM_EN_CNT_EN                   BIT(0)
 #define SUN6I_ALRM_IRQ_EN                      0x002c
 #define SUN6I_YEAR_MIN                         1970
 #define SUN6I_YEAR_OFF                         (SUN6I_YEAR_MIN - 1900)
 
+#define SECS_PER_DAY                           (24 * 3600ULL)
+
 /*
  * There are other differences between models, including:
  *
@@ -133,12 +137,15 @@ struct sun6i_rtc_clk_data {
        unsigned int has_auto_swt : 1;
 };
 
+#define RTC_LINEAR_DAY BIT(0)
+
 struct sun6i_rtc_dev {
        struct rtc_device *rtc;
        const struct sun6i_rtc_clk_data *data;
        void __iomem *base;
        int irq;
-       unsigned long alarm;
+       time64_t alarm;
+       unsigned long flags;
 
        struct clk_hw hw;
        struct clk_hw *int_osc;
@@ -363,23 +370,6 @@ CLK_OF_DECLARE_DRIVER(sun8i_h3_rtc_clk, "allwinner,sun8i-h3-rtc",
 CLK_OF_DECLARE_DRIVER(sun50i_h5_rtc_clk, "allwinner,sun50i-h5-rtc",
                      sun8i_h3_rtc_clk_init);
 
-static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
-       .rc_osc_rate = 16000000,
-       .fixed_prescaler = 32,
-       .has_prescaler = 1,
-       .has_out_clk = 1,
-       .export_iosc = 1,
-       .has_losc_en = 1,
-       .has_auto_swt = 1,
-};
-
-static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
-{
-       sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
-}
-CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
-                     sun50i_h6_rtc_clk_init);
-
 /*
  * The R40 user manual is self-conflicting on whether the prescaler is
  * fixed or configurable. The clock diagram shows it as fixed, but there
@@ -467,22 +457,30 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
        } while ((date != readl(chip->base + SUN6I_RTC_YMD)) ||
                 (time != readl(chip->base + SUN6I_RTC_HMS)));
 
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /*
+                * Newer chips store a linear day number, the manual
+                * does not mandate any epoch base. The BSP driver uses
+                * the UNIX epoch, let's just copy that, as it's the
+                * easiest anyway.
+                */
+               rtc_time64_to_tm((date & 0xffff) * SECS_PER_DAY, rtc_tm);
+       } else {
+               rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
+               rtc_tm->tm_mon  = SUN6I_DATE_GET_MON_VALUE(date) - 1;
+               rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
+
+               /*
+                * switch from (data_year->min)-relative offset to
+                * a (1900)-relative one
+                */
+               rtc_tm->tm_year += SUN6I_YEAR_OFF;
+       }
+
        rtc_tm->tm_sec  = SUN6I_TIME_GET_SEC_VALUE(time);
        rtc_tm->tm_min  = SUN6I_TIME_GET_MIN_VALUE(time);
        rtc_tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(time);
 
-       rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
-       rtc_tm->tm_mon  = SUN6I_DATE_GET_MON_VALUE(date);
-       rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
-
-       rtc_tm->tm_mon  -= 1;
-
-       /*
-        * switch from (data_year->min)-relative offset to
-        * a (1900)-relative one
-        */
-       rtc_tm->tm_year += SUN6I_YEAR_OFF;
-
        return 0;
 }
 
@@ -510,36 +508,54 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
        struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
        struct rtc_time *alrm_tm = &wkalrm->time;
        struct rtc_time tm_now;
-       unsigned long time_now = 0;
-       unsigned long time_set = 0;
-       unsigned long time_gap = 0;
-       int ret = 0;
-
-       ret = sun6i_rtc_gettime(dev, &tm_now);
-       if (ret < 0) {
-               dev_err(dev, "Error in getting time\n");
-               return -EINVAL;
-       }
+       time64_t time_set;
+       u32 counter_val, counter_val_hms;
+       int ret;
 
        time_set = rtc_tm_to_time64(alrm_tm);
-       time_now = rtc_tm_to_time64(&tm_now);
-       if (time_set <= time_now) {
-               dev_err(dev, "Date to set in the past\n");
-               return -EINVAL;
-       }
-
-       time_gap = time_set - time_now;
 
-       if (time_gap > U32_MAX) {
-               dev_err(dev, "Date too far in the future\n");
-               return -EINVAL;
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /*
+                * The alarm registers hold the actual alarm time, encoded
+                * in the same way (linear day + HMS) as the current time.
+                */
+               counter_val_hms = SUN6I_TIME_SET_SEC_VALUE(alrm_tm->tm_sec)  |
+                                 SUN6I_TIME_SET_MIN_VALUE(alrm_tm->tm_min)  |
+                                 SUN6I_TIME_SET_HOUR_VALUE(alrm_tm->tm_hour);
+               /* The division will cut off the H:M:S part of alrm_tm. */
+               counter_val = div_u64(rtc_tm_to_time64(alrm_tm), SECS_PER_DAY);
+       } else {
+               /* The alarm register holds the number of seconds left. */
+               time64_t time_now;
+
+               ret = sun6i_rtc_gettime(dev, &tm_now);
+               if (ret < 0) {
+                       dev_err(dev, "Error in getting time\n");
+                       return -EINVAL;
+               }
+
+               time_now = rtc_tm_to_time64(&tm_now);
+               if (time_set <= time_now) {
+                       dev_err(dev, "Date to set in the past\n");
+                       return -EINVAL;
+               }
+               if ((time_set - time_now) > U32_MAX) {
+                       dev_err(dev, "Date too far in the future\n");
+                       return -EINVAL;
+               }
+
+               counter_val = time_set - time_now;
        }
 
        sun6i_rtc_setaie(0, chip);
        writel(0, chip->base + SUN6I_ALRM_COUNTER);
+       if (chip->flags & RTC_LINEAR_DAY)
+               writel(0, chip->base + SUN6I_ALRM_COUNTER_HMS);
        usleep_range(100, 300);
 
-       writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+       writel(counter_val, chip->base + SUN6I_ALRM_COUNTER);
+       if (chip->flags & RTC_LINEAR_DAY)
+               writel(counter_val_hms, chip->base + SUN6I_ALRM_COUNTER_HMS);
        chip->alarm = time_set;
 
        sun6i_rtc_setaie(wkalrm->enabled, chip);
@@ -571,20 +587,25 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
        u32 date = 0;
        u32 time = 0;
 
-       rtc_tm->tm_year -= SUN6I_YEAR_OFF;
-       rtc_tm->tm_mon += 1;
-
-       date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
-               SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon)  |
-               SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
-
-       if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
-               date |= SUN6I_LEAP_SET_VALUE(1);
-
        time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec)  |
                SUN6I_TIME_SET_MIN_VALUE(rtc_tm->tm_min)  |
                SUN6I_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
 
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /* The division will cut off the H:M:S part of rtc_tm. */
+               date = div_u64(rtc_tm_to_time64(rtc_tm), SECS_PER_DAY);
+       } else {
+               rtc_tm->tm_year -= SUN6I_YEAR_OFF;
+               rtc_tm->tm_mon += 1;
+
+               date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
+                       SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon)  |
+                       SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
+
+               if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
+                       date |= SUN6I_LEAP_SET_VALUE(1);
+       }
+
        /* Check whether registers are writable */
        if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
                           SUN6I_LOSC_CTRL_ACC_MASK, 50)) {
@@ -668,11 +689,35 @@ static int sun6i_rtc_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(sun6i_rtc_pm_ops,
        sun6i_rtc_suspend, sun6i_rtc_resume);
 
+static void sun6i_rtc_bus_clk_cleanup(void *data)
+{
+       struct clk *bus_clk = data;
+
+       clk_disable_unprepare(bus_clk);
+}
+
 static int sun6i_rtc_probe(struct platform_device *pdev)
 {
        struct sun6i_rtc_dev *chip = sun6i_rtc;
+       struct device *dev = &pdev->dev;
+       struct clk *bus_clk;
        int ret;
 
+       bus_clk = devm_clk_get_optional(dev, "bus");
+       if (IS_ERR(bus_clk))
+               return PTR_ERR(bus_clk);
+
+       if (bus_clk) {
+               ret = clk_prepare_enable(bus_clk);
+               if (ret)
+                       return ret;
+
+               ret = devm_add_action_or_reset(dev, sun6i_rtc_bus_clk_cleanup,
+                                              bus_clk);
+               if (ret)
+                       return ret;
+       }
+
        if (!chip) {
                chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
                if (!chip)
@@ -683,10 +728,18 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
                chip->base = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(chip->base))
                        return PTR_ERR(chip->base);
+
+               if (IS_REACHABLE(CONFIG_SUN6I_RTC_CCU)) {
+                       ret = sun6i_rtc_ccu_probe(dev, chip->base);
+                       if (ret)
+                               return ret;
+               }
        }
 
        platform_set_drvdata(pdev, chip);
 
+       chip->flags = (unsigned long)of_device_get_match_data(&pdev->dev);
+
        chip->irq = platform_get_irq(pdev, 0);
        if (chip->irq < 0)
                return chip->irq;
@@ -733,7 +786,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
                return PTR_ERR(chip->rtc);
 
        chip->rtc->ops = &sun6i_rtc_ops;
-       chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
+       if (chip->flags & RTC_LINEAR_DAY)
+               chip->rtc->range_max = (65536 * SECS_PER_DAY) - 1;
+       else
+               chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
 
        ret = devm_rtc_register_device(chip->rtc);
        if (ret)
@@ -758,6 +814,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
        { .compatible = "allwinner,sun8i-v3-rtc" },
        { .compatible = "allwinner,sun50i-h5-rtc" },
        { .compatible = "allwinner,sun50i-h6-rtc" },
+       { .compatible = "allwinner,sun50i-h616-rtc",
+               .data = (void *)RTC_LINEAR_DAY },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
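Editorial aside on the RTC_LINEAR_DAY handling added above: the H616 stores the date as a plain day count (with the UNIX epoch as day 0, matching the BSP driver) plus a separate H:M:S register. The sketch below is not part of the patch and the helper names are invented; it only restates the conversion done in sun6i_rtc_gettime(), sun6i_rtc_settime() and sun6i_rtc_setalarm() using the SECS_PER_DAY constant and SUN6I_TIME_* accessors from this file.

/* Hypothetical illustration of the H616 linear-day encoding. */
static void sun6i_linear_day_to_tm_example(u32 day, u32 hms, struct rtc_time *tm)
{
        /* The day count since 1970-01-01 expands to midnight of that day... */
        rtc_time64_to_tm((time64_t)day * SECS_PER_DAY, tm);

        /* ...and the separate H:M:S register supplies the time of day. */
        tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(hms);
        tm->tm_min  = SUN6I_TIME_GET_MIN_VALUE(hms);
        tm->tm_sec  = SUN6I_TIME_GET_SEC_VALUE(hms);
}

/* Going the other way, the integer division simply drops the H:M:S part. */
static u32 sun6i_tm_to_linear_day_example(struct rtc_time *tm)
{
        return div_u64(rtc_tm_to_time64(tm), SECS_PER_DAY);
}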
index 2018614..6eaa932 100644 (file)
@@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
+       ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
                            wm8350_rtc_update_handler, 0,
                            "RTC Seconds", wm8350);
+       if (ret)
+               return ret;
+
        wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
 
-       wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
+       ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
                            wm8350_rtc_alarm_handler, 0,
                            "RTC Alarm", wm8350);
+       if (ret) {
+               wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
+               return ret;
+       }
 
        return 0;
 }
index cf68a9b..d3d0054 100644 (file)
@@ -180,8 +180,6 @@ static int xgene_rtc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       /* HW does not support update faster than 1 seconds */
-       pdata->rtc->uie_unsupported = 1;
        pdata->rtc->ops = &xgene_rtc_ops;
        pdata->rtc->range_max = U32_MAX;
 
index f0763e3..cb24917 100644 (file)
@@ -745,9 +745,7 @@ sclp_sync_wait(void)
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
-               if (timer_pending(&sclp_request_timer) &&
-                   get_tod_clock_fast() > timeout &&
-                   del_timer(&sclp_request_timer))
+               if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
                        sclp_request_timer.function(&sclp_request_timer);
                cpu_relax();
        }
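Editorial aside on the timer_pending() removals in this and the following s390 hunks: del_timer() returns 1 only when it deactivates a timer that was still pending and 0 otherwise, so the separate timer_pending() test carried no extra information. A minimal sketch of the simplified pattern, using a hypothetical my_timer:

static struct timer_list my_timer;      /* hypothetical timer, illustration only */

static void expire_my_timer_now(void)
{
        /* del_timer() is nonzero only if the timer was still pending, so the
         * old "timer_pending(&t) && del_timer(&t)" collapses to one call.
         */
        if (del_timer(&my_timer))
                my_timer.function(&my_timer);
}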
index de02886..fe5ee26 100644 (file)
@@ -109,8 +109,7 @@ static void sclp_console_sync_queue(void)
        unsigned long flags;
 
        spin_lock_irqsave(&sclp_con_lock, flags);
-       if (timer_pending(&sclp_con_timer))
-               del_timer(&sclp_con_timer);
+       del_timer(&sclp_con_timer);
        while (sclp_con_queue_running) {
                spin_unlock_irqrestore(&sclp_con_lock, flags);
                sclp_sync_wait();
index 7bc4e4a..3b4e7e5 100644 (file)
@@ -231,8 +231,7 @@ sclp_vt220_emit_current(void)
                        list_add_tail(&sclp_vt220_current_request->list,
                                      &sclp_vt220_outqueue);
                        sclp_vt220_current_request = NULL;
-                       if (timer_pending(&sclp_vt220_timer))
-                               del_timer(&sclp_vt220_timer);
+                       del_timer(&sclp_vt220_timer);
                }
                sclp_vt220_flush_later = 0;
        }
@@ -776,8 +775,7 @@ static void __sclp_vt220_flush_buffer(void)
 
        sclp_vt220_emit_current();
        spin_lock_irqsave(&sclp_vt220_lock, flags);
-       if (timer_pending(&sclp_vt220_timer))
-               del_timer(&sclp_vt220_timer);
+       del_timer(&sclp_vt220_timer);
        while (sclp_vt220_queue_running) {
                spin_unlock_irqrestore(&sclp_vt220_lock, flags);
                sclp_sync_wait();
index 7ada994..38cc156 100644 (file)
@@ -354,10 +354,10 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
        if ((
                sense[0] == SENSE_DATA_CHECK      ||
                sense[0] == SENSE_EQUIPMENT_CHECK ||
-               sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+               sense[0] == (SENSE_EQUIPMENT_CHECK | SENSE_DEFERRED_UNIT_CHECK)
        ) && (
                sense[1] == SENSE_DRIVE_ONLINE ||
-               sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+               sense[1] == (SENSE_BEGINNING_OF_TAPE | SENSE_WRITE_MODE)
        )) {
                switch (request->op) {
                /*
index 05e136c..6d63b96 100644 (file)
@@ -113,16 +113,10 @@ ccw_device_timeout(struct timer_list *t)
 void
 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
 {
-       if (expires == 0) {
+       if (expires == 0)
                del_timer(&cdev->private->timer);
-               return;
-       }
-       if (timer_pending(&cdev->private->timer)) {
-               if (mod_timer(&cdev->private->timer, jiffies + expires))
-                       return;
-       }
-       cdev->private->timer.expires = jiffies + expires;
-       add_timer(&cdev->private->timer);
+       else
+               mod_timer(&cdev->private->timer, jiffies + expires);
 }
 
 int
index 8b46368..ab6a749 100644 (file)
@@ -112,16 +112,10 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
 {
        struct eadm_private *private = get_eadm_private(sch);
 
-       if (expires == 0) {
+       if (expires == 0)
                del_timer(&private->timer);
-               return;
-       }
-       if (timer_pending(&private->timer)) {
-               if (mod_timer(&private->timer, jiffies + expires))
-                       return;
-       }
-       private->timer.expires = jiffies + expires;
-       add_timer(&private->timer);
+       else
+               mod_timer(&private->timer, jiffies + expires);
 }
 
 static void eadm_subchannel_irq(struct subchannel *sch)
index 8fd5a17..6a65885 100644 (file)
@@ -315,6 +315,7 @@ struct ap_perms {
        unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
        unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
        unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+       unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)];
 };
 extern struct ap_perms ap_perms;
 extern struct mutex ap_perms_mutex;
index cf23ce1..7f69ca6 100644 (file)
@@ -155,7 +155,7 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
        /*
         * The cca_xxx2protkey call may fail when a card has been
         * addressed where the master key was changed after last fetch
-        * of the mkvp into the cache. Try 3 times: First witout verify
+        * of the mkvp into the cache. Try 3 times: First without verify
         * then with verify and last round with verify and old master
         * key verification pattern match not ignored.
         */
index 7dc2636..6e08d04 100644 (file)
@@ -1189,13 +1189,6 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
  * @matrix_mdev: a mediated matrix device
  * @kvm: reference to KVM instance
  *
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is set to avoid a potential lockdep splat.
- * The kvm->lock is taken to set the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
- *
  * Return: 0 if no other mediated matrix device has a reference to @kvm;
  * otherwise, returns an -EPERM.
  */
@@ -1269,18 +1262,11 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
  * by @matrix_mdev.
  *
  * @matrix_mdev: a matrix mediated device
- * @kvm: the pointer to the kvm structure being unset.
- *
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is cleared to avoid a potential lockdep splat.
- * The kvm->lock is taken to clear the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
  */
-static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
-                                  struct kvm *kvm)
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
 {
+       struct kvm *kvm = matrix_mdev->kvm;
+
        if (kvm && kvm->arch.crypto.crycbd) {
                down_write(&kvm->arch.crypto.pqap_hook_rwsem);
                kvm->arch.crypto.pqap_hook = NULL;
@@ -1311,7 +1297,7 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
        matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
 
        if (!data)
-               vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+               vfio_ap_mdev_unset_kvm(matrix_mdev);
        else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
                notify_rc = NOTIFY_DONE;
 
@@ -1448,7 +1434,7 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
                                 &matrix_mdev->iommu_notifier);
        vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                 &matrix_mdev->group_notifier);
-       vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+       vfio_ap_mdev_unset_kvm(matrix_mdev);
 }
 
 static int vfio_ap_mdev_get_device_info(unsigned long arg)
index 80e2a30..aa6dc3c 100644 (file)
@@ -285,10 +285,53 @@ static ssize_t aqmask_store(struct device *dev,
 
 static DEVICE_ATTR_RW(aqmask);
 
+static ssize_t admask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       int i, rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       buf[0] = '0';
+       buf[1] = 'x';
+       for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
+               snprintf(buf + 2 + 2 * i * sizeof(long),
+                        PAGE_SIZE - 2 - 2 * i * sizeof(long),
+                        "%016lx", zcdndev->perms.adm[i]);
+       buf[2 + 2 * i * sizeof(long)] = '\n';
+       buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+       rc = 2 + 2 * i * sizeof(long) + 1;
+
+       mutex_unlock(&ap_perms_mutex);
+
+       return rc;
+}
+
+static ssize_t admask_store(struct device *dev,
+                           struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       int rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
+                              AP_DOMAINS, &ap_perms_mutex);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(admask);
+
 static struct attribute *zcdn_dev_attrs[] = {
        &dev_attr_ioctlmask.attr,
        &dev_attr_apmask.attr,
        &dev_attr_aqmask.attr,
+       &dev_attr_admask.attr,
        NULL
 };
 
@@ -880,11 +923,22 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
        if (rc)
                goto out;
 
+       tdom = *domain;
+       if (perms != &ap_perms && tdom < AP_DOMAINS) {
+               if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+                       if (!test_bit_inv(tdom, perms->adm)) {
+                               rc = -ENODEV;
+                               goto out;
+                       }
+               } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+                       rc = -EOPNOTSUPP;
+                       goto out;
+               }
+       }
        /*
         * If a valid target domain is set and this domain is NOT a usage
         * domain but a control only domain, autoselect target domain.
         */
-       tdom = *domain;
        if (tdom < AP_DOMAINS &&
            !ap_test_config_usage_domain(tdom) &&
            ap_test_config_ctrl_domain(tdom))
@@ -1062,6 +1116,18 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
        if (rc)
                goto out_free;
 
+       if (perms != &ap_perms && domain < AUTOSEL_DOM) {
+               if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+                       if (!test_bit_inv(domain, perms->adm)) {
+                               rc = -ENODEV;
+                               goto out_free;
+                       }
+               } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+                       rc = -EOPNOTSUPP;
+                       goto out_free;
+               }
+       }
+
        pref_zc = NULL;
        pref_zq = NULL;
        spin_lock(&zcrypt_list_lock);
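Editorial sketch of the admask gating introduced above (not part of the patch; the helper name is invented and the AP_DOMAINS/AUTOSEL_DOM range checks are left out): with a non-default ap_perms, a CPRB is only allowed through if it is an admin message targeting a domain whose bit is set in perms->adm, or an ordinary usage message.

/* Hypothetical condensation of the checks added to _zcrypt_send_cprb()
 * and _zcrypt_send_ep11_cprb() above.
 */
static bool example_dom_allowed(struct ap_perms *perms, u16 dom, u32 msg_flags)
{
        if (perms == &ap_perms)                 /* default perms: no extra gating */
                return true;
        if (msg_flags & AP_MSG_FLAG_ADMIN)      /* admin CPRBs need the adm bit */
                return test_bit_inv(dom, perms->adm);
        return msg_flags & AP_MSG_FLAG_USAGE;   /* otherwise it must be a usage CPRB */
}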
index 3e259be..fcbd537 100644 (file)
@@ -90,7 +90,7 @@ static ssize_t online_store(struct device *dev,
        list_for_each_entry(zq, &zc->zqueues, list)
                maxzqs++;
        if (maxzqs > 0)
-               zq_uelist = kcalloc(maxzqs + 1, sizeof(zq), GFP_ATOMIC);
+               zq_uelist = kcalloc(maxzqs + 1, sizeof(*zq_uelist), GFP_ATOMIC);
        list_for_each_entry(zq, &zc->zqueues, list)
                if (zcrypt_queue_force_online(zq, online))
                        if (zq_uelist) {
index 9ce5a71..98d33f9 100644 (file)
@@ -1109,7 +1109,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
        if (kb->head.type == TOKTYPE_NON_CCA &&
            kb->head.version == TOKVER_EP11_AES) {
                has_header = true;
-               keysize = kb->head.len < keysize ? kb->head.len : keysize;
+               keysize = min_t(size_t, kb->head.len, keysize);
        }
 
        /* request cprb and payload */
index 7d41dfe..48c4dad 100644 (file)
 
 #include "ifcvf_base.h"
 
-static inline u8 ifc_ioread8(u8 __iomem *addr)
-{
-       return ioread8(addr);
-}
-static inline u16 ifc_ioread16 (__le16 __iomem *addr)
+struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
 {
-       return ioread16(addr);
+       return container_of(hw, struct ifcvf_adapter, vf);
 }
 
-static inline u32 ifc_ioread32(__le32 __iomem *addr)
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
 {
-       return ioread32(addr);
-}
+       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
-{
-       iowrite8(value, addr);
-}
+       vp_iowrite16(qid, &cfg->queue_select);
+       vp_iowrite16(vector, &cfg->queue_msix_vector);
 
-static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
-{
-       iowrite16(value, addr);
+       return vp_ioread16(&cfg->queue_msix_vector);
 }
 
-static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
 {
-       iowrite32(value, addr);
-}
+       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-static void ifc_iowrite64_twopart(u64 val,
-                                 __le32 __iomem *lo, __le32 __iomem *hi)
-{
-       ifc_iowrite32((u32)val, lo);
-       ifc_iowrite32(val >> 32, hi);
-}
+       cfg = hw->common_cfg;
+       vp_iowrite16(vector,  &cfg->msix_config);
 
-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
-{
-       return container_of(hw, struct ifcvf_adapter, vf);
+       return vp_ioread16(&cfg->msix_config);
 }
 
 static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
@@ -158,15 +142,16 @@ next:
                return -EIO;
        }
 
-       hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+       hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
 
        for (i = 0; i < hw->nr_vring; i++) {
-               ifc_iowrite16(i, &hw->common_cfg->queue_select);
-               notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
+               vp_iowrite16(i, &hw->common_cfg->queue_select);
+               notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
                hw->vring[i].notify_addr = hw->notify_base +
                        notify_off * hw->notify_off_multiplier;
                hw->vring[i].notify_pa = hw->notify_base_pa +
                        notify_off * hw->notify_off_multiplier;
+               hw->vring[i].irq = -EINVAL;
        }
 
        hw->lm_cfg = hw->base[IFCVF_LM_BAR];
@@ -176,17 +161,20 @@ next:
                  hw->common_cfg, hw->notify_base, hw->isr,
                  hw->dev_cfg, hw->notify_off_multiplier);
 
+       hw->vqs_reused_irq = -EINVAL;
+       hw->config_irq = -EINVAL;
+
        return 0;
 }
 
 u8 ifcvf_get_status(struct ifcvf_hw *hw)
 {
-       return ifc_ioread8(&hw->common_cfg->device_status);
+       return vp_ioread8(&hw->common_cfg->device_status);
 }
 
 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 {
-       ifc_iowrite8(status, &hw->common_cfg->device_status);
+       vp_iowrite8(status, &hw->common_cfg->device_status);
 }
 
 void ifcvf_reset(struct ifcvf_hw *hw)
@@ -214,11 +202,11 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
        u32 features_lo, features_hi;
        u64 features;
 
-       ifc_iowrite32(0, &cfg->device_feature_select);
-       features_lo = ifc_ioread32(&cfg->device_feature);
+       vp_iowrite32(0, &cfg->device_feature_select);
+       features_lo = vp_ioread32(&cfg->device_feature);
 
-       ifc_iowrite32(1, &cfg->device_feature_select);
-       features_hi = ifc_ioread32(&cfg->device_feature);
+       vp_iowrite32(1, &cfg->device_feature_select);
+       features_hi = vp_ioread32(&cfg->device_feature);
 
        features = ((u64)features_hi << 32) | features_lo;
 
@@ -271,12 +259,12 @@ void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
 
        WARN_ON(offset + length > hw->config_size);
        do {
-               old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+               old_gen = vp_ioread8(&hw->common_cfg->config_generation);
                p = dst;
                for (i = 0; i < length; i++)
-                       *p++ = ifc_ioread8(hw->dev_cfg + offset + i);
+                       *p++ = vp_ioread8(hw->dev_cfg + offset + i);
 
-               new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+               new_gen = vp_ioread8(&hw->common_cfg->config_generation);
        } while (old_gen != new_gen);
 }
 
@@ -289,18 +277,18 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
        p = src;
        WARN_ON(offset + length > hw->config_size);
        for (i = 0; i < length; i++)
-               ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
+               vp_iowrite8(*p++, hw->dev_cfg + offset + i);
 }
 
 static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
 {
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-       ifc_iowrite32(0, &cfg->guest_feature_select);
-       ifc_iowrite32((u32)features, &cfg->guest_feature);
+       vp_iowrite32(0, &cfg->guest_feature_select);
+       vp_iowrite32((u32)features, &cfg->guest_feature);
 
-       ifc_iowrite32(1, &cfg->guest_feature_select);
-       ifc_iowrite32(features >> 32, &cfg->guest_feature);
+       vp_iowrite32(1, &cfg->guest_feature_select);
+       vp_iowrite32(features >> 32, &cfg->guest_feature);
 }
 
 static int ifcvf_config_features(struct ifcvf_hw *hw)
@@ -329,7 +317,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
        ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
        q_pair_id = qid / hw->nr_vring;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
-       last_avail_idx = ifc_ioread16(avail_idx_addr);
+       last_avail_idx = vp_ioread16(avail_idx_addr);
 
        return last_avail_idx;
 }
@@ -344,7 +332,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
        q_pair_id = qid / hw->nr_vring;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
        hw->vring[qid].last_avail_idx = num;
-       ifc_iowrite16(num, avail_idx_addr);
+       vp_iowrite16(num, avail_idx_addr);
 
        return 0;
 }
@@ -352,41 +340,23 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
        struct virtio_pci_common_cfg __iomem *cfg;
-       struct ifcvf_adapter *ifcvf;
        u32 i;
 
-       ifcvf = vf_to_adapter(hw);
        cfg = hw->common_cfg;
-       ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
-
-       if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
-               IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
-               return -EINVAL;
-       }
-
        for (i = 0; i < hw->nr_vring; i++) {
                if (!hw->vring[i].ready)
                        break;
 
-               ifc_iowrite16(i, &cfg->queue_select);
-               ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+               vp_iowrite16(i, &cfg->queue_select);
+               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
                                     &cfg->queue_desc_hi);
-               ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
                                      &cfg->queue_avail_hi);
-               ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
                                     &cfg->queue_used_hi);
-               ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
-               ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);
-
-               if (ifc_ioread16(&cfg->queue_msix_vector) ==
-                   VIRTIO_MSI_NO_VECTOR) {
-                       IFCVF_ERR(ifcvf->pdev,
-                                 "No msix vector for queue %u\n", i);
-                       return -EINVAL;
-               }
-
+               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
                ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
-               ifc_iowrite16(1, &cfg->queue_enable);
+               vp_iowrite16(1, &cfg->queue_enable);
        }
 
        return 0;
@@ -394,18 +364,12 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 
 static void ifcvf_hw_disable(struct ifcvf_hw *hw)
 {
-       struct virtio_pci_common_cfg __iomem *cfg;
        u32 i;
 
-       cfg = hw->common_cfg;
-       ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);
-
+       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
        for (i = 0; i < hw->nr_vring; i++) {
-               ifc_iowrite16(i, &cfg->queue_select);
-               ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
        }
-
-       ifc_ioread16(&cfg->queue_msix_vector);
 }
 
 int ifcvf_start_hw(struct ifcvf_hw *hw)
@@ -433,5 +397,5 @@ void ifcvf_stop_hw(struct ifcvf_hw *hw)
 
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
 {
-       ifc_iowrite16(qid, hw->vring[qid].notify_addr);
+       vp_iowrite16(qid, hw->vring[qid].notify_addr);
 }
index c486873..115b61f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
 #include <linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
 #include <uapi/linux/virtio_net.h>
 #include <uapi/linux/virtio_blk.h>
 #include <uapi/linux/virtio_config.h>
@@ -27,8 +28,6 @@
 
 #define IFCVF_QUEUE_ALIGNMENT  PAGE_SIZE
 #define IFCVF_QUEUE_MAX                32768
-#define IFCVF_MSI_CONFIG_OFF   0
-#define IFCVF_MSI_QUEUE_OFF    1
 #define IFCVF_PCI_MAX_RESOURCE 6
 
 #define IFCVF_LM_CFG_SIZE              0x40
 #define ifcvf_private_to_vf(adapter) \
        (&((struct ifcvf_adapter *)adapter)->vf)
 
+/* each vq and the config interrupt has its own vector */
+#define MSIX_VECTOR_PER_VQ_AND_CONFIG          1
+/* all vqs share one vector, and the config interrupt has a separate vector */
+#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG       2
+/* all vqs and the config interrupt share one vector */
+#define MSIX_VECTOR_DEV_SHARED                 3
+
 struct vring_info {
        u64 desc;
        u64 avail;
@@ -60,25 +66,27 @@ struct ifcvf_hw {
        u8 __iomem *isr;
        /* Live migration */
        u8 __iomem *lm_cfg;
-       u16 nr_vring;
        /* Notification bar number */
        u8 notify_bar;
+       u8 msix_vector_status;
+       /* virtio-net or virtio-blk device config size */
+       u32 config_size;
        /* Notification bar address */
        void __iomem *notify_base;
        phys_addr_t notify_base_pa;
        u32 notify_off_multiplier;
+       u32 dev_type;
        u64 req_features;
        u64 hw_features;
-       u32 dev_type;
        struct virtio_pci_common_cfg __iomem *common_cfg;
        void __iomem *dev_cfg;
        struct vring_info vring[IFCVF_MAX_QUEUES];
        void __iomem * const *base;
        char config_msix_name[256];
        struct vdpa_callback config_cb;
-       unsigned int config_irq;
-       /* virtio-net or virtio-blk device config size */
-       u32 config_size;
+       int config_irq;
+       int vqs_reused_irq;
+       u16 nr_vring;
 };
 
 struct ifcvf_adapter {
@@ -123,4 +131,6 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
 u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
 #endif /* _IFCVF_H_ */
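Editorial aside on the three MSIX_VECTOR_* layouts defined above: which vector a virtqueue ends up on depends on how many vectors pci_alloc_irq_vectors() could actually provide, as handled by the request helpers added to ifcvf_main.c below. The hypothetical helper here (not part of the patch) only makes that mapping explicit.

/* Hypothetical illustration of the per-layout MSI-X vector assignment. */
static int example_vq_vector(struct ifcvf_hw *hw, u16 qid)
{
        switch (hw->msix_vector_status) {
        case MSIX_VECTOR_PER_VQ_AND_CONFIG:
                return qid;     /* one vector per vq, config gets vector nr_vring */
        case MSIX_VECTOR_SHARED_VQ_AND_CONFIG:
                return 0;       /* all vqs share vector 0, config gets vector 1 */
        case MSIX_VECTOR_DEV_SHARED:
        default:
                return 0;       /* vqs and the config interrupt share vector 0 */
        }
}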
index d1a6b5a..4366320 100644 (file)
@@ -27,7 +27,7 @@ static irqreturn_t ifcvf_config_changed(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
+static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
 {
        struct vring_info *vring = arg;
 
@@ -37,76 +37,324 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
+{
+       struct ifcvf_hw *vf = arg;
+       struct vring_info *vring;
+       int i;
+
+       for (i = 0; i < vf->nr_vring; i++) {
+               vring = &vf->vring[i];
+               if (vring->cb.callback)
+                       vring->cb.callback(vring->cb.private);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
+{
+       struct ifcvf_hw *vf = arg;
+       u8 isr;
+
+       isr = vp_ioread8(vf->isr);
+       if (isr & VIRTIO_PCI_ISR_CONFIG)
+               ifcvf_config_changed(irq, arg);
+
+       return ifcvf_vqs_reused_intr_handler(irq, arg);
+}
+
 static void ifcvf_free_irq_vectors(void *data)
 {
        pci_free_irq_vectors(data);
 }
 
-static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;
 
+       for (i = 0; i < vf->nr_vring; i++) {
+               if (vf->vring[i].irq != -EINVAL) {
+                       devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+                       vf->vring[i].irq = -EINVAL;
+               }
+       }
+}
 
-       for (i = 0; i < queues; i++) {
-               devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
-               vf->vring[i].irq = -EINVAL;
+static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->vqs_reused_irq != -EINVAL) {
+               devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
+               vf->vqs_reused_irq = -EINVAL;
        }
 
-       devm_free_irq(&pdev->dev, vf->config_irq, vf);
+}
+
+static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               ifcvf_free_per_vq_irq(adapter);
+       else
+               ifcvf_free_vqs_reused_irq(adapter);
+}
+
+static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->config_irq == -EINVAL)
+               return;
+
+       /* If the irq is shared by all vqs and the config interrupt,
+        * it has already been freed in ifcvf_free_vq_irq, so we only need
+        * to free the config irq here when msix_vector_status != MSIX_VECTOR_DEV_SHARED.
+        */
+       if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
+               devm_free_irq(&pdev->dev, vf->config_irq, vf);
+               vf->config_irq = -EINVAL;
+       }
+}
+
+static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+
+       ifcvf_free_vq_irq(adapter);
+       ifcvf_free_config_irq(adapter);
        ifcvf_free_irq_vectors(pdev);
 }
 
-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+/* ifcvf MSI-X vector allocator: this helper tries to allocate
+ * vectors for all virtqueues and the config interrupt.
+ * It returns the number of allocated vectors, or a negative
+ * error code on failure.
+ */
+static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
-       int vector, i, ret, irq;
-       u16 max_intr;
+       int max_intr, ret;
 
        /* all queues and the config interrupt */
        max_intr = vf->nr_vring + 1;
+       ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 
-       ret = pci_alloc_irq_vectors(pdev, max_intr,
-                                   max_intr, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }
 
+       if (ret < max_intr)
+               IFCVF_INFO(pdev,
+                          "Requested %u vectors, however only %u allocated, lower performance\n",
+                          max_intr, ret);
+
+       return ret;
+}
+
+static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vf->vqs_reused_irq = -EINVAL;
+       for (i = 0; i < vf->nr_vring; i++) {
+               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
+               vector = i;
+               irq = pci_irq_vector(pdev, vector);
+               ret = devm_request_irq(&pdev->dev, irq,
+                                      ifcvf_vq_intr_handler, 0,
+                                      vf->vring[i].msix_name,
+                                      &vf->vring[i]);
+               if (ret) {
+                       IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
+                       goto err;
+               }
+
+               vf->vring[i].irq = irq;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+}
+
+static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vector = 0;
+       snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
+       irq = pci_irq_vector(pdev, vector);
+       ret = devm_request_irq(&pdev->dev, irq,
+                              ifcvf_vqs_reused_intr_handler, 0,
+                              vf->vring[0].msix_name, vf);
+       if (ret) {
+               IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
+               goto err;
+       }
+
+       vf->vqs_reused_irq = irq;
+       for (i = 0; i < vf->nr_vring; i++) {
+               vf->vring[i].irq = -EINVAL;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+}
+
+static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vector = 0;
+       snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
+       irq = pci_irq_vector(pdev, vector);
+       ret = devm_request_irq(&pdev->dev, irq,
+                              ifcvf_dev_intr_handler, 0,
+                              vf->vring[0].msix_name, vf);
+       if (ret) {
+               IFCVF_ERR(pdev, "Failed to request irq for the device\n");
+               goto err;
+       }
+
+       vf->vqs_reused_irq = irq;
+       for (i = 0; i < vf->nr_vring; i++) {
+               vf->vring[i].irq = -EINVAL;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       vf->config_irq = irq;
+       ret = ifcvf_set_config_vector(vf, vector);
+       if (ret == VIRTIO_MSI_NO_VECTOR) {
+               IFCVF_ERR(pdev, "No msix vector for device config\n");
+               goto err;
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+
+}
+
+static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+       int ret;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               ret = ifcvf_request_per_vq_irq(adapter);
+       else
+               ret = ifcvf_request_vqs_reused_irq(adapter);
+
+       return ret;
+}
+
+static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int config_vector, ret;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+               return 0;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               /* vectors 0 to vf->nr_vring - 1 are for the vqs, vector vf->nr_vring is for the config interrupt */
+               config_vector = vf->nr_vring;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+               /* vector 0 for vqs and 1 for config interrupt */
+               config_vector = 1;
+
        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
                 pci_name(pdev));
-       vector = 0;
-       vf->config_irq = pci_irq_vector(pdev, vector);
+       vf->config_irq = pci_irq_vector(pdev, config_vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
-               return ret;
+               goto err;
        }
 
-       for (i = 0; i < vf->nr_vring; i++) {
-               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
-                        pci_name(pdev), i);
-               vector = i + IFCVF_MSI_QUEUE_OFF;
-               irq = pci_irq_vector(pdev, vector);
-               ret = devm_request_irq(&pdev->dev, irq,
-                                      ifcvf_intr_handler, 0,
-                                      vf->vring[i].msix_name,
-                                      &vf->vring[i]);
-               if (ret) {
-                       IFCVF_ERR(pdev,
-                                 "Failed to request irq for vq %d\n", i);
-                       ifcvf_free_irq(adapter, i);
+       ret = ifcvf_set_config_vector(vf, config_vector);
+       if (ret == VIRTIO_MSI_NO_VECTOR) {
+               IFCVF_ERR(pdev, "No msix vector for device config\n");
+               goto err;
+       }
 
-                       return ret;
-               }
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
 
-               vf->vring[i].irq = irq;
+       return -EFAULT;
+}
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+       int nvectors, ret, max_intr;
+
+       nvectors = ifcvf_alloc_vectors(adapter);
+       if (nvectors <= 0)
+               return -EFAULT;
+
+       vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
+       max_intr = vf->nr_vring + 1;
+       if (nvectors < max_intr)
+               vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;
+
+       if (nvectors == 1) {
+               vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
+               ret = ifcvf_request_dev_irq(adapter);
+
+               return ret;
        }
 
+       ret = ifcvf_request_vq_irq(adapter);
+       if (ret)
+               return ret;
+
+       ret = ifcvf_request_config_irq(adapter);
+
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -263,7 +511,7 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
 
        if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
                ifcvf_stop_datapath(adapter);
-               ifcvf_free_irq(adapter, vf->nr_vring);
+               ifcvf_free_irq(adapter);
        }
 
        ifcvf_reset_vring(adapter);
@@ -348,7 +596,7 @@ static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return ioread8(&vf->common_cfg->config_generation);
+       return vp_ioread8(&vf->common_cfg->config_generation);
 }
 
 static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
@@ -410,7 +658,10 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return vf->vring[qid].irq;
+       if (vf->vqs_reused_irq < 0)
+               return vf->vring[qid].irq;
+       else
+               return -EINVAL;
 }
 
 static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
index d0f9107..7900130 100644 (file)
@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
        u32 cur_num_vqs;
        struct notifier_block nb;
        struct vdpa_callback config_cb;
+       struct mlx5_vdpa_wq_ent cvq_ent;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1475,7 +1476,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
        virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
        struct mlx5_core_dev *pfmdev;
        size_t read;
-       u8 mac[ETH_ALEN];
+       u8 mac[ETH_ALEN], mac_back[ETH_ALEN];
 
        pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
        switch (cmd) {
@@ -1489,6 +1490,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
                        break;
                }
 
+               if (is_zero_ether_addr(mac))
+                       break;
+
                if (!is_zero_ether_addr(ndev->config.mac)) {
                        if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
                                mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
@@ -1503,7 +1507,47 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
                        break;
                }
 
+               /* Back up the original mac address so that we can restore it
+                * if adding the forward rules fails.
+                */
+               memcpy(mac_back, ndev->config.mac, ETH_ALEN);
+
                memcpy(ndev->config.mac, mac, ETH_ALEN);
+
+               /* Recreate the flow table entry so that packets can be forwarded back.
+                */
+               remove_fwd_to_tir(ndev);
+
+               if (add_fwd_to_tir(ndev)) {
+                       mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
+
+                       /* We should rarely get here, but double-check anyway. */
+                       if (is_zero_ether_addr(mac_back)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n");
+                               break;
+                       }
+
+                       /* Try to restore the original mac address in the MPFS table, and
+                        * try to restore the forward rule entry.
+                        */
+                       if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n",
+                                              ndev->config.mac);
+                       }
+
+                       if (mlx5_mpfs_add_mac(pfmdev, mac_back)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n",
+                                              mac_back);
+                       }
+
+                       memcpy(ndev->config.mac, mac_back, ETH_ALEN);
+
+                       if (add_fwd_to_tir(ndev))
+                               mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
+
+                       break;
+               }
+
                status = VIRTIO_NET_OK;
                break;
 
@@ -1615,6 +1659,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
        mvdev = wqent->mvdev;
        ndev = to_mlx5_vdpa_ndev(mvdev);
        cvq = &mvdev->cvq;
+
+       mutex_lock(&ndev->reslock);
+
+       if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+               goto out;
+
        if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
                goto out;
 
@@ -1653,9 +1703,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 
                if (vringh_need_notify_iotlb(&cvq->vring))
                        vringh_notify(&cvq->vring);
+
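+               /* Handle one control command per invocation, then re-queue
+                * the work instead of looping over all pending commands.
+                */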
+               queue_work(mvdev->wq, &wqent->work);
+               break;
        }
+
 out:
-       kfree(wqent);
+       mutex_unlock(&ndev->reslock);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1663,22 +1717,15 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        struct mlx5_vdpa_virtqueue *mvq;
-       struct mlx5_vdpa_wq_ent *wqent;
 
        if (!is_index_valid(mvdev, idx))
                return;
 
        if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
-               if (!mvdev->cvq.ready)
-                       return;
-
-               wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-               if (!wqent)
+               if (!mvdev->wq || !mvdev->cvq.ready)
                        return;
 
-               wqent->mvdev = mvdev;
-               INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-               queue_work(mvdev->wq, &wqent->work);
+               queue_work(mvdev->wq, &ndev->cvq_ent.work);
                return;
        }
 
@@ -2137,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
                goto err_mr;
 
        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
-               return 0;
+               goto err_mr;
 
        restore_channels_info(ndev);
        err = setup_driver(mvdev);
@@ -2152,12 +2199,14 @@ err_mr:
        return err;
 }
 
+/* reslock must be held for this function */
 static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 {
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        int err;
 
-       mutex_lock(&ndev->reslock);
+       WARN_ON(!mutex_is_locked(&ndev->reslock));
+
        if (ndev->setup) {
                mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
                err = 0;
@@ -2187,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                goto err_fwd;
        }
        ndev->setup = true;
-       mutex_unlock(&ndev->reslock);
 
        return 0;
 
@@ -2198,23 +2246,23 @@ err_tir:
 err_rqt:
        teardown_virtqueues(ndev);
 out:
-       mutex_unlock(&ndev->reslock);
        return err;
 }
 
+/* reslock must be held for this function */
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
-       mutex_lock(&ndev->reslock);
+
+       WARN_ON(!mutex_is_locked(&ndev->reslock));
+
        if (!ndev->setup)
-               goto out;
+               return;
 
        remove_fwd_to_tir(ndev);
        destroy_tir(ndev);
        destroy_rqt(ndev);
        teardown_virtqueues(ndev);
        ndev->setup = false;
-out:
-       mutex_unlock(&ndev->reslock);
 }
 
 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2235,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 
        print_status(mvdev, status, true);
 
+       mutex_lock(&ndev->reslock);
+
        if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                        err = setup_driver(mvdev);
@@ -2244,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
                        }
                } else {
                        mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
-                       return;
+                       goto err_clear;
                }
        }
 
        ndev->mvdev.status = status;
+       mutex_unlock(&ndev->reslock);
        return;
 
 err_setup:
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
        ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+       mutex_unlock(&ndev->reslock);
 }
 
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2263,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 
        print_status(mvdev, 0, true);
        mlx5_vdpa_info(mvdev, "performing device reset\n");
+
+       mutex_lock(&ndev->reslock);
        teardown_driver(ndev);
        clear_vqs_ready(ndev);
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2275,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
                if (mlx5_vdpa_create_mr(mvdev, NULL))
                        mlx5_vdpa_warn(mvdev, "create MR failed\n");
        }
+       mutex_unlock(&ndev->reslock);
 
        return 0;
 }
@@ -2310,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        bool change_map;
        int err;
 
+       mutex_lock(&ndev->reslock);
+
        err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
        if (err) {
                mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-               return err;
+               goto err;
        }
 
        if (change_map)
-               return mlx5_vdpa_change_map(mvdev, iotlb);
+               err = mlx5_vdpa_change_map(mvdev, iotlb);
 
-       return 0;
+err:
+       mutex_unlock(&ndev->reslock);
+       return err;
 }
 
 static void mlx5_vdpa_free(struct vdpa_device *vdev)
@@ -2565,6 +2626,28 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
        return ret;
 }
 
+static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
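+       /* Program the vport MTU; MLX5V_ETH_HARD_MTU accounts for the L2
+        * header and FCS overhead on top of the requested MTU.
+        */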
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *in;
+       int err;
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu,
+                mtu + MLX5V_ETH_HARD_MTU);
+       MLX5_SET(modify_nic_vport_context_in, in, opcode,
+                MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+
+       err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
+
+       kvfree(in);
+       return err;
+}
+
 static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
                             const struct vdpa_dev_set_config *add_config)
 {
@@ -2624,6 +2707,13 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        init_mvqs(ndev);
        mutex_init(&ndev->reslock);
        config = &ndev->config;
+
+       if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
+               err = config_func_mtu(mdev, add_config->net.mtu);
+               if (err)
+                       goto err_mtu;
+       }
+
        err = query_mtu(mdev, &mtu);
        if (err)
                goto err_mtu;
@@ -2668,6 +2758,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        if (err)
                goto err_mr;
 
+       ndev->cvq_ent.mvdev = mvdev;
+       INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
        mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
        if (!mvdev->wq) {
                err = -ENOMEM;
@@ -2707,9 +2799,12 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
        struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
        struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct workqueue_struct *wq;
 
        mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
-       destroy_workqueue(mvdev->wq);
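+       /* Clear mvdev->wq before destroying it so that mlx5_vdpa_kick_vq()
+        * stops queueing new control VQ work.
+        */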
+       wq = mvdev->wq;
+       mvdev->wq = NULL;
+       destroy_workqueue(wq);
        _vdpa_unregister_device(dev);
        mgtdev->ndev = NULL;
 }
@@ -2741,7 +2836,8 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        mgtdev->mgtdev.device = mdev->device;
        mgtdev->mgtdev.id_table = id_table;
        mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
-                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP) |
+                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
        mgtdev->mgtdev.max_supported_vqs =
                MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
        mgtdev->mgtdev.supported_features = get_supported_features(mdev);
index 1ea5254..2b75c00 100644 (file)
@@ -232,7 +232,7 @@ static int vdpa_name_match(struct device *dev, const void *data)
        return (strcmp(dev_name(&vdev->dev), data) == 0);
 }
 
-static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        struct device *dev;
 
@@ -257,7 +257,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
  *
  * Return: Returns an error when fail to add device to vDPA bus
  */
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        if (!vdev->mdev)
                return -EINVAL;
@@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device);
  *
  * Return: Returns an error when fail to add to vDPA bus
  */
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        int err;
 
index 40b0983..5829cf2 100644 (file)
@@ -62,8 +62,12 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
         */
        if (start == 0 && last == ULONG_MAX) {
                u64 mid = last / 2;
+               int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
+                               perm, opaque);
+
+               if (err)
+                       return err;
 
-               vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
                addr += mid + 1;
                start = mid + 1;
        }
index ec5249e..4c2f0bd 100644 (file)
@@ -42,7 +42,7 @@ struct vhost_vdpa {
        struct device dev;
        struct cdev cdev;
        atomic_t opened;
-       int nvqs;
+       u32 nvqs;
        int virtio_id;
        int minor;
        struct eventfd_ctx *config_ctx;
@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
                return;
 
        irq = ops->get_vq_irq(vdpa, qid);
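+       /* No usable interrupt for this virtqueue; skip irq bypass setup. */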
+       if (irq < 0)
+               return;
+
        irq_bypass_unregister_producer(&vq->call_ctx.producer);
-       if (!vq->call_ctx.ctx || irq < 0)
+       if (!vq->call_ctx.ctx)
                return;
 
        vq->call_ctx.producer.token = vq->call_ctx.ctx;
@@ -158,7 +161,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u8 status, status_old;
-       int ret, nvqs = v->nvqs;
+       u32 nvqs = v->nvqs;
+       int ret;
        u16 i;
 
        if (copy_from_user(&status, statusp, sizeof(status)))
@@ -355,6 +359,30 @@ static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
        return 0;
 }
 
+static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
+{
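+       /* Report the size of the device config space to userspace. */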
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+       u32 size;
+
+       size = ops->get_config_size(vdpa);
+
+       if (copy_to_user(argp, &size, sizeof(size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+
+       if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
+               return -EFAULT;
+
+       return 0;
+}
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
 {
@@ -492,6 +520,12 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        case VHOST_VDPA_GET_IOVA_RANGE:
                r = vhost_vdpa_get_iova_range(v, argp);
                break;
+       case VHOST_VDPA_GET_CONFIG_SIZE:
+               r = vhost_vdpa_get_config_size(v, argp);
+               break;
+       case VHOST_VDPA_GET_VQS_COUNT:
+               r = vhost_vdpa_get_vqs_count(v, argp);
+               break;
        default:
                r = vhost_dev_ioctl(&v->vdev, cmd, argp);
                if (r == -ENOIOCTLCMD)
@@ -948,7 +982,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
        struct vhost_vdpa *v;
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
-       int nvqs, i, r, opened;
+       int r, opened;
+       u32 i, nvqs;
 
        v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
 
@@ -1001,7 +1036,7 @@ err:
 
 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
 {
-       int i;
+       u32 i;
 
        for (i = 0; i < v->nvqs; i++)
                vhost_vdpa_unsetup_vq_irq(v, i);
index 1768362..d02173f 100644 (file)
@@ -2550,8 +2550,9 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                       &vq->avail->idx, r);
                return false;
        }
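+       /* Cache the freshly read available index; new buffers are pending if
+        * it differs from last_avail_idx.
+        */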
+       vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
-       return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
+       return vq->avail_idx != vq->last_avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_enable_notify);
 
index 0ae1a39..a1c467a 100644 (file)
@@ -78,6 +78,7 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
 }
 
 static const struct acpi_device_id vmgenid_ids[] = {
+       { "VMGENCTR", 0 },
        { "VM_GEN_COUNTER", 0 },
        { }
 };
index 492fc26..b5adf6a 100644 (file)
@@ -105,7 +105,7 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
        tristate "Virtio mem driver"
-       depends on X86_64
+       depends on X86_64 || ARM64
        depends on VIRTIO
        depends on MEMORY_HOTPLUG
        depends on MEMORY_HOTREMOVE
@@ -115,8 +115,9 @@ config VIRTIO_MEM
         This driver provides access to virtio-mem paravirtualized memory
         devices, allowing to hotplug and hotunplug memory.
 
-        This driver was only tested under x86-64, but should theoretically
-        work on all architectures that support memory hotplug and hotremove.
+        This driver was only tested under x86-64 and arm64, but should
+        theoretically work on all architectures that support memory hotplug
+        and hotremove.
 
         If unsure, say M.
 
index fdbde1d..d724f67 100644 (file)
@@ -24,46 +24,17 @@ MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
 #endif
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev)
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;
 
-       if (vp_dev->intx_enabled) {
-               /*
-                * The below synchronize() guarantees that any
-                * interrupt for this line arriving after
-                * synchronize_irq() has completed is guaranteed to see
-                * intx_soft_enabled == false.
-                */
-               WRITE_ONCE(vp_dev->intx_soft_enabled, false);
+       if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);
-       }
-
-       for (i = 0; i < vp_dev->msix_vectors; ++i)
-               disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
-}
-
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev)
-{
-       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       int i;
-
-       if (vp_dev->intx_enabled) {
-               disable_irq(vp_dev->pci_dev->irq);
-               /*
-                * The above disable_irq() provides TSO ordering and
-                * as such promotes the below store to store-release.
-                */
-               WRITE_ONCE(vp_dev->intx_soft_enabled, true);
-               enable_irq(vp_dev->pci_dev->irq);
-               return;
-       }
 
        for (i = 0; i < vp_dev->msix_vectors; ++i)
-               enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
+               synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
 /* the notify function used when creating a virt queue */
@@ -113,9 +84,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;
 
-       if (!READ_ONCE(vp_dev->intx_soft_enabled))
-               return IRQ_NONE;
-
        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);
@@ -173,8 +141,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                         vp_config_changed, IRQF_NO_AUTOEN,
-                         vp_dev->msix_names[v],
+                         vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
@@ -193,8 +160,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                                 vp_vring_interrupt, IRQF_NO_AUTOEN,
-                                 vp_dev->msix_names[v],
+                                 vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
@@ -371,7 +337,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-                                 vring_interrupt, IRQF_NO_AUTOEN,
+                                 vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err)
index 23f6c5c..eb17a29 100644 (file)
@@ -63,7 +63,6 @@ struct virtio_pci_device {
        /* MSI-X support */
        int msix_enabled;
        int intx_enabled;
-       bool intx_soft_enabled;
        cpumask_var_t *msix_affinity_masks;
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
@@ -102,10 +101,8 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
        return container_of(vdev, struct virtio_pci_device, vdev);
 }
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev);
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev);
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev);
 /* the notify function used when creating a virt queue */
 bool vp_notify(struct virtqueue *vq);
 /* the config->del_vqs() implementation */
index 34141b9..6f4e34c 100644 (file)
@@ -98,8 +98,8 @@ static void vp_reset(struct virtio_device *vdev)
        /* Flush out the status write, and flush in device writes,
         * including MSi-X interrupts, if any. */
        vp_legacy_get_status(&vp_dev->ldev);
-       /* Disable VQ/configuration callbacks. */
-       vp_disable_cbs(vdev);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -185,7 +185,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
 }
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = vp_get,
        .set            = vp_set,
        .get_status     = vp_get_status,
index 5455bc0..a2671a2 100644 (file)
@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
         */
        while (vp_modern_get_status(mdev))
                msleep(1);
-       /* Disable VQ/configuration callbacks. */
-       vp_disable_cbs(vdev);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -293,7 +293,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
 
        for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
             pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
-               u8 type, cap_len, id;
+               u8 type, cap_len, id, res_bar;
                u32 tmp32;
                u64 res_offset, res_length;
 
@@ -315,9 +315,14 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
                if (id != required_id)
                        continue;
 
-               /* Type, and ID match, looks good */
                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
-                                                        bar), bar);
+                                                        bar), &res_bar);
+               if (res_bar >= PCI_STD_NUM_BARS)
+                       continue;
+
+               /* Type and ID match, and the BAR value isn't reserved.
+                * Looks good.
+                */
 
                /* Read the lower 32bit of length and offset */
                pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
@@ -337,6 +342,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
                                                     length_hi), &tmp32);
                res_length |= ((u64)tmp32) << 32;
 
+               *bar = res_bar;
                *offset = res_offset;
                *len = res_length;
 
@@ -380,7 +386,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
 }
 
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = NULL,
        .set            = NULL,
        .generation     = vp_generation,
@@ -398,7 +403,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = vp_get,
        .set            = vp_set,
        .generation     = vp_generation,
index e8b3ff2..591738a 100644 (file)
@@ -35,6 +35,13 @@ vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
        pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
                              &length);
 
+       /* Check if the BAR may have changed since we requested the region. */
+       if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
+               dev_err(&dev->dev,
+                       "virtio_pci: bar unexpectedly changed to %u\n", bar);
+               return NULL;
+       }
+
        if (length <= start) {
                dev_err(&dev->dev,
                        "virtio_pci: bad capability len %u (>%u expected)\n",
@@ -120,7 +127,7 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
                                     &bar);
 
                /* Ignore structures with reserved BAR values */
-               if (bar > 0x5)
+               if (bar >= PCI_STD_NUM_BARS)
                        continue;
 
                if (type == cfg_type) {
index 962f147..cfb028c 100644 (file)
@@ -379,19 +379,11 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 
        flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
 
-       if (flags & VRING_DESC_F_INDIRECT) {
-               dma_unmap_single(vring_dma_dev(vq),
-                                virtio64_to_cpu(vq->vq.vdev, desc->addr),
-                                virtio32_to_cpu(vq->vq.vdev, desc->len),
-                                (flags & VRING_DESC_F_WRITE) ?
-                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       } else {
-               dma_unmap_page(vring_dma_dev(vq),
-                              virtio64_to_cpu(vq->vq.vdev, desc->addr),
-                              virtio32_to_cpu(vq->vq.vdev, desc->len),
-                              (flags & VRING_DESC_F_WRITE) ?
-                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       }
+       dma_unmap_page(vring_dma_dev(vq),
+                      virtio64_to_cpu(vq->vq.vdev, desc->addr),
+                      virtio32_to_cpu(vq->vq.vdev, desc->len),
+                      (flags & VRING_DESC_F_WRITE) ?
+                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
@@ -984,24 +976,24 @@ static struct virtqueue *vring_create_virtqueue_split(
  * Packed ring specific functions - *_packed().
  */
 
-static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
-                                    struct vring_desc_extra *state)
+static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
+                                    struct vring_desc_extra *extra)
 {
        u16 flags;
 
        if (!vq->use_dma_api)
                return;
 
-       flags = state->flags;
+       flags = extra->flags;
 
        if (flags & VRING_DESC_F_INDIRECT) {
                dma_unmap_single(vring_dma_dev(vq),
-                                state->addr, state->len,
+                                extra->addr, extra->len,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
                dma_unmap_page(vring_dma_dev(vq),
-                              state->addr, state->len,
+                              extra->addr, extra->len,
                               (flags & VRING_DESC_F_WRITE) ?
                               DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
@@ -1017,19 +1009,11 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 
        flags = le16_to_cpu(desc->flags);
 
-       if (flags & VRING_DESC_F_INDIRECT) {
-               dma_unmap_single(vring_dma_dev(vq),
-                                le64_to_cpu(desc->addr),
-                                le32_to_cpu(desc->len),
-                                (flags & VRING_DESC_F_WRITE) ?
-                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       } else {
-               dma_unmap_page(vring_dma_dev(vq),
-                              le64_to_cpu(desc->addr),
-                              le32_to_cpu(desc->len),
-                              (flags & VRING_DESC_F_WRITE) ?
-                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       }
+       dma_unmap_page(vring_dma_dev(vq),
+                      le64_to_cpu(desc->addr),
+                      le32_to_cpu(desc->len),
+                      (flags & VRING_DESC_F_WRITE) ?
+                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -1303,8 +1287,7 @@ unmap_release:
        for (n = 0; n < total_sg; n++) {
                if (i == err_idx)
                        break;
-               vring_unmap_state_packed(vq,
-                                        &vq->packed.desc_extra[curr]);
+               vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
                curr = vq->packed.desc_extra[curr].next;
                i++;
                if (i >= vq->packed.vring.num)
@@ -1383,8 +1366,8 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
        if (unlikely(vq->use_dma_api)) {
                curr = id;
                for (i = 0; i < state->num; i++) {
-                       vring_unmap_state_packed(vq,
-                               &vq->packed.desc_extra[curr]);
+                       vring_unmap_extra_packed(vq,
+                                                &vq->packed.desc_extra[curr]);
                        curr = vq->packed.desc_extra[curr].next;
                }
        }
index 085f5a4..c4e82a8 100644 (file)
@@ -1779,7 +1779,7 @@ config BCM7038_WDT
        tristate "BCM63xx/BCM7038 Watchdog"
        select WATCHDOG_CORE
        depends on HAS_IOMEM
-       depends on ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
+       depends on ARCH_BCM4908 || ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
        help
          Watchdog driver for the built-in hardware in Broadcom 7038 and
          later SoCs used in set-top boxes.  BCM7038 was made public
index 436571b..bd06622 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/watchdog.h>
 
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+                               __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
 struct aspeed_wdt {
        struct watchdog_device  wdd;
        void __iomem            *base;
@@ -266,6 +271,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
        wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
        watchdog_init_timeout(&wdt->wdd, 0, dev);
 
+       watchdog_set_nowayout(&wdt->wdd, nowayout);
+
        np = dev->of_node;
 
        ofdid = of_match_node(aspeed_wdt_of_table, np);
index 51bfb79..d0c5d47 100644 (file)
@@ -66,6 +66,7 @@ struct imx2_wdt_device {
        struct watchdog_device wdog;
        bool ext_reset;
        bool clk_is_on;
+       bool no_ping;
 };
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -312,12 +313,18 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 
        wdev->ext_reset = of_property_read_bool(dev->of_node,
                                                "fsl,ext-reset-output");
+       /*
+        * The i.MX7D doesn't support low power mode, so we need to ping the watchdog
+        * during suspend.
+        */
+       wdev->no_ping = !of_device_is_compatible(dev->of_node, "fsl,imx7d-wdt");
        platform_set_drvdata(pdev, wdog);
        watchdog_set_drvdata(wdog, wdev);
        watchdog_set_nowayout(wdog, nowayout);
        watchdog_set_restart_priority(wdog, 128);
        watchdog_init_timeout(wdog, timeout, dev);
-       watchdog_stop_ping_on_suspend(wdog);
+       if (wdev->no_ping)
+               watchdog_stop_ping_on_suspend(wdog);
 
        if (imx2_wdt_is_running(wdev)) {
                imx2_wdt_set_timeout(wdog, wdog->timeout);
@@ -366,9 +373,11 @@ static int __maybe_unused imx2_wdt_suspend(struct device *dev)
                imx2_wdt_ping(wdog);
        }
 
-       clk_disable_unprepare(wdev->clk);
+       if (wdev->no_ping) {
+               clk_disable_unprepare(wdev->clk);
 
-       wdev->clk_is_on = false;
+               wdev->clk_is_on = false;
+       }
 
        return 0;
 }
@@ -380,11 +389,14 @@ static int __maybe_unused imx2_wdt_resume(struct device *dev)
        struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
        int ret;
 
-       ret = clk_prepare_enable(wdev->clk);
-       if (ret)
-               return ret;
+       if (wdev->no_ping) {
+               ret = clk_prepare_enable(wdev->clk);
 
-       wdev->clk_is_on = true;
+               if (ret)
+                       return ret;
+
+               wdev->clk_is_on = true;
+       }
 
        if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
                /*
@@ -407,6 +419,7 @@ static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
 
 static const struct of_device_id imx2_wdt_dt_ids[] = {
        { .compatible = "fsl,imx21-wdt", },
+       { .compatible = "fsl,imx7d-wdt", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
index 31b03fa..281a48d 100644 (file)
@@ -84,10 +84,24 @@ static int ixp4xx_wdt_set_timeout(struct watchdog_device *wdd,
        return 0;
 }
 
+static int ixp4xx_wdt_restart(struct watchdog_device *wdd,
+                              unsigned long action, void *data)
+{
+       struct ixp4xx_wdt *iwdt = to_ixp4xx_wdt(wdd);
+
+       __raw_writel(IXP4XX_WDT_KEY, iwdt->base + IXP4XX_OSWK_OFFSET);
+       __raw_writel(0, iwdt->base + IXP4XX_OSWT_OFFSET);
+       __raw_writel(IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE,
+                    iwdt->base + IXP4XX_OSWE_OFFSET);
+
+       return 0;
+}
+
 static const struct watchdog_ops ixp4xx_wdt_ops = {
        .start = ixp4xx_wdt_start,
        .stop = ixp4xx_wdt_stop,
        .set_timeout = ixp4xx_wdt_set_timeout,
+       .restart = ixp4xx_wdt_restart,
        .owner = THIS_MODULE,
 };
 
index 127eefc..e25e6bf 100644 (file)
@@ -238,8 +238,10 @@ static int armada370_start(struct watchdog_device *wdt_dev)
        atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
 
        /* Enable watchdog timer */
-       atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
-                                               dev->data->wdt_enable_bit);
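+       /* Timer 1 backs the pretimeout interrupt, so enable it together with
+        * the watchdog when pretimeout is supported.
+        */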
+       reg = dev->data->wdt_enable_bit;
+       if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
+               reg |= TIMER1_ENABLE_BIT;
+       atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg);
 
        /* Enable reset on watchdog */
        reg = readl(dev->rstout);
@@ -312,7 +314,7 @@ static int armada375_stop(struct watchdog_device *wdt_dev)
 static int armada370_stop(struct watchdog_device *wdt_dev)
 {
        struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
-       u32 reg;
+       u32 reg, mask;
 
        /* Disable reset on watchdog */
        reg = readl(dev->rstout);
@@ -320,7 +322,10 @@ static int armada370_stop(struct watchdog_device *wdt_dev)
        writel(reg, dev->rstout);
 
        /* Disable watchdog timer */
-       atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
+       mask = dev->data->wdt_enable_bit;
+       if (wdt_dev->info->options & WDIOF_PRETIMEOUT)
+               mask |= TIMER1_ENABLE_BIT;
+       atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0);
 
        return 0;
 }
index 5791198..41d58ea 100644 (file)
@@ -327,6 +327,7 @@ static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume);
 static const struct of_device_id rwdt_ids[] = {
        { .compatible = "renesas,rcar-gen2-wdt", },
        { .compatible = "renesas,rcar-gen3-wdt", },
+       { .compatible = "renesas,rcar-gen4-wdt", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, rwdt_ids);
index 117bc2a..db843f8 100644 (file)
@@ -228,6 +228,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(dev);
        if (ret) {
                pm_runtime_put_noidle(dev);
+               pm_runtime_disable(&pdev->dev);
                return dev_err_probe(dev, ret, "runtime pm failed\n");
        }
 
index dd9a744..86ffb58 100644 (file)
@@ -49,7 +49,7 @@
 /* internal variables */
 
 enum tco_reg_layout {
-       sp5100, sb800, efch
+       sp5100, sb800, efch, efch_mmio
 };
 
 struct sp5100_tco {
@@ -87,6 +87,10 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
            dev->revision < 0x40) {
                return sp5100;
        } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
+           sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+           sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
+               return efch_mmio;
+       } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
            ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
             dev->revision >= 0x41) ||
            (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
@@ -209,6 +213,8 @@ static void tco_timer_enable(struct sp5100_tco *tco)
                                          ~EFCH_PM_WATCHDOG_DISABLE,
                                          EFCH_PM_DECODEEN_SECOND_RES);
                break;
+       default:
+               break;
        }
 }
 
@@ -223,14 +229,195 @@ static u32 sp5100_tco_read_pm_reg32(u8 index)
        return val;
 }
 
+static u32 sp5100_tco_request_region(struct device *dev,
+                                    u32 mmio_addr,
+                                    const char *dev_name)
+{
+       if (!devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
+                                    dev_name)) {
+               dev_dbg(dev, "MMIO address 0x%08x already in use\n", mmio_addr);
+               return 0;
+       }
+
+       return mmio_addr;
+}
+
+static int sp5100_tco_prepare_base(struct sp5100_tco *tco,
+                                  u32 mmio_addr,
+                                  u32 alt_mmio_addr,
+                                  const char *dev_name)
+{
+       struct device *dev = tco->wdd.parent;
+
+       dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n", mmio_addr);
+
+       if (!mmio_addr && !alt_mmio_addr)
+               return -ENODEV;
+
+       /* Check for MMIO address and alternate MMIO address conflicts */
+       if (mmio_addr)
+               mmio_addr = sp5100_tco_request_region(dev, mmio_addr, dev_name);
+
+       if (!mmio_addr && alt_mmio_addr)
+               mmio_addr = sp5100_tco_request_region(dev, alt_mmio_addr, dev_name);
+
+       if (!mmio_addr) {
+               dev_err(dev, "Failed to reserve MMIO or alternate MMIO region\n");
+               return -EBUSY;
+       }
+
+       tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+       if (!tco->tcobase) {
+               dev_err(dev, "MMIO address 0x%08x failed mapping\n", mmio_addr);
+               devm_release_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+               return -ENOMEM;
+       }
+
+       dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
+
+       return 0;
+}
+
+static int sp5100_tco_timer_init(struct sp5100_tco *tco)
+{
+       struct watchdog_device *wdd = &tco->wdd;
+       struct device *dev = wdd->parent;
+       u32 val;
+
+       val = readl(SP5100_WDT_CONTROL(tco->tcobase));
+       if (val & SP5100_WDT_DISABLED) {
+               dev_err(dev, "Watchdog hardware is disabled\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Save WatchDogFired status, because WatchDogFired flag is
+        * cleared here.
+        */
+       if (val & SP5100_WDT_FIRED)
+               wdd->bootstatus = WDIOF_CARDRESET;
+
+       /* Set watchdog action to reset the system */
+       val &= ~SP5100_WDT_ACTION_RESET;
+       writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+
+       /* Set a reasonable heartbeat before we stop the timer */
+       tco_timer_set_timeout(wdd, wdd->timeout);
+
+       /*
+        * Stop the TCO before we change anything so we don't race with
+        * a zeroed timer.
+        */
+       tco_timer_stop(wdd);
+
+       return 0;
+}
+
+static u8 efch_read_pm_reg8(void __iomem *addr, u8 index)
+{
+       return readb(addr + index);
+}
+
+static void efch_update_pm_reg8(void __iomem *addr, u8 index, u8 reset, u8 set)
+{
+       u8 val;
+
+       val = readb(addr + index);
+       val &= reset;
+       val |= set;
+       writeb(val, addr + index);
+}
+
+static void tco_timer_enable_mmio(void __iomem *addr)
+{
+       efch_update_pm_reg8(addr, EFCH_PM_DECODEEN3,
+                           ~EFCH_PM_WATCHDOG_DISABLE,
+                           EFCH_PM_DECODEEN_SECOND_RES);
+}
+
+static int sp5100_tco_setupdevice_mmio(struct device *dev,
+                                      struct watchdog_device *wdd)
+{
+       struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
+       const char *dev_name = SB800_DEVNAME;
+       u32 mmio_addr = 0, alt_mmio_addr = 0;
+       struct resource *res;
+       void __iomem *addr;
+       int ret;
+       u32 val;
+
+       res = request_mem_region_muxed(EFCH_PM_ACPI_MMIO_PM_ADDR,
+                                      EFCH_PM_ACPI_MMIO_PM_SIZE,
+                                      "sp5100_tco");
+
+       if (!res) {
+               dev_err(dev,
+                       "Memory region 0x%08x already in use\n",
+                       EFCH_PM_ACPI_MMIO_PM_ADDR);
+               return -EBUSY;
+       }
+
+       addr = ioremap(EFCH_PM_ACPI_MMIO_PM_ADDR, EFCH_PM_ACPI_MMIO_PM_SIZE);
+       if (!addr) {
+               dev_err(dev, "Address mapping failed\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /*
+        * EFCH_PM_DECODEEN_WDT_TMREN is dual purpose. This bitfield
+        * enables sp5100_tco register MMIO space decoding. The bitfield
+        * also starts the timer operation. Enable if not already enabled.
+        */
+       val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+               efch_update_pm_reg8(addr, EFCH_PM_DECODEEN, 0xff,
+                                   EFCH_PM_DECODEEN_WDT_TMREN);
+       }
+
+       /* Error if the timer could not be enabled */
+       val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+               dev_err(dev, "Failed to enable the timer\n");
+               ret = -EFAULT;
+               goto out;
+       }
+
+       mmio_addr = EFCH_PM_WDT_ADDR;
+
+       /* Determine alternate MMIO base address */
+       val = efch_read_pm_reg8(addr, EFCH_PM_ISACONTROL);
+       if (val & EFCH_PM_ISACONTROL_MMIOEN)
+               alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+                       EFCH_PM_ACPI_MMIO_WDT_OFFSET;
+
+       ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+       if (!ret) {
+               tco_timer_enable_mmio(addr);
+               ret = sp5100_tco_timer_init(tco);
+       }
+
+out:
+       if (addr)
+               iounmap(addr);
+
+       release_resource(res);
+
+       return ret;
+}
+
 static int sp5100_tco_setupdevice(struct device *dev,
                                  struct watchdog_device *wdd)
 {
        struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
        const char *dev_name;
        u32 mmio_addr = 0, val;
+       u32 alt_mmio_addr = 0;
        int ret;
 
+       if (tco->tco_reg_layout == efch_mmio)
+               return sp5100_tco_setupdevice_mmio(dev, wdd);
+
        /* Request the IO ports used by this driver */
        if (!request_muxed_region(SP5100_IO_PM_INDEX_REG,
                                  SP5100_PM_IOPORTS_SIZE, "sp5100_tco")) {
@@ -247,138 +434,55 @@ static int sp5100_tco_setupdevice(struct device *dev,
                dev_name = SP5100_DEVNAME;
                mmio_addr = sp5100_tco_read_pm_reg32(SP5100_PM_WATCHDOG_BASE) &
                                                                0xfffffff8;
+
+               /*
+                * Secondly, determine the alternate watchdog timer MMIO
+                * address from the SBResource_MMIO register.
+                */
+
+               /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+               pci_read_config_dword(sp5100_tco_pci,
+                                     SP5100_SB_RESOURCE_MMIO_BASE,
+                                     &val);
+
+               /* Verify MMIO is enabled and using bar0 */
+               if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+                       alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
                break;
        case sb800:
                dev_name = SB800_DEVNAME;
                mmio_addr = sp5100_tco_read_pm_reg32(SB800_PM_WATCHDOG_BASE) &
                                                                0xfffffff8;
+
+               /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
+               val = sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
+
+               /* Verify MMIO is enabled and using bar0 */
+               if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+                       alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
                break;
        case efch:
                dev_name = SB800_DEVNAME;
-               /*
-                * On Family 17h devices, the EFCH_PM_DECODEEN_WDT_TMREN bit of
-                * EFCH_PM_DECODEEN not only enables the EFCH_PM_WDT_ADDR memory
-                * region, it also enables the watchdog itself.
-                */
-               if (boot_cpu_data.x86 == 0x17) {
-                       val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
-                       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
-                               sp5100_tco_update_pm_reg8(EFCH_PM_DECODEEN, 0xff,
-                                                         EFCH_PM_DECODEEN_WDT_TMREN);
-                       }
-               }
                val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
                if (val & EFCH_PM_DECODEEN_WDT_TMREN)
                        mmio_addr = EFCH_PM_WDT_ADDR;
+
+               val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
+               if (val & EFCH_PM_ISACONTROL_MMIOEN)
+                       alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+                               EFCH_PM_ACPI_MMIO_WDT_OFFSET;
                break;
        default:
                return -ENODEV;
        }
 
-       /* Check MMIO address conflict */
-       if (!mmio_addr ||
-           !devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
-                                    dev_name)) {
-               if (mmio_addr)
-                       dev_dbg(dev, "MMIO address 0x%08x already in use\n",
-                               mmio_addr);
-               switch (tco->tco_reg_layout) {
-               case sp5100:
-                       /*
-                        * Secondly, Find the watchdog timer MMIO address
-                        * from SBResource_MMIO register.
-                        */
-                       /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
-                       pci_read_config_dword(sp5100_tco_pci,
-                                             SP5100_SB_RESOURCE_MMIO_BASE,
-                                             &mmio_addr);
-                       if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
-                                         SB800_ACPI_MMIO_SEL)) !=
-                                                 SB800_ACPI_MMIO_DECODE_EN) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr &= ~0xFFF;
-                       mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
-                       break;
-               case sb800:
-                       /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
-                       mmio_addr =
-                               sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
-                       if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
-                                         SB800_ACPI_MMIO_SEL)) !=
-                                                 SB800_ACPI_MMIO_DECODE_EN) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr &= ~0xFFF;
-                       mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
-                       break;
-               case efch:
-                       val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
-                       if (!(val & EFCH_PM_ISACONTROL_MMIOEN)) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
-                                   EFCH_PM_ACPI_MMIO_WDT_OFFSET;
-                       break;
-               }
-               dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n",
-                       mmio_addr);
-               if (!devm_request_mem_region(dev, mmio_addr,
-                                            SP5100_WDT_MEM_MAP_SIZE,
-                                            dev_name)) {
-                       dev_dbg(dev, "MMIO address 0x%08x already in use\n",
-                               mmio_addr);
-                       ret = -EBUSY;
-                       goto unreg_region;
-               }
-       }
-
-       tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
-       if (!tco->tcobase) {
-               dev_err(dev, "failed to get tcobase address\n");
-               ret = -ENOMEM;
-               goto unreg_region;
-       }
-
-       dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
-
-       /* Setup the watchdog timer */
-       tco_timer_enable(tco);
-
-       val = readl(SP5100_WDT_CONTROL(tco->tcobase));
-       if (val & SP5100_WDT_DISABLED) {
-               dev_err(dev, "Watchdog hardware is disabled\n");
-               ret = -ENODEV;
-               goto unreg_region;
+       ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+       if (!ret) {
+               /* Setup the watchdog timer */
+               tco_timer_enable(tco);
+               ret = sp5100_tco_timer_init(tco);
        }
 
-       /*
-        * Save WatchDogFired status, because WatchDogFired flag is
-        * cleared here.
-        */
-       if (val & SP5100_WDT_FIRED)
-               wdd->bootstatus = WDIOF_CARDRESET;
-       /* Set watchdog action to reset the system */
-       val &= ~SP5100_WDT_ACTION_RESET;
-       writel(val, SP5100_WDT_CONTROL(tco->tcobase));
-
-       /* Set a reasonable heartbeat before we stop the timer */
-       tco_timer_set_timeout(wdd, wdd->timeout);
-
-       /*
-        * Stop the TCO before we change anything so we don't race with
-        * a zeroed timer.
-        */
-       tco_timer_stop(wdd);
-
-       release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
-
-       return 0;
-
-unreg_region:
        release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
        return ret;
 }
index adf015a..6a0986d 100644 (file)
@@ -58,6 +58,7 @@
 #define SB800_PM_WATCHDOG_SECOND_RES   GENMASK(1, 0)
 #define SB800_ACPI_MMIO_DECODE_EN      BIT(0)
 #define SB800_ACPI_MMIO_SEL            BIT(1)
+#define SB800_ACPI_MMIO_MASK           GENMASK(1, 0)
 
 #define SB800_PM_WDT_MMIO_OFFSET       0xB00
 
 #define EFCH_PM_ISACONTROL_MMIOEN      BIT(1)
 
 #define EFCH_PM_ACPI_MMIO_ADDR         0xfed80000
+#define EFCH_PM_ACPI_MMIO_PM_OFFSET    0x00000300
 #define EFCH_PM_ACPI_MMIO_WDT_OFFSET   0x00000b00
+
+#define EFCH_PM_ACPI_MMIO_PM_ADDR      (EFCH_PM_ACPI_MMIO_ADDR +       \
+                                        EFCH_PM_ACPI_MMIO_PM_OFFSET)
+#define EFCH_PM_ACPI_MMIO_PM_SIZE      8
+#define AMD_ZEN_SMBUS_PCI_REV          0x51
index 3a3d8b5..54903f3 100644 (file)
@@ -171,17 +171,17 @@ static int __watchdog_ping(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_ping: ping the watchdog.
- *     @wdd: the watchdog device to ping
+ * watchdog_ping - ping the watchdog
+ * @wdd: The watchdog device to ping
  *
- *     The caller must hold wd_data->lock.
+ * If the watchdog has no own ping operation then it needs to be
+ * restarted via the start operation. This wrapper function does
+ * exactly that.
+ * We only ping when the watchdog device is running.
+ * The caller must hold wd_data->lock.
  *
- *     If the watchdog has no own ping operation then it needs to be
- *     restarted via the start operation. This wrapper function does
- *     exactly that.
- *     We only ping when the watchdog device is running.
+ * Return: 0 on success, error otherwise.
  */
-
 static int watchdog_ping(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -231,16 +231,14 @@ static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
 }
 
 /*
- *     watchdog_start: wrapper to start the watchdog.
- *     @wdd: the watchdog device to start
+ * watchdog_start - wrapper to start the watchdog
+ * @wdd: The watchdog device to start
  *
- *     The caller must hold wd_data->lock.
+ * Start the watchdog if it is not active and mark it active.
+ * The caller must hold wd_data->lock.
  *
- *     Start the watchdog if it is not active and mark it active.
- *     This function returns zero on success or a negative errno code for
- *     failure.
+ * Return: 0 on success or a negative errno code for failure.
  */
-
 static int watchdog_start(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -274,17 +272,15 @@ static int watchdog_start(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_stop: wrapper to stop the watchdog.
- *     @wdd: the watchdog device to stop
+ * watchdog_stop - wrapper to stop the watchdog
+ * @wdd: The watchdog device to stop
  *
- *     The caller must hold wd_data->lock.
+ * Stop the watchdog if it is still active and unmark it active.
+ * If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ * The caller must hold wd_data->lock.
  *
- *     Stop the watchdog if it is still active and unmark it active.
- *     This function returns zero on success or a negative errno code for
- *     failure.
- *     If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ * Return: 0 on success or a negative errno code for failure.
  */
-
 static int watchdog_stop(struct watchdog_device *wdd)
 {
        int err = 0;
@@ -315,14 +311,14 @@ static int watchdog_stop(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_get_status: wrapper to get the watchdog status
- *     @wdd: the watchdog device to get the status from
+ * watchdog_get_status - wrapper to get the watchdog status
+ * @wdd: The watchdog device to get the status from
  *
- *     The caller must hold wd_data->lock.
+ * Get the watchdog's status flags.
+ * The caller must hold wd_data->lock.
  *
- *     Get the watchdog's status flags.
+ * Return: watchdog's status flags.
  */
-
 static unsigned int watchdog_get_status(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -352,13 +348,14 @@ static unsigned int watchdog_get_status(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_set_timeout: set the watchdog timer timeout
- *     @wdd: the watchdog device to set the timeout for
- *     @timeout: timeout to set in seconds
+ * watchdog_set_timeout - set the watchdog timer timeout
+ * @wdd:       The watchdog device to set the timeout for
+ * @timeout:   Timeout to set in seconds
+ *
+ * The caller must hold wd_data->lock.
  *
- *     The caller must hold wd_data->lock.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_set_timeout(struct watchdog_device *wdd,
                                                        unsigned int timeout)
 {
@@ -385,11 +382,12 @@ static int watchdog_set_timeout(struct watchdog_device *wdd,
 }
 
 /*
- *     watchdog_set_pretimeout: set the watchdog timer pretimeout
- *     @wdd: the watchdog device to set the timeout for
- *     @timeout: pretimeout to set in seconds
+ * watchdog_set_pretimeout - set the watchdog timer pretimeout
+ * @wdd:       The watchdog device to set the timeout for
+ * @timeout:   pretimeout to set in seconds
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_set_pretimeout(struct watchdog_device *wdd,
                                   unsigned int timeout)
 {
@@ -410,15 +408,15 @@ static int watchdog_set_pretimeout(struct watchdog_device *wdd,
 }
 
 /*
- *     watchdog_get_timeleft: wrapper to get the time left before a reboot
- *     @wdd: the watchdog device to get the remaining time from
- *     @timeleft: the time that's left
+ * watchdog_get_timeleft - wrapper to get the time left before a reboot
+ * @wdd:       The watchdog device to get the remaining time from
+ * @timeleft:  The time that's left
  *
- *     The caller must hold wd_data->lock.
+ * Get the time before a watchdog will reboot (if not pinged).
+ * The caller must hold wd_data->lock.
  *
- *     Get the time before a watchdog will reboot (if not pinged).
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_get_timeleft(struct watchdog_device *wdd,
                                                        unsigned int *timeleft)
 {
@@ -635,14 +633,15 @@ __ATTRIBUTE_GROUPS(wdt);
 #endif
 
 /*
- *     watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
- *     @wdd: the watchdog device to do the ioctl on
- *     @cmd: watchdog command
- *     @arg: argument pointer
+ * watchdog_ioctl_op - call the watchdog drivers ioctl op if defined
+ * @wdd: The watchdog device to do the ioctl on
+ * @cmd: Watchdog command
+ * @arg: Argument pointer
  *
- *     The caller must hold wd_data->lock.
+ * The caller must hold wd_data->lock.
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
                                                        unsigned long arg)
 {
@@ -653,17 +652,18 @@ static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
 }
 
 /*
- *     watchdog_write: writes to the watchdog.
- *     @file: file from VFS
- *     @data: user address of data
- *     @len: length of data
- *     @ppos: pointer to the file offset
+ * watchdog_write - writes to the watchdog
+ * @file:      File from VFS
+ * @data:      User address of data
+ * @len:       Length of data
+ * @ppos:      Pointer to the file offset
  *
- *     A write to a watchdog device is defined as a keepalive ping.
- *     Writing the magic 'V' sequence allows the next close to turn
- *     off the watchdog (if 'nowayout' is not set).
+ * A write to a watchdog device is defined as a keepalive ping.
+ * Writing the magic 'V' sequence allows the next close to turn
+ * off the watchdog (if 'nowayout' is not set).
+ *
+ * Return: @len if successful, error otherwise.
  */
-
 static ssize_t watchdog_write(struct file *file, const char __user *data,
                                                size_t len, loff_t *ppos)
 {
@@ -706,13 +706,15 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
 }
 
 /*
- *     watchdog_ioctl: handle the different ioctl's for the watchdog device.
- *     @file: file handle to the device
- *     @cmd: watchdog command
- *     @arg: argument pointer
+ * watchdog_ioctl - handle the different ioctl's for the watchdog device
+ * @file:      File handle to the device
+ * @cmd:       Watchdog command
+ * @arg:       Argument pointer
  *
- *     The watchdog API defines a common set of functions for all watchdogs
- *     according to their available features.
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ *
+ * Return: 0 if successful, error otherwise.
  */
 
 static long watchdog_ioctl(struct file *file, unsigned int cmd,
@@ -819,15 +821,16 @@ out_ioctl:
 }
 
 /*
- *     watchdog_open: open the /dev/watchdog* devices.
- *     @inode: inode of device
- *     @file: file handle to device
+ * watchdog_open - open the /dev/watchdog* devices
+ * @inode:     Inode of device
+ * @file:      File handle to device
+ *
+ * When the /dev/watchdog* device gets opened, we start the watchdog.
+ * Watch out: the /dev/watchdog device is single open, so we make sure
+ * it can only be opened once.
  *
- *     When the /dev/watchdog* device gets opened, we start the watchdog.
- *     Watch out: the /dev/watchdog device is single open, so we make sure
- *     it can only be opened once.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_open(struct inode *inode, struct file *file)
 {
        struct watchdog_core_data *wd_data;
@@ -896,15 +899,16 @@ static void watchdog_core_data_release(struct device *dev)
 }
 
 /*
- *     watchdog_release: release the watchdog device.
- *     @inode: inode of device
- *     @file: file handle to device
+ * watchdog_release - release the watchdog device
+ * @inode:     Inode of device
+ * @file:      File handle to device
+ *
+ * This is the code for when /dev/watchdog gets closed. We will only
+ * stop the watchdog when we have received the magic char (and nowayout
+ * was not set), else the watchdog will keep running.
  *
- *     This is the code for when /dev/watchdog gets closed. We will only
- *     stop the watchdog when we have received the magic char (and nowayout
- *     was not set), else the watchdog will keep running.
+ * Always returns 0.
  */
-
 static int watchdog_release(struct inode *inode, struct file *file)
 {
        struct watchdog_core_data *wd_data = file->private_data;
@@ -977,14 +981,15 @@ static struct class watchdog_class = {
 };
 
 /*
- *     watchdog_cdev_register: register watchdog character device
- *     @wdd: watchdog device
+ * watchdog_cdev_register - register watchdog character device
+ * @wdd: Watchdog device
+ *
+ * Register a watchdog character device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
  *
- *     Register a watchdog character device including handling the legacy
- *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
- *     thus we set it up like that.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_cdev_register(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data;
@@ -1074,13 +1079,12 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_cdev_unregister: unregister watchdog character device
- *     @watchdog: watchdog device
+ * watchdog_cdev_unregister - unregister watchdog character device
+ * @wdd: Watchdog device
  *
- *     Unregister watchdog character device and if needed the legacy
- *     /dev/watchdog device.
+ * Unregister watchdog character device and if needed the legacy
+ * /dev/watchdog device.
  */
-
 static void watchdog_cdev_unregister(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -1109,15 +1113,16 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
        put_device(&wd_data->dev);
 }
 
-/*
- *     watchdog_dev_register: register a watchdog device
- *     @wdd: watchdog device
+/**
+ * watchdog_dev_register - register a watchdog device
+ * @wdd: Watchdog device
+ *
+ * Register a watchdog device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
  *
- *     Register a watchdog device including handling the legacy
- *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
- *     thus we set it up like that.
+ * Return: 0 if successful, error otherwise.
  */
-
 int watchdog_dev_register(struct watchdog_device *wdd)
 {
        int ret;
@@ -1133,30 +1138,31 @@ int watchdog_dev_register(struct watchdog_device *wdd)
        return ret;
 }
 
-/*
- *     watchdog_dev_unregister: unregister a watchdog device
- *     @watchdog: watchdog device
+/**
+ * watchdog_dev_unregister - unregister a watchdog device
+ * @wdd: watchdog device
  *
- *     Unregister watchdog device and if needed the legacy
- *     /dev/watchdog device.
+ * Unregister watchdog device and if needed the legacy
+ * /dev/watchdog device.
  */
-
 void watchdog_dev_unregister(struct watchdog_device *wdd)
 {
        watchdog_unregister_pretimeout(wdd);
        watchdog_cdev_unregister(wdd);
 }
 
-/*
- *     watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog
- *     @wdd: watchdog device
- *     @last_ping_ms: time since last HW heartbeat
+/**
+ * watchdog_set_last_hw_keepalive - set last HW keepalive time for watchdog
+ * @wdd:               Watchdog device
+ * @last_ping_ms:      Time since last HW heartbeat
  *
- *     Adjusts the last known HW keepalive time for a watchdog timer.
- *     This is needed if the watchdog is already running when the probe
- *     function is called, and it can't be pinged immediately. This
- *     function must be called immediately after watchdog registration,
- *     and min_hw_heartbeat_ms must be set for this to be useful.
+ * Adjusts the last known HW keepalive time for a watchdog timer.
+ * This is needed if the watchdog is already running when the probe
+ * function is called, and it can't be pinged immediately. This
+ * function must be called immediately after watchdog registration,
+ * and min_hw_heartbeat_ms must be set for this to be useful.
+ *
+ * Return: 0 if successful, error otherwise.
  */
 int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
                                   unsigned int last_ping_ms)
@@ -1180,12 +1186,13 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
 }
 EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
 
-/*
- *     watchdog_dev_init: init dev part of watchdog core
+/**
+ * watchdog_dev_init - init dev part of watchdog core
  *
- *     Allocate a range of chardev nodes to use for watchdog devices
+ * Allocate a range of chardev nodes to use for watchdog devices.
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 int __init watchdog_dev_init(void)
 {
        int err;
@@ -1218,12 +1225,11 @@ err_register:
        return err;
 }
 
-/*
- *     watchdog_dev_exit: exit dev part of watchdog core
+/**
+ * watchdog_dev_exit - exit dev part of watchdog core
  *
- *     Release the range of chardev nodes used for watchdog devices
+ * Release the range of chardev nodes used for watchdog devices.
  */
-
 void __exit watchdog_dev_exit(void)
 {
        unregister_chrdev_region(watchdog_devt, MAX_DOGS);
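The kernel-doc rewritten above spells out the userspace contract for /dev/watchdog: any write counts as a keepalive ping, the magic 'V' character lets the following close() stop the watchdog when 'nowayout' is unset, and timeouts are driven through the WDIOC ioctls. A minimal illustrative client following that contract might look like the sketch below (device path, timeout value and loop count are arbitrary and not taken from the patch):

/* Illustrative sketch only; error handling trimmed for brevity. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);	/* single-open device */
	int timeout = 30;				/* seconds, arbitrary */

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);		/* exercises the watchdog_set_timeout() path */

	for (int i = 0; i < 10; i++) {
		write(fd, ".", 1);			/* any write is a keepalive ping */
		sleep(timeout / 2);
	}

	write(fd, "V", 1);	/* magic char: allow the close below to stop the dog */
	close(fd);		/* stops the watchdog unless 'nowayout' is set */
	return 0;
}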
index 55e108e..1c8dc69 100644 (file)
@@ -49,22 +49,20 @@ int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
 
 void v9fs_cache_inode_get_cookie(struct inode *inode)
 {
-       struct v9fs_inode *v9inode;
+       struct v9fs_inode *v9inode = V9FS_I(inode);
        struct v9fs_session_info *v9ses;
        __le32 version;
        __le64 path;
 
        if (!S_ISREG(inode->i_mode))
                return;
-
-       v9inode = V9FS_I(inode);
-       if (WARN_ON(v9inode->fscache))
+       if (WARN_ON(v9fs_inode_cookie(v9inode)))
                return;
 
        version = cpu_to_le32(v9inode->qid.version);
        path = cpu_to_le64(v9inode->qid.path);
        v9ses = v9fs_inode2v9ses(inode);
-       v9inode->fscache =
+       v9inode->netfs_ctx.cache =
                fscache_acquire_cookie(v9fs_session_cache(v9ses),
                                       0,
                                       &path, sizeof(path),
@@ -72,5 +70,5 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
                                       i_size_read(&v9inode->vfs_inode));
 
        p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
-                inode, v9inode->fscache);
+                inode, v9fs_inode_cookie(v9inode));
 }
index 08f65c4..e28ddf7 100644 (file)
@@ -623,9 +623,7 @@ static void v9fs_sysfs_cleanup(void)
 static void v9fs_inode_init_once(void *foo)
 {
        struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
-#ifdef CONFIG_9P_FSCACHE
-       v9inode->fscache = NULL;
-#endif
+
        memset(&v9inode->qid, 0, sizeof(v9inode->qid));
        inode_init_once(&v9inode->vfs_inode);
 }
index bc8b302..ec0e8df 100644 (file)
@@ -9,6 +9,7 @@
 #define FS_9P_V9FS_H
 
 #include <linux/backing-dev.h>
+#include <linux/netfs.h>
 
 /**
  * enum p9_session_flags - option flags for each 9P session
@@ -108,14 +109,15 @@ struct v9fs_session_info {
 #define V9FS_INO_INVALID_ATTR 0x01
 
 struct v9fs_inode {
-#ifdef CONFIG_9P_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        struct p9_qid qid;
        unsigned int cache_validity;
        struct p9_fid *writeback_fid;
        struct mutex v_mutex;
-       struct inode vfs_inode;
 };
 
 static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
@@ -126,7 +128,7 @@ static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
 static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode)
 {
 #ifdef CONFIG_9P_FSCACHE
-       return v9inode->fscache;
+       return netfs_i_cookie(&v9inode->vfs_inode);
 #else
        return NULL;
 #endif
@@ -163,6 +165,7 @@ extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
 extern const struct inode_operations v9fs_dir_inode_operations_dotl;
 extern const struct inode_operations v9fs_file_inode_operations_dotl;
 extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern const struct netfs_request_ops v9fs_req_ops;
 extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
                                              struct p9_fid *fid,
                                              struct super_block *sb, int new);
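The anonymous struct introduced above (and the matching change to struct afs_vnode later in this diff) encodes the layout netfslib now relies on: the netfs_i_context must sit directly after the VFS inode so the library can find it from a bare struct inode. A sketch of the expected wrapper for a hypothetical filesystem, purely for illustration:

/* Sketch, not part of the patch: "myfs" is a hypothetical filesystem. */
#include <linux/fs.h>
#include <linux/netfs.h>

struct myfs_inode {
	struct {
		/* These must be contiguous */
		struct inode		vfs_inode;	/* the VFS's inode record */
		struct netfs_i_context	netfs_ctx;	/* netfslib context */
	};
	/* filesystem-private fields follow here */
};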
index 76956c9..5011281 100644 (file)
 #include "fid.h"
 
 /**
- * v9fs_req_issue_op - Issue a read from 9P
+ * v9fs_issue_read - Issue a read from 9P
  * @subreq: The read to make
  */
-static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct p9_fid *fid = rreq->netfs_priv;
        struct iov_iter to;
        loff_t pos = subreq->start + subreq->transferred;
@@ -52,20 +52,21 @@ static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
 }
 
 /**
- * v9fs_init_rreq - Initialise a read request
+ * v9fs_init_request - Initialise a read request
  * @rreq: The read request
  * @file: The file being read from
  */
-static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        struct p9_fid *fid = file->private_data;
 
        refcount_inc(&fid->count);
        rreq->netfs_priv = fid;
+       return 0;
 }
 
 /**
- * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
+ * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request
  * @mapping: unused mapping of request to cleanup
  * @priv: private data to cleanup, a fid, guaranteed non-null.
  */
@@ -77,21 +78,10 @@ static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
 }
 
 /**
- * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
- * @inode: The inode to check
- */
-static bool v9fs_is_cache_enabled(struct inode *inode)
-{
-       struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));
-
-       return fscache_cookie_enabled(cookie) && cookie->cache_priv;
-}
-
-/**
  * v9fs_begin_cache_operation - Begin a cache operation for a read
  * @rreq: The read request
  */
-static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
+static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
 {
 #ifdef CONFIG_9P_FSCACHE
        struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
@@ -102,37 +92,14 @@ static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
 #endif
 }
 
-static const struct netfs_read_request_ops v9fs_req_ops = {
-       .init_rreq              = v9fs_init_rreq,
-       .is_cache_enabled       = v9fs_is_cache_enabled,
+const struct netfs_request_ops v9fs_req_ops = {
+       .init_request           = v9fs_init_request,
        .begin_cache_operation  = v9fs_begin_cache_operation,
-       .issue_op               = v9fs_req_issue_op,
+       .issue_read             = v9fs_issue_read,
        .cleanup                = v9fs_req_cleanup,
 };
 
 /**
- * v9fs_vfs_readpage - read an entire page in from 9P
- * @file: file being read
- * @page: structure to page
- *
- */
-static int v9fs_vfs_readpage(struct file *file, struct page *page)
-{
-       struct folio *folio = page_folio(page);
-
-       return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
-}
-
-/**
- * v9fs_vfs_readahead - read a set of pages from 9P
- * @ractl: The readahead parameters
- */
-static void v9fs_vfs_readahead(struct readahead_control *ractl)
-{
-       netfs_readahead(ractl, &v9fs_req_ops, NULL);
-}
-
-/**
  * v9fs_release_page - release the private state associated with a page
  * @page: The page to be released
  * @gfp: The caller's allocation restrictions
@@ -308,8 +275,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
-                                  &v9fs_req_ops, NULL);
+       retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata);
        if (retval < 0)
                return retval;
 
@@ -370,8 +336,8 @@ static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
 #endif
 
 const struct address_space_operations v9fs_addr_operations = {
-       .readpage = v9fs_vfs_readpage,
-       .readahead = v9fs_vfs_readahead,
+       .readpage = netfs_readpage,
+       .readahead = netfs_readahead,
        .dirty_folio = v9fs_dirty_folio,
        .writepage = v9fs_vfs_writepage,
        .write_begin = v9fs_write_begin,
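With the context embedded in the inode, the request ops table above shrinks to init_request/issue_read style hooks and the generic netfs_readpage()/netfs_readahead() entry points are plugged straight into the address_space_operations. A compressed sketch of that wiring for the same hypothetical "myfs" (the issue_read body just fails the subrequest; a real filesystem would perform its RPC and report the byte count via netfs_subreq_terminated()):

/* Sketch only; mirrors the 9p/afs/ceph conversions, not taken verbatim. */
#include <linux/fs.h>
#include <linux/netfs.h>

static int myfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = file ? file->private_data : NULL;	/* per-open state */
	return 0;
}

static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
	/* A real implementation reads subreq->len bytes starting at
	 * subreq->start + subreq->transferred; this stub just fails. */
	netfs_subreq_terminated(subreq, -EIO, false);
}

static const struct netfs_request_ops myfs_req_ops = {
	.init_request	= myfs_init_request,
	.issue_read	= myfs_issue_read,
};

static void myfs_set_netfs_context(struct inode *inode)
{
	netfs_i_context_init(inode, &myfs_req_ops);	/* at inode set-up time */
}

static const struct address_space_operations myfs_aops = {
	.readpage	= netfs_readpage,	/* generic netfs entry points */
	.readahead	= netfs_readahead,
};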
index 84c3cf7..55367ec 100644 (file)
@@ -231,9 +231,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
        v9inode = alloc_inode_sb(sb, v9fs_inode_cache, GFP_KERNEL);
        if (!v9inode)
                return NULL;
-#ifdef CONFIG_9P_FSCACHE
-       v9inode->fscache = NULL;
-#endif
        v9inode->writeback_fid = NULL;
        v9inode->cache_validity = 0;
        mutex_init(&v9inode->v_mutex);
@@ -250,6 +247,14 @@ void v9fs_free_inode(struct inode *inode)
        kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
 }
 
+/*
+ * Set parameters for the netfs library
+ */
+static void v9fs_set_netfs_context(struct inode *inode)
+{
+       netfs_i_context_init(inode, &v9fs_req_ops);
+}
+
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
                    struct inode *inode, umode_t mode, dev_t rdev)
 {
@@ -338,6 +343,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
                err = -EINVAL;
                goto error;
        }
+
+       v9fs_set_netfs_context(inode);
 error:
        return err;
 
index db832cc..f120bcb 100644 (file)
@@ -76,6 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        /* there shouldn't be an existing inode */
        BUG_ON(!(inode->i_state & I_NEW));
 
+       netfs_i_context_init(inode, NULL);
        inode->i_size           = 0;
        inode->i_mode           = S_IFDIR | S_IRUGO | S_IXUGO;
        if (root) {
index 0f9fdb2..26292a1 100644 (file)
 #include "internal.h"
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_readpage(struct file *file, struct page *page);
 static int afs_symlink_readpage(struct file *file, struct page *page);
 static void afs_invalidate_folio(struct folio *folio, size_t offset,
                               size_t length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
-static void afs_readahead(struct readahead_control *ractl);
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static void afs_vm_open(struct vm_area_struct *area);
 static void afs_vm_close(struct vm_area_struct *area);
@@ -52,8 +50,8 @@ const struct inode_operations afs_file_inode_operations = {
 };
 
 const struct address_space_operations afs_file_aops = {
-       .readpage       = afs_readpage,
-       .readahead      = afs_readahead,
+       .readpage       = netfs_readpage,
+       .readahead      = netfs_readahead,
        .dirty_folio    = afs_dirty_folio,
        .launder_folio  = afs_launder_folio,
        .releasepage    = afs_releasepage,
@@ -240,7 +238,7 @@ void afs_put_read(struct afs_read *req)
 static void afs_fetch_data_notify(struct afs_operation *op)
 {
        struct afs_read *req = op->fetch.req;
-       struct netfs_read_subrequest *subreq = req->subreq;
+       struct netfs_io_subrequest *subreq = req->subreq;
        int error = op->error;
 
        if (error == -ECONNABORTED)
@@ -310,7 +308,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
        return afs_do_sync_operation(op);
 }
 
-static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
 {
        struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
        struct afs_read *fsreq;
@@ -359,19 +357,13 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
        return ret;
 }
 
-static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        rreq->netfs_priv = key_get(afs_file_key(file));
+       return 0;
 }
 
-static bool afs_is_cache_enabled(struct inode *inode)
-{
-       struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode));
-
-       return fscache_cookie_enabled(cookie) && cookie->cache_priv;
-}
-
-static int afs_begin_cache_operation(struct netfs_read_request *rreq)
+static int afs_begin_cache_operation(struct netfs_io_request *rreq)
 {
 #ifdef CONFIG_AFS_FSCACHE
        struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
@@ -396,27 +388,14 @@ static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
        key_put(netfs_priv);
 }
 
-const struct netfs_read_request_ops afs_req_ops = {
-       .init_rreq              = afs_init_rreq,
-       .is_cache_enabled       = afs_is_cache_enabled,
+const struct netfs_request_ops afs_req_ops = {
+       .init_request           = afs_init_request,
        .begin_cache_operation  = afs_begin_cache_operation,
        .check_write_begin      = afs_check_write_begin,
-       .issue_op               = afs_req_issue_op,
+       .issue_read             = afs_issue_read,
        .cleanup                = afs_priv_cleanup,
 };
 
-static int afs_readpage(struct file *file, struct page *page)
-{
-       struct folio *folio = page_folio(page);
-
-       return netfs_readpage(file, folio, &afs_req_ops, NULL);
-}
-
-static void afs_readahead(struct readahead_control *ractl)
-{
-       netfs_readahead(ractl, &afs_req_ops, NULL);
-}
-
 int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
index 5964f8a..2fe4024 100644 (file)
@@ -54,6 +54,14 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
 }
 
 /*
+ * Set parameters for the netfs library
+ */
+static void afs_set_netfs_context(struct afs_vnode *vnode)
+{
+       netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops);
+}
+
+/*
  * Initialise an inode from the vnode status.
  */
 static int afs_inode_init_from_status(struct afs_operation *op,
@@ -128,6 +136,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
        }
 
        afs_set_i_size(vnode, status->size);
+       afs_set_netfs_context(vnode);
 
        vnode->invalid_before   = status->data_version;
        inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
@@ -237,6 +246,7 @@ static void afs_apply_status(struct afs_operation *op,
                 * idea of what the size should be that's not the same as
                 * what's on the server.
                 */
+               vnode->netfs_ctx.remote_i_size = status->size;
                if (change_size) {
                        afs_set_i_size(vnode, status->size);
                        inode->i_ctime = t;
@@ -420,7 +430,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
        struct afs_vnode_cache_aux aux;
 
        if (vnode->status.type != AFS_FTYPE_FILE) {
-               vnode->cache = NULL;
+               vnode->netfs_ctx.cache = NULL;
                return;
        }
 
@@ -430,12 +440,14 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
        key.vnode_id_ext[1]     = htonl(vnode->fid.vnode_hi);
        afs_set_cache_aux(vnode, &aux);
 
-       vnode->cache = fscache_acquire_cookie(
-               vnode->volume->cache,
-               vnode->status.type == AFS_FTYPE_FILE ? 0 : FSCACHE_ADV_SINGLE_CHUNK,
-               &key, sizeof(key),
-               &aux, sizeof(aux),
-               vnode->status.size);
+       afs_vnode_set_cache(vnode,
+                           fscache_acquire_cookie(
+                                   vnode->volume->cache,
+                                   vnode->status.type == AFS_FTYPE_FILE ?
+                                   0 : FSCACHE_ADV_SINGLE_CHUNK,
+                                   &key, sizeof(key),
+                                   &aux, sizeof(aux),
+                                   vnode->status.size));
 #endif
 }
 
@@ -528,6 +540,7 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key)
 
        vnode = AFS_FS_I(inode);
        vnode->cb_v_break = as->volume->cb_v_break,
+       afs_set_netfs_context(vnode);
 
        op = afs_alloc_operation(key, as->volume);
        if (IS_ERR(op)) {
@@ -786,11 +799,8 @@ void afs_evict_inode(struct inode *inode)
                afs_put_wb_key(wbk);
        }
 
-#ifdef CONFIG_AFS_FSCACHE
-       fscache_relinquish_cookie(vnode->cache,
+       fscache_relinquish_cookie(afs_vnode_cache(vnode),
                                  test_bit(AFS_VNODE_DELETED, &vnode->flags));
-       vnode->cache = NULL;
-#endif
 
        afs_prune_wb_keys(vnode);
        afs_put_permits(rcu_access_pointer(vnode->permit_cache));
index dc5032e..7b7ef94 100644 (file)
@@ -207,7 +207,7 @@ struct afs_read {
        loff_t                  file_size;      /* File size returned by server */
        struct key              *key;           /* The key to use to reissue the read */
        struct afs_vnode        *vnode;         /* The file being read into. */
-       struct netfs_read_subrequest *subreq;   /* Fscache helper read request this belongs to */
+       struct netfs_io_subrequest *subreq;     /* Fscache helper read request this belongs to */
        afs_dataversion_t       data_version;   /* Version number returned by server */
        refcount_t              usage;
        unsigned int            call_debug_id;
@@ -619,15 +619,16 @@ enum afs_lock_state {
  * leak from one inode to another.
  */
 struct afs_vnode {
-       struct inode            vfs_inode;      /* the VFS's inode record */
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
 
        struct afs_volume       *volume;        /* volume on which vnode resides */
        struct afs_fid          fid;            /* the file identifier for this inode */
        struct afs_file_status  status;         /* AFS status info for this file */
        afs_dataversion_t       invalid_before; /* Child dentries are invalid before this */
-#ifdef CONFIG_AFS_FSCACHE
-       struct fscache_cookie   *cache;         /* caching cookie */
-#endif
        struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
        struct mutex            io_lock;        /* Lock for serialising I/O on this mutex */
        struct rw_semaphore     validate_lock;  /* lock for validating this vnode */
@@ -674,12 +675,20 @@ struct afs_vnode {
 static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
 {
 #ifdef CONFIG_AFS_FSCACHE
-       return vnode->cache;
+       return netfs_i_cookie(&vnode->vfs_inode);
 #else
        return NULL;
 #endif
 }
 
+static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
+                                      struct fscache_cookie *cookie)
+{
+#ifdef CONFIG_AFS_FSCACHE
+       vnode->netfs_ctx.cache = cookie;
+#endif
+}
+
 /*
  * cached security record for one user's attempt to access a vnode
  */
@@ -1063,7 +1072,7 @@ extern const struct address_space_operations afs_file_aops;
 extern const struct address_space_operations afs_symlink_aops;
 extern const struct inode_operations afs_file_inode_operations;
 extern const struct file_operations afs_file_operations;
-extern const struct netfs_read_request_ops afs_req_ops;
+extern const struct netfs_request_ops afs_req_ops;
 
 extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *);
 extern void afs_put_wb_key(struct afs_wb_key *);
index 7592c0f..1fea195 100644 (file)
@@ -688,13 +688,11 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        /* Reset anything that shouldn't leak from one inode to the next. */
        memset(&vnode->fid, 0, sizeof(vnode->fid));
        memset(&vnode->status, 0, sizeof(vnode->status));
+       afs_vnode_set_cache(vnode, NULL);
 
        vnode->volume           = NULL;
        vnode->lock_key         = NULL;
        vnode->permit_cache     = NULL;
-#ifdef CONFIG_AFS_FSCACHE
-       vnode->cache            = NULL;
-#endif
 
        vnode->flags            = 1 << AFS_VNODE_UNSET;
        vnode->lock_state       = AFS_VNODE_LOCK_NONE;
index e1c1708..6bcf147 100644 (file)
@@ -60,8 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
-                               &afs_req_ops, NULL);
+       ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata);
        if (ret < 0)
                return ret;
 
@@ -355,9 +354,10 @@ static const struct afs_operation_ops afs_store_data_operation = {
 static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
                          bool laundering)
 {
+       struct netfs_i_context *ictx = &vnode->netfs_ctx;
        struct afs_operation *op;
        struct afs_wb_key *wbk = NULL;
-       loff_t size = iov_iter_count(iter), i_size;
+       loff_t size = iov_iter_count(iter);
        int ret = -ENOKEY;
 
        _enter("%s{%llx:%llu.%u},%llx,%llx",
@@ -379,15 +379,13 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
                return -ENOMEM;
        }
 
-       i_size = i_size_read(&vnode->vfs_inode);
-
        afs_op_set_vnode(op, 0, vnode);
        op->file[0].dv_delta = 1;
        op->file[0].modification = true;
        op->store.write_iter = iter;
        op->store.pos = pos;
        op->store.size = size;
-       op->store.i_size = max(pos + size, i_size);
+       op->store.i_size = max(pos + size, ictx->remote_i_size);
        op->store.laundering = laundering;
        op->mtime = vnode->vfs_inode.i_mtime;
        op->flags |= AFS_OPERATION_UNINTR;
index 7b66b93..3c249b9 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1552,7 +1552,6 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb,
        file = req->ki_filp;
        if (unlikely(!(file->f_mode & FMODE_READ)))
                return -EBADF;
-       ret = -EINVAL;
        if (unlikely(!file->f_op->read_iter))
                return -EINVAL;
 
index 0399cf8..151e9da 100644 (file)
@@ -118,7 +118,7 @@ struct btrfs_bio_ctrl {
  */
 struct extent_changeset {
        /* How many bytes are set/cleared in this operation */
-       unsigned int bytes_changed;
+       u64 bytes_changed;
 
        /* Changed ranges */
        struct ulist range_changed;
index 9f455c9..380054c 100644 (file)
@@ -2957,8 +2957,9 @@ out:
        return ret;
 }
 
-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 {
+       struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_state *cached_state = NULL;
@@ -2990,6 +2991,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                goto out_only_mutex;
        }
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_only_mutex;
+
        lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
        lockend = round_down(offset + len,
                             btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
@@ -3430,7 +3435,7 @@ static long btrfs_fallocate(struct file *file, int mode,
                return -EOPNOTSUPP;
 
        if (mode & FALLOC_FL_PUNCH_HOLE)
-               return btrfs_punch_hole(inode, offset, len);
+               return btrfs_punch_hole(file, offset, len);
 
        /*
         * Only trigger disk allocation, don't trigger qgroup reserve
@@ -3452,6 +3457,10 @@ static long btrfs_fallocate(struct file *file, int mode,
                        goto out;
        }
 
+       ret = file_modified(file);
+       if (ret)
+               goto out;
+
        /*
         * TODO: Move these two operations after we have checked
         * accurate reserved space, or fallocate can still fail but
index aa0a60e..17d5557 100644 (file)
@@ -1128,7 +1128,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
        int ret = 0;
 
        if (btrfs_is_free_space_inode(inode)) {
-               WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }
@@ -4488,6 +4487,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
                           dest->root_key.objectid);
                return -EPERM;
        }
+       if (atomic_read(&dest->nr_swapfiles)) {
+               spin_unlock(&dest->root_item_lock);
+               btrfs_warn(fs_info,
+                          "attempt to delete subvolume %llu with active swapfile",
+                          root->root_key.objectid);
+               return -EPERM;
+       }
        root_flags = btrfs_root_flags(&dest->root_item);
        btrfs_set_root_flags(&dest->root_item,
                             root_flags | BTRFS_ROOT_SUBVOL_DEAD);
@@ -8296,7 +8302,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
         * cover the full folio, like invalidating the last folio, we're
         * still safe to wait for ordered extent to finish.
         */
-       if (!(offset == 0 && length == PAGE_SIZE)) {
+       if (!(offset == 0 && length == folio_size(folio))) {
                btrfs_releasepage(&folio->page, GFP_NOFS);
                return;
        }
@@ -11107,8 +11113,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
         * set. We use this counter to prevent snapshots. We must increment it
         * before walking the extents because we don't want a concurrent
         * snapshot to run after we've already checked the extents.
+        *
+        * It is possible that subvolume is marked for deletion but still not
+        * removed yet. To prevent this race, we check the root status before
+        * activating the swapfile.
         */
+       spin_lock(&root->root_item_lock);
+       if (btrfs_root_dead(root)) {
+               spin_unlock(&root->root_item_lock);
+
+               btrfs_exclop_finish(fs_info);
+               btrfs_warn(fs_info,
+               "cannot activate swapfile because subvolume %llu is being deleted",
+                       root->root_key.objectid);
+               return -EPERM;
+       }
        atomic_inc(&root->nr_swapfiles);
+       spin_unlock(&root->root_item_lock);
 
        isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
 
index 238cee5..f46e710 100644 (file)
@@ -1239,7 +1239,7 @@ static u32 get_extent_max_capacity(const struct extent_map *em)
 }
 
 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
-                                    bool locked)
+                                    u32 extent_thresh, u64 newer_than, bool locked)
 {
        struct extent_map *next;
        bool ret = false;
@@ -1249,11 +1249,12 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
                return false;
 
        /*
-        * We want to check if the next extent can be merged with the current
-        * one, which can be an extent created in a past generation, so we pass
-        * a minimum generation of 0 to defrag_lookup_extent().
+        * Here we need to pass @newer_than when checking the next extent, or
+        * we will hit a case we mark current extent for defrag, but the next
+        * one will not be a target.
+        * This will just cause extra IO without really reducing the fragments.
         */
-       next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
+       next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
        /* No more em or hole */
        if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
                goto out;
@@ -1265,6 +1266,13 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
         */
        if (next->len >= get_extent_max_capacity(em))
                goto out;
+       /* Skip older extent */
+       if (next->generation < newer_than)
+               goto out;
+       /* Also check extent size */
+       if (next->len >= extent_thresh)
+               goto out;
+
        ret = true;
 out:
        free_extent_map(next);
@@ -1470,7 +1478,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                        goto next;
 
                next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
-                                                         locked);
+                                               extent_thresh, newer_than, locked);
                if (!next_mergeable) {
                        struct defrag_target_range *last;
 
index 04a88bf..998e3f1 100644 (file)
@@ -645,7 +645,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
        int ret;
 
        /*
-        * Lock destination range to serialize with concurrent readpages() and
+        * Lock destination range to serialize with concurrent readahead() and
         * source range to serialize with relocation.
         */
        btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
@@ -739,7 +739,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
        }
 
        /*
-        * Lock destination range to serialize with concurrent readpages() and
+        * Lock destination range to serialize with concurrent readahead() and
         * source range to serialize with relocation.
         */
        btrfs_double_extent_lock(src, off, inode, destoff, len);
index 1be7cb2..2cfbc74 100644 (file)
@@ -1896,23 +1896,18 @@ static void update_dev_time(const char *device_path)
        path_put(&path);
 }
 
-static int btrfs_rm_dev_item(struct btrfs_device *device)
+static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
+                            struct btrfs_device *device)
 {
        struct btrfs_root *root = device->fs_info->chunk_root;
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
-       struct btrfs_trans_handle *trans;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       trans = btrfs_start_transaction(root, 0);
-       if (IS_ERR(trans)) {
-               btrfs_free_path(path);
-               return PTR_ERR(trans);
-       }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
@@ -1923,21 +1918,12 @@ static int btrfs_rm_dev_item(struct btrfs_device *device)
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
-               btrfs_abort_transaction(trans, ret);
-               btrfs_end_transaction(trans);
                goto out;
        }
 
        ret = btrfs_del_item(trans, root, path);
-       if (ret) {
-               btrfs_abort_transaction(trans, ret);
-               btrfs_end_transaction(trans);
-       }
-
 out:
        btrfs_free_path(path);
-       if (!ret)
-               ret = btrfs_commit_transaction(trans);
        return ret;
 }
 
@@ -2078,6 +2064,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                    struct btrfs_dev_lookup_args *args,
                    struct block_device **bdev, fmode_t *mode)
 {
+       struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct btrfs_fs_devices *cur_devices;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -2098,7 +2085,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 
        ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
        if (ret)
-               goto out;
+               return ret;
 
        device = btrfs_find_device(fs_info->fs_devices, args);
        if (!device) {
@@ -2106,27 +2093,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                        ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
                else
                        ret = -ENOENT;
-               goto out;
+               return ret;
        }
 
        if (btrfs_pinned_by_swapfile(fs_info, device)) {
                btrfs_warn_in_rcu(fs_info,
                  "cannot remove device %s (devid %llu) due to active swapfile",
                                  rcu_str_deref(device->name), device->devid);
-               ret = -ETXTBSY;
-               goto out;
+               return -ETXTBSY;
        }
 
-       if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
-               ret = BTRFS_ERROR_DEV_TGT_REPLACE;
-               goto out;
-       }
+       if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
+               return BTRFS_ERROR_DEV_TGT_REPLACE;
 
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
-           fs_info->fs_devices->rw_devices == 1) {
-               ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
-               goto out;
-       }
+           fs_info->fs_devices->rw_devices == 1)
+               return BTRFS_ERROR_DEV_ONLY_WRITABLE;
 
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                mutex_lock(&fs_info->chunk_mutex);
@@ -2139,14 +2121,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
        if (ret)
                goto error_undo;
 
-       /*
-        * TODO: the superblock still includes this device in its num_devices
-        * counter although write_all_supers() is not locked out. This
-        * could give a filesystem state which requires a degraded mount.
-        */
-       ret = btrfs_rm_dev_item(device);
-       if (ret)
+       trans = btrfs_start_transaction(fs_info->chunk_root, 0);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
                goto error_undo;
+       }
+
+       ret = btrfs_rm_dev_item(trans, device);
+       if (ret) {
+               /* Any error in dev item removal is critical */
+               btrfs_crit(fs_info,
+                          "failed to remove device item for devid %llu: %d",
+                          device->devid, ret);
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+               return ret;
+       }
 
        clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
        btrfs_scrub_cancel_dev(device);
@@ -2229,7 +2219,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                free_fs_devices(cur_devices);
        }
 
-out:
+       ret = btrfs_commit_transaction(trans);
+
        return ret;
 
 error_undo:
@@ -2240,7 +2231,7 @@ error_undo:
                device->fs_devices->rw_devices++;
                mutex_unlock(&fs_info->chunk_mutex);
        }
-       goto out;
+       return ret;
 }
 
 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
index b7b5fac..1b1b310 100644 (file)
@@ -1801,7 +1801,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
 
        map = em->map_lookup;
        /* We only support single profile for now */
-       ASSERT(map->num_stripes == 1);
        device = map->stripes[0].dev;
 
        free_extent_map(em);
@@ -1976,18 +1975,16 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
 
 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
 {
+       struct btrfs_fs_info *fs_info = fs_devices->fs_info;
        struct btrfs_device *device;
        bool ret = false;
 
-       if (!btrfs_is_zoned(fs_devices->fs_info))
+       if (!btrfs_is_zoned(fs_info))
                return true;
 
-       /* Non-single profiles are not supported yet */
-       ASSERT((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0);
-
        /* Check if there is a device with active zones left */
-       mutex_lock(&fs_devices->device_list_mutex);
-       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+       mutex_lock(&fs_info->chunk_mutex);
+       list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
                struct btrfs_zoned_device_info *zinfo = device->zone_info;
 
                if (!device->bdev)
@@ -1999,7 +1996,7 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
                        break;
                }
        }
-       mutex_unlock(&fs_devices->device_list_mutex);
+       mutex_unlock(&fs_info->chunk_mutex);
 
        return ret;
 }
index d67fbe0..2b5561a 100644 (file)
@@ -2352,8 +2352,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
        if (err)
                goto out;
 
-       err = pagecache_write_begin(NULL, mapping, size, 0,
-                                   AOP_FLAG_CONT_EXPAND, &page, &fsdata);
+       err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
        if (err)
                goto out;
 
index bc7c7a7..9dc81e7 100644 (file)
@@ -380,18 +380,18 @@ presubmission_error:
  * Prepare a read operation, shortening it to a cached/uncached
  * boundary as appropriate.
  */
-static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subrequest *subreq,
+static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
                                                      loff_t i_size)
 {
        enum cachefiles_prepare_read_trace why;
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct fscache_cookie *cookie = fscache_cres_cookie(cres);
        const struct cred *saved_cred;
        struct file *file = cachefiles_cres_file(cres);
-       enum netfs_read_source ret = NETFS_DOWNLOAD_FROM_SERVER;
+       enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
        loff_t off, to;
        ino_t ino = file ? file_inode(file)->i_ino : 0;
 
@@ -404,7 +404,7 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
        }
 
        if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
-               __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+               __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
                why = cachefiles_trace_read_no_data;
                goto out_no_object;
        }
@@ -473,7 +473,7 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
        goto out;
 
 download_and_store:
-       __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+       __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 out:
        cachefiles_end_secure(cache, saved_cred);
 out_no_object:
index c7a0ab0..aa25bff 100644 (file)
@@ -182,7 +182,7 @@ static int ceph_releasepage(struct page *page, gfp_t gfp)
        return 1;
 }
 
-static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
+static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
 {
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
@@ -199,7 +199,7 @@ static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
        rreq->len = roundup(rreq->len, lo->stripe_unit);
 }
 
-static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
 {
        struct inode *inode = subreq->rreq->inode;
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -218,7 +218,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 {
        struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
-       struct netfs_read_subrequest *subreq = req->r_priv;
+       struct netfs_io_subrequest *subreq = req->r_priv;
        int num_pages;
        int err = req->r_result;
 
@@ -244,9 +244,9 @@ static void finish_netfs_read(struct ceph_osd_request *req)
        iput(req->r_inode);
 }
 
-static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_mds_reply_info_parsed *rinfo;
        struct ceph_mds_reply_info_in *iinfo;
@@ -258,7 +258,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
        size_t len;
 
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-       __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+       __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 
        if (subreq->start >= inode->i_size)
                goto out;
@@ -297,9 +297,9 @@ out:
        return true;
 }
 
-static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
+static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -353,6 +353,45 @@ out:
        dout("%s: result %d\n", __func__, err);
 }
 
+static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+       struct inode *inode = rreq->inode;
+       int got = 0, want = CEPH_CAP_FILE_CACHE;
+       int ret = 0;
+
+       if (rreq->origin != NETFS_READAHEAD)
+               return 0;
+
+       if (file) {
+               struct ceph_rw_context *rw_ctx;
+               struct ceph_file_info *fi = file->private_data;
+
+               rw_ctx = ceph_find_rw_context(fi);
+               if (rw_ctx)
+                       return 0;
+       }
+
+       /*
+        * readahead callers do not necessarily hold Fcb caps
+        * (e.g. fadvise, madvise).
+        */
+       ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
+       if (ret < 0) {
+               dout("start_read %p, error getting cap\n", inode);
+               return ret;
+       }
+
+       if (!(got & want)) {
+               dout("start_read %p, no cache cap\n", inode);
+               return -EACCES;
+       }
+       if (ret == 0)
+               return -EACCES;
+
+       rreq->netfs_priv = (void *)(uintptr_t)got;
+       return 0;
+}
+
 static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
 {
        struct inode *inode = mapping->host;
@@ -363,64 +402,16 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
                ceph_put_cap_refs(ci, got);
 }
 
-static const struct netfs_read_request_ops ceph_netfs_read_ops = {
-       .is_cache_enabled       = ceph_is_cache_enabled,
+const struct netfs_request_ops ceph_netfs_ops = {
+       .init_request           = ceph_init_request,
        .begin_cache_operation  = ceph_begin_cache_operation,
-       .issue_op               = ceph_netfs_issue_op,
+       .issue_read             = ceph_netfs_issue_read,
        .expand_readahead       = ceph_netfs_expand_readahead,
        .clamp_length           = ceph_netfs_clamp_length,
        .check_write_begin      = ceph_netfs_check_write_begin,
        .cleanup                = ceph_readahead_cleanup,
 };
 
-/* read a single page, without unlocking it. */
-static int ceph_readpage(struct file *file, struct page *subpage)
-{
-       struct folio *folio = page_folio(subpage);
-       struct inode *inode = file_inode(file);
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_vino vino = ceph_vino(inode);
-       size_t len = folio_size(folio);
-       u64 off = folio_file_pos(folio);
-
-       dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n inline %d",
-            vino.ino, vino.snap, file, off, len, folio, folio_index(folio),
-            ci->i_inline_version != CEPH_INLINE_NONE);
-
-       return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
-}
-
-static void ceph_readahead(struct readahead_control *ractl)
-{
-       struct inode *inode = file_inode(ractl->file);
-       struct ceph_file_info *fi = ractl->file->private_data;
-       struct ceph_rw_context *rw_ctx;
-       int got = 0;
-       int ret = 0;
-
-       if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
-               return;
-
-       rw_ctx = ceph_find_rw_context(fi);
-       if (!rw_ctx) {
-               /*
-                * readahead callers do not necessarily hold Fcb caps
-                * (e.g. fadvise, madvise).
-                */
-               int want = CEPH_CAP_FILE_CACHE;
-
-               ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
-               if (ret < 0)
-                       dout("start_read %p, error getting cap\n", inode);
-               else if (!(got & want))
-                       dout("start_read %p, no cache cap\n", inode);
-
-               if (ret <= 0)
-                       return;
-       }
-       netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
-}
-
 #ifdef CONFIG_CEPH_FSCACHE
 static void ceph_set_page_fscache(struct page *page)
 {
@@ -1327,8 +1318,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
        struct folio *folio = NULL;
        int r;
 
-       r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
-                             &ceph_netfs_read_ops, NULL);
+       r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL);
        if (r == 0)
                folio_wait_fscache(folio);
        if (r < 0) {
@@ -1382,8 +1372,8 @@ out:
 }
 
 const struct address_space_operations ceph_aops = {
-       .readpage = ceph_readpage,
-       .readahead = ceph_readahead,
+       .readpage = netfs_readpage,
+       .readahead = netfs_readahead,
        .writepage = ceph_writepage,
        .writepages = ceph_writepages_start,
        .write_begin = ceph_write_begin,
index 7d22850..ddea999 100644 (file)
@@ -29,26 +29,25 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
        if (!(inode->i_state & I_NEW))
                return;
 
-       WARN_ON_ONCE(ci->fscache);
+       WARN_ON_ONCE(ci->netfs_ctx.cache);
 
-       ci->fscache = fscache_acquire_cookie(fsc->fscache, 0,
-                                            &ci->i_vino, sizeof(ci->i_vino),
-                                            &ci->i_version, sizeof(ci->i_version),
-                                            i_size_read(inode));
+       ci->netfs_ctx.cache =
+               fscache_acquire_cookie(fsc->fscache, 0,
+                                      &ci->i_vino, sizeof(ci->i_vino),
+                                      &ci->i_version, sizeof(ci->i_version),
+                                      i_size_read(inode));
 }
 
-void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
+void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
 {
-       struct fscache_cookie *cookie = ci->fscache;
-
-       fscache_relinquish_cookie(cookie, false);
+       fscache_relinquish_cookie(ceph_fscache_cookie(ci), false);
 }
 
 void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       fscache_use_cookie(ci->fscache, will_modify);
+       fscache_use_cookie(ceph_fscache_cookie(ci), will_modify);
 }
 
 void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
@@ -58,9 +57,10 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
        if (update) {
                loff_t i_size = i_size_read(inode);
 
-               fscache_unuse_cookie(ci->fscache, &ci->i_version, &i_size);
+               fscache_unuse_cookie(ceph_fscache_cookie(ci),
+                                    &ci->i_version, &i_size);
        } else {
-               fscache_unuse_cookie(ci->fscache, NULL, NULL);
+               fscache_unuse_cookie(ceph_fscache_cookie(ci), NULL, NULL);
        }
 }
 
@@ -69,14 +69,14 @@ void ceph_fscache_update(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        loff_t i_size = i_size_read(inode);
 
-       fscache_update_cookie(ci->fscache, &ci->i_version, &i_size);
+       fscache_update_cookie(ceph_fscache_cookie(ci), &ci->i_version, &i_size);
 }
 
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       fscache_invalidate(ceph_inode(inode)->fscache,
+       fscache_invalidate(ceph_fscache_cookie(ci),
                           &ci->i_version, i_size_read(inode),
                           dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
 }
index b90f301..7255b79 100644 (file)
@@ -26,14 +26,9 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update);
 void ceph_fscache_update(struct inode *inode);
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-       ci->fscache = NULL;
-}
-
 static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
 {
-       return ci->fscache;
+       return netfs_i_cookie(&ci->vfs_inode);
 }
 
 static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
@@ -62,7 +57,7 @@ static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
        return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
        struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
 
@@ -91,10 +86,6 @@ static inline void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
 {
 }
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-}
-
 static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
 }
@@ -144,7 +135,7 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
        return false;
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
        return -ENOBUFS;
 }
index feb75eb..6c9e837 100644 (file)
@@ -1869,7 +1869,7 @@ retry_snap:
                 * are pending vmtruncate. So write and vmtruncate
                 * can not run at the same time
                 */
-               written = generic_perform_write(file, from, pos);
+               written = generic_perform_write(iocb, from);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
                ceph_end_io_write(inode);
index d80911d..63113e2 100644 (file)
@@ -459,6 +459,9 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        dout("alloc_inode %p\n", &ci->vfs_inode);
 
+       /* Set parameters for the netfs library */
+       netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);
+
        spin_lock_init(&ci->i_ceph_lock);
 
        ci->i_version = 0;
@@ -544,9 +547,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        INIT_WORK(&ci->i_work, ceph_inode_work);
        ci->i_work_mask = 0;
        memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
-
-       ceph_fscache_inode_init(ci);
-
        return &ci->vfs_inode;
 }
 
index a1ecc41..20ceab7 100644 (file)
 #include <linux/posix_acl.h>
 #include <linux/refcount.h>
 #include <linux/security.h>
+#include <linux/netfs.h>
+#include <linux/fscache.h>
 
 #include <linux/ceph/libceph.h>
 
-#ifdef CONFIG_CEPH_FSCACHE
-#include <linux/fscache.h>
-#endif
-
 /* large granularity for statfs utilization stats to facilitate
  * large volume sizes on 32-bit machines. */
 #define CEPH_BLOCK_SHIFT   22  /* 4 MB */
@@ -318,6 +316,11 @@ struct ceph_inode_xattrs_info {
  * Ceph inode.
  */
 struct ceph_inode_info {
+       struct {
+               /* These must be contiguous */
+               struct inode vfs_inode;
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
        spinlock_t i_ceph_lock;
@@ -428,11 +431,6 @@ struct ceph_inode_info {
 
        struct work_struct i_work;
        unsigned long  i_work_mask;
-
-#ifdef CONFIG_CEPH_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
-       struct inode vfs_inode; /* at end */
 };
 
 static inline struct ceph_inode_info *
@@ -1216,6 +1214,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
 
 /* addr.c */
 extern const struct address_space_operations ceph_aops;
+extern const struct netfs_request_ops ceph_netfs_ops;
 extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
 extern int ceph_uninline_data(struct file *file);
 extern int ceph_pool_perm_check(struct inode *inode, int need);
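
The anonymous struct pairing vfs_inode with netfs_ctx is what the "These must be contiguous" comment is about: netfslib locates its per-inode context purely by address arithmetic from the VFS inode, and the fscache cookie that used to live in ci->fscache now sits inside that context. A rough reconstruction of the helpers involved (simplified; the authoritative definitions, including the !CONFIG_FSCACHE case, live in include/linux/netfs.h):

	static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
	{
		/* the context is assumed to sit directly after the VFS inode */
		return (void *)inode + sizeof(*inode);
	}

	static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
	{
		/* ctx->cache is the slot that ci->fscache used to provide */
		return netfs_i_context(inode)->cache;
	}
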
index ea00e1a..9d33481 100644 (file)
@@ -94,7 +94,7 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
                   le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
                   le32_to_cpu(tcon->fsAttrInfo.Attributes),
                   le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
-                  tcon->tidStatus);
+                  tcon->status);
        if (dev_type == FILE_DEVICE_DISK)
                seq_puts(m, " type: DISK ");
        else if (dev_type == FILE_DEVICE_CD_ROM)
index d1211ad..a47fa44 100644 (file)
@@ -699,14 +699,14 @@ static void cifs_umount_begin(struct super_block *sb)
        tcon = cifs_sb_master_tcon(cifs_sb);
 
        spin_lock(&cifs_tcp_ses_lock);
-       if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
+       if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
                /* we have other mounts to same share or we have
                   already tried to force umount this and woken up
                   all waiting network requests, nothing to do */
                spin_unlock(&cifs_tcp_ses_lock);
                return;
        } else if (tcon->tc_count == 1)
-               tcon->tidStatus = CifsExiting;
+               tcon->status = TID_EXITING;
        spin_unlock(&cifs_tcp_ses_lock);
 
        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
index 48b343d..8de977c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/utsname.h>
+#include <linux/netfs.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
 #include <crypto/internal/hash.h>
@@ -115,10 +116,18 @@ enum statusEnum {
        CifsInNegotiate,
        CifsNeedSessSetup,
        CifsInSessSetup,
-       CifsNeedTcon,
-       CifsInTcon,
-       CifsNeedFilesInvalidate,
-       CifsInFilesInvalidate
+};
+
+/* associated with each tree connection to the server */
+enum tid_status_enum {
+       TID_NEW = 0,
+       TID_GOOD,
+       TID_EXITING,
+       TID_NEED_RECON,
+       TID_NEED_TCON,
+       TID_IN_TCON,
+       TID_NEED_FILES_INVALIDATE, /* currently unused */
+       TID_IN_FILES_INVALIDATE
 };
 
 enum securityEnum {
@@ -852,13 +861,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
 #define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
 #define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
 
-/*
- * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
- * a single wsize request with a single call.
- */
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
-#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
 
 /*
  * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
@@ -1038,7 +1041,7 @@ struct cifs_tcon {
        char *password;         /* for share-level security */
        __u32 tid;              /* The 4 byte tree id */
        __u16 Flags;            /* optional support bits */
-       enum statusEnum tidStatus;
+       enum tid_status_enum status;
        atomic_t num_smbs_sent;
        union {
                struct {
@@ -1402,6 +1405,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
  */
 
 struct cifsInodeInfo {
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        bool can_cache_brlcks;
        struct list_head llist; /* locks held by this inode */
        /*
@@ -1432,10 +1440,6 @@ struct cifsInodeInfo {
        u64  uniqueid;                  /* server inode number */
        u64  createtime;                /* creation time on server */
        __u8 lease_key[SMB2_LEASE_KEY_SIZE];    /* lease key for this inode */
-#ifdef CONFIG_CIFS_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
-       struct inode vfs_inode;
        struct list_head deferred_closes; /* list of deferred closes */
        spinlock_t deferred_lock; /* protection on deferred list */
        bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
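
The new tid_status_enum separates per-tree-connection state from the session/connection statusEnum above it. Read off the other hunks in this series, the life cycle works out roughly as follows (a summary for orientation, not code added by the patch):

	/*
	 * TID_NEW                tconInfoAlloc()
	 *   -> TID_IN_TCON       cifs_tree_connect() issues the tree connect
	 *   -> TID_GOOD          tree connect succeeded
	 *       (-> TID_NEED_TCON again if it failed)
	 * TID_GOOD -> TID_NEED_RECON    server marked for reconnect
	 * TID_GOOD -> TID_EXITING       forced umount / teardown
	 * TID_NEED_RECON -> TID_IN_FILES_INVALIDATE -> TID_NEED_TCON
	 *                               open files invalidated before reconnect
	 */
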
index 68b9a43..aeba371 100644 (file)
  */
 #define CIFS_SESS_KEY_SIZE (16)
 
-/*
- * Size of the smb3 signing key
- */
-#define SMB3_SIGN_KEY_SIZE (16)
-
-/*
- * Size of the smb3 encryption/decryption key storage.
- * This size is big enough to store any cipher key types.
- */
-#define SMB3_ENC_DEC_KEY_SIZE (32)
-
-#define CIFS_CLIENT_CHALLENGE_SIZE (8)
 #define CIFS_SERVER_CHALLENGE_SIZE (8)
 #define CIFS_HMAC_MD5_HASH_SIZE (16)
 #define CIFS_CPHTXT_SIZE (16)
@@ -1658,7 +1646,7 @@ struct smb_t2_rsp {
 #define SMB_FIND_FILE_ID_FULL_DIR_INFO    0x105
 #define SMB_FIND_FILE_ID_BOTH_DIR_INFO    0x106
 #define SMB_FIND_FILE_UNIX                0x202
-#define SMB_FIND_FILE_POSIX_INFO          0x064
+/* #define SMB_FIND_FILE_POSIX_INFO          0x064 */
 
 typedef struct smb_com_transaction2_qpi_req {
        struct smb_hdr hdr;     /* wct = 14+ */
index 071e2f2..47e927c 100644 (file)
@@ -75,12 +75,11 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
 
        /* only send once per connect */
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->ses->status != CifsGood ||
-           tcon->tidStatus != CifsNeedReconnect) {
+       if ((tcon->ses->status != CifsGood) || (tcon->status != TID_NEED_RECON)) {
                spin_unlock(&cifs_tcp_ses_lock);
                return;
        }
-       tcon->tidStatus = CifsInFilesInvalidate;
+       tcon->status = TID_IN_FILES_INVALIDATE;
        spin_unlock(&cifs_tcp_ses_lock);
 
        /* list all files open on tree connection and mark them invalid */
@@ -100,8 +99,8 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        mutex_unlock(&tcon->crfid.fid_mutex);
 
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->tidStatus == CifsInFilesInvalidate)
-               tcon->tidStatus = CifsNeedTcon;
+       if (tcon->status == TID_IN_FILES_INVALIDATE)
+               tcon->status = TID_NEED_TCON;
        spin_unlock(&cifs_tcp_ses_lock);
 
        /*
@@ -136,7 +135,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
         * have tcon) are allowed as we start force umount
         */
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->tidStatus == CifsExiting) {
+       if (tcon->status == TID_EXITING) {
                if (smb_command != SMB_COM_WRITE_ANDX &&
                    smb_command != SMB_COM_OPEN_ANDX &&
                    smb_command != SMB_COM_TREE_DISCONNECT) {
@@ -597,7 +596,7 @@ CIFSSMBNegotiate(const unsigned int xid,
        set_credits(server, server->maxReq);
        /* probably no need to store and check maxvcs */
        server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
-       /* set up max_read for readpages check */
+       /* set up max_read for readahead check */
        server->max_read = server->maxBuf;
        server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
        cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
index 9964c36..ee3b7c1 100644 (file)
@@ -245,7 +245,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
 
                list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
                        tcon->need_reconnect = true;
-                       tcon->tidStatus = CifsNeedReconnect;
+                       tcon->status = TID_NEED_RECON;
                }
                if (ses->tcon_ipc)
                        ses->tcon_ipc->need_reconnect = true;
@@ -2207,7 +2207,7 @@ get_ses_fail:
 
 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 {
-       if (tcon->tidStatus == CifsExiting)
+       if (tcon->status == TID_EXITING)
                return 0;
        if (strncmp(tcon->treeName, ctx->UNC, MAX_TREE_SIZE))
                return 0;
@@ -3513,6 +3513,9 @@ static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
        struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
        char *oldmnt = cifs_sb->ctx->mount_options;
 
+       cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
+                dfs_cache_get_tgt_name(tit));
+
        rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
        if (rc)
                goto out;
@@ -3611,13 +3614,18 @@ static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
        if (rc)
                goto out;
 
-       /* Try all dfs link targets */
+       /* Try all dfs link targets.  If an I/O fails from currently connected DFS target with an
+        * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
+        * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
+        * STATUS_PATH_NOT_COVERED."
+        */
        for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
             tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
                rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
                if (!rc) {
                        rc = is_path_remote(mnt_ctx);
-                       break;
+                       if (!rc || rc == -EREMOTE)
+                               break;
                }
        }
 
@@ -3691,7 +3699,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
                goto error;
 
        rc = is_path_remote(&mnt_ctx);
-       if (rc == -EREMOTE)
+       if (rc)
                rc = follow_dfs_link(&mnt_ctx);
        if (rc)
                goto error;
@@ -4478,12 +4486,12 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
        /* only send once per connect */
        spin_lock(&cifs_tcp_ses_lock);
        if (tcon->ses->status != CifsGood ||
-           (tcon->tidStatus != CifsNew &&
-           tcon->tidStatus != CifsNeedTcon)) {
+           (tcon->status != TID_NEW &&
+           tcon->status != TID_NEED_TCON)) {
                spin_unlock(&cifs_tcp_ses_lock);
                return 0;
        }
-       tcon->tidStatus = CifsInTcon;
+       tcon->status = TID_IN_TCON;
        spin_unlock(&cifs_tcp_ses_lock);
 
        tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
@@ -4524,13 +4532,13 @@ out:
 
        if (rc) {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsNeedTcon;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_NEED_TCON;
                spin_unlock(&cifs_tcp_ses_lock);
        } else {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsGood;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_GOOD;
                spin_unlock(&cifs_tcp_ses_lock);
                tcon->need_reconnect = false;
        }
@@ -4546,24 +4554,24 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
        /* only send once per connect */
        spin_lock(&cifs_tcp_ses_lock);
        if (tcon->ses->status != CifsGood ||
-           (tcon->tidStatus != CifsNew &&
-           tcon->tidStatus != CifsNeedTcon)) {
+           (tcon->status != TID_NEW &&
+           tcon->status != TID_NEED_TCON)) {
                spin_unlock(&cifs_tcp_ses_lock);
                return 0;
        }
-       tcon->tidStatus = CifsInTcon;
+       tcon->status = TID_IN_TCON;
        spin_unlock(&cifs_tcp_ses_lock);
 
        rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
        if (rc) {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsNeedTcon;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_NEED_TCON;
                spin_unlock(&cifs_tcp_ses_lock);
        } else {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsGood;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_GOOD;
                spin_unlock(&cifs_tcp_ses_lock);
                tcon->need_reconnect = false;
        }
index 60f43bf..d511a78 100644 (file)
@@ -4210,13 +4210,19 @@ cifs_page_mkwrite(struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
 
+       /* Wait for the page to be written to the cache before we allow it to
+        * be modified.  We then assume the entire page will need writing back.
+        */
 #ifdef CONFIG_CIFS_FSCACHE
        if (PageFsCache(page) &&
            wait_on_page_fscache_killable(page) < 0)
                return VM_FAULT_RETRY;
 #endif
 
-       lock_page(page);
+       wait_on_page_writeback(page);
+
+       if (lock_page_killable(page) < 0)
+               return VM_FAULT_RETRY;
        return VM_FAULT_LOCKED;
 }
 
index 33af72e..a638b29 100644 (file)
@@ -103,7 +103,7 @@ void cifs_fscache_get_inode_cookie(struct inode *inode)
 
        cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
 
-       cifsi->fscache =
+       cifsi->netfs_ctx.cache =
                fscache_acquire_cookie(tcon->fscache, 0,
                                       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
                                       &cd, sizeof(cd),
@@ -126,22 +126,15 @@ void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
 void cifs_fscache_release_inode_cookie(struct inode *inode)
 {
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
+       struct fscache_cookie *cookie = cifs_inode_cookie(inode);
 
-       if (cifsi->fscache) {
-               cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
-               fscache_relinquish_cookie(cifsi->fscache, false);
-               cifsi->fscache = NULL;
+       if (cookie) {
+               cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie);
+               fscache_relinquish_cookie(cookie, false);
+               cifsi->netfs_ctx.cache = NULL;
        }
 }
 
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * Fallback page reading interface.
  */
index 5512990..52355c0 100644 (file)
@@ -61,7 +61,7 @@ void cifs_fscache_fill_coherency(struct inode *inode,
 
 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
 {
-       return CIFS_I(inode)->fscache;
+       return netfs_i_cookie(inode);
 }
 
 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
index 60d853c..2f9e7d2 100644 (file)
@@ -49,7 +49,7 @@ static void cifs_set_ops(struct inode *inode)
                        inode->i_fop = &cifs_file_ops;
                }
 
-               /* check if server can support readpages */
+               /* check if server can support readahead */
                if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
                                PAGE_SIZE + MAX_CIFS_HDR_SIZE)
                        inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
index 56598f7..afaf59c 100644 (file)
@@ -116,7 +116,7 @@ tconInfoAlloc(void)
        }
 
        atomic_inc(&tconInfoAllocCount);
-       ret_buf->tidStatus = CifsNew;
+       ret_buf->status = TID_NEW;
        ++ret_buf->tc_count;
        INIT_LIST_HEAD(&ret_buf->openFileList);
        INIT_LIST_HEAD(&ret_buf->tcon_list);
index 4125fd1..82e916a 100644 (file)
 #define END_OF_CHAIN 4
 #define RELATED_REQUEST 8
 
-#define SMB2_SIGNATURE_SIZE (16)
-#define SMB2_NTLMV2_SESSKEY_SIZE (16)
-#define SMB2_HMACSHA256_SIZE (32)
-#define SMB2_CMACAES_SIZE (16)
-#define SMB3_SIGNKEY_SIZE (16)
-#define SMB3_GCM128_CRYPTKEY_SIZE (16)
-#define SMB3_GCM256_CRYPTKEY_SIZE (32)
-
-/* Maximum buffer size value we can send with 1 credit */
-#define SMB2_MAX_BUFFER_SIZE 65536
-
 #endif /* _SMB2_GLOB_H */
index b25623e..c653beb 100644 (file)
@@ -203,7 +203,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
 
        if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
                if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
-                   pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) {
+                   pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
                        /* error packets have 9 byte structure size */
                        cifs_dbg(VFS, "Invalid response size %u for command %d\n",
                                 le16_to_cpu(pdu->StructureSize2), command);
@@ -303,7 +303,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
        /* error responses do not have data area */
        if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
            (((struct smb2_err_rsp *)shdr)->StructureSize) ==
-                                               SMB2_ERROR_STRUCTURE_SIZE2)
+                                               SMB2_ERROR_STRUCTURE_SIZE2_LE)
                return NULL;
 
        /*
@@ -478,11 +478,11 @@ smb2_get_lease_state(struct cifsInodeInfo *cinode)
        __le32 lease = 0;
 
        if (CIFS_CACHE_WRITE(cinode))
-               lease |= SMB2_LEASE_WRITE_CACHING;
+               lease |= SMB2_LEASE_WRITE_CACHING_LE;
        if (CIFS_CACHE_HANDLE(cinode))
-               lease |= SMB2_LEASE_HANDLE_CACHING;
+               lease |= SMB2_LEASE_HANDLE_CACHING_LE;
        if (CIFS_CACHE_READ(cinode))
-               lease |= SMB2_LEASE_READ_CACHING;
+               lease |= SMB2_LEASE_READ_CACHING_LE;
        return lease;
 }
 
@@ -832,8 +832,8 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
        rc = __smb2_handle_cancelled_cmd(tcon,
                                         le16_to_cpu(hdr->Command),
                                         le64_to_cpu(hdr->MessageId),
-                                        le64_to_cpu(rsp->PersistentFileId),
-                                        le64_to_cpu(rsp->VolatileFileId));
+                                        rsp->PersistentFileId,
+                                        rsp->VolatileFileId);
        if (rc)
                cifs_put_tcon(tcon);
 
index 891b115..db23f5b 100644 (file)
@@ -897,8 +897,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
        atomic_inc(&tcon->num_remote_opens);
 
        o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
-       oparms.fid->persistent_fid = le64_to_cpu(o_rsp->PersistentFileId);
-       oparms.fid->volatile_fid = le64_to_cpu(o_rsp->VolatileFileId);
+       oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+       oparms.fid->volatile_fid = o_rsp->VolatileFileId;
 #ifdef CONFIG_CIFS_DEBUG2
        oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
@@ -1192,17 +1192,12 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
               struct cifs_sb_info *cifs_sb)
 {
        int rc;
-       __le16 *utf16_path;
        struct kvec rsp_iov = {NULL, 0};
        int buftype = CIFS_NO_BUFFER;
        struct smb2_query_info_rsp *rsp;
        struct smb2_file_full_ea_info *info = NULL;
 
-       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
-       if (!utf16_path)
-               return -ENOMEM;
-
-       rc = smb2_query_info_compound(xid, tcon, utf16_path,
+       rc = smb2_query_info_compound(xid, tcon, path,
                                      FILE_READ_EA,
                                      FILE_FULL_EA_INFORMATION,
                                      SMB2_O_INFO_FILE,
@@ -1235,7 +1230,6 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
                        le32_to_cpu(rsp->OutputBufferLength), ea_name);
 
  qeas_exit:
-       kfree(utf16_path);
        free_rsp_buf(buftype, rsp_iov.iov_base);
        return rc;
 }
@@ -1295,7 +1289,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
                         * the new EA. If not we should not add it since we
                         * would not be able to even read the EAs back.
                         */
-                       rc = smb2_query_info_compound(xid, tcon, utf16_path,
+                       rc = smb2_query_info_compound(xid, tcon, path,
                                      FILE_READ_EA,
                                      FILE_FULL_EA_INFORMATION,
                                      SMB2_O_INFO_FILE,
@@ -1643,6 +1637,7 @@ smb2_ioctl_query_info(const unsigned int xid,
        unsigned int size[2];
        void *data[2];
        int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
+       void (*free_req1_func)(struct smb_rqst *r);
 
        vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
        if (vars == NULL)
@@ -1652,27 +1647,29 @@ smb2_ioctl_query_info(const unsigned int xid,
 
        resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
 
-       if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
-               goto e_fault;
-
+       if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
+               rc = -EFAULT;
+               goto free_vars;
+       }
        if (qi.output_buffer_length > 1024) {
-               kfree(vars);
-               return -EINVAL;
+               rc = -EINVAL;
+               goto free_vars;
        }
 
        if (!ses || !server) {
-               kfree(vars);
-               return -EIO;
+               rc = -EIO;
+               goto free_vars;
        }
 
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       buffer = memdup_user(arg + sizeof(struct smb_query_info),
-                            qi.output_buffer_length);
-       if (IS_ERR(buffer)) {
-               kfree(vars);
-               return PTR_ERR(buffer);
+       if (qi.output_buffer_length) {
+               buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
+               if (IS_ERR(buffer)) {
+                       rc = PTR_ERR(buffer);
+                       goto free_vars;
+               }
        }
 
        /* Open */
@@ -1710,45 +1707,45 @@ smb2_ioctl_query_info(const unsigned int xid,
        rc = SMB2_open_init(tcon, server,
                            &rqst[0], &oplock, &oparms, path);
        if (rc)
-               goto iqinf_exit;
+               goto free_output_buffer;
        smb2_set_next_command(tcon, &rqst[0]);
 
        /* Query */
        if (qi.flags & PASSTHRU_FSCTL) {
                /* Can eventually relax perm check since server enforces too */
-               if (!capable(CAP_SYS_ADMIN))
+               if (!capable(CAP_SYS_ADMIN)) {
                        rc = -EPERM;
-               else  {
-                       rqst[1].rq_iov = &vars->io_iov[0];
-                       rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
-
-                       rc = SMB2_ioctl_init(tcon, server,
-                                            &rqst[1],
-                                            COMPOUND_FID, COMPOUND_FID,
-                                            qi.info_type, true, buffer,
-                                            qi.output_buffer_length,
-                                            CIFSMaxBufSize -
-                                            MAX_SMB2_CREATE_RESPONSE_SIZE -
-                                            MAX_SMB2_CLOSE_RESPONSE_SIZE);
+                       goto free_open_req;
                }
+               rqst[1].rq_iov = &vars->io_iov[0];
+               rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+
+               rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+                                    qi.info_type, true, buffer, qi.output_buffer_length,
+                                    CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+                                    MAX_SMB2_CLOSE_RESPONSE_SIZE);
+               free_req1_func = SMB2_ioctl_free;
        } else if (qi.flags == PASSTHRU_SET_INFO) {
                /* Can eventually relax perm check since server enforces too */
-               if (!capable(CAP_SYS_ADMIN))
+               if (!capable(CAP_SYS_ADMIN)) {
                        rc = -EPERM;
-               else  {
-                       rqst[1].rq_iov = &vars->si_iov[0];
-                       rqst[1].rq_nvec = 1;
-
-                       size[0] = 8;
-                       data[0] = buffer;
-
-                       rc = SMB2_set_info_init(tcon, server,
-                                       &rqst[1],
-                                       COMPOUND_FID, COMPOUND_FID,
-                                       current->tgid,
-                                       FILE_END_OF_FILE_INFORMATION,
-                                       SMB2_O_INFO_FILE, 0, data, size);
+                       goto free_open_req;
+               }
+               if (qi.output_buffer_length < 8) {
+                       rc = -EINVAL;
+                       goto free_open_req;
                }
+               rqst[1].rq_iov = &vars->si_iov[0];
+               rqst[1].rq_nvec = 1;
+
+               /* MS-FSCC 2.4.13 FileEndOfFileInformation */
+               size[0] = 8;
+               data[0] = buffer;
+
+               rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+                                       current->tgid, FILE_END_OF_FILE_INFORMATION,
+                                       SMB2_O_INFO_FILE, 0, data, size);
+               free_req1_func = SMB2_set_info_free;
        } else if (qi.flags == PASSTHRU_QUERY_INFO) {
                rqst[1].rq_iov = &vars->qi_iov[0];
                rqst[1].rq_nvec = 1;
@@ -1759,6 +1756,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                                  qi.info_type, qi.additional_information,
                                  qi.input_buffer_length,
                                  qi.output_buffer_length, buffer);
+               free_req1_func = SMB2_query_info_free;
        } else { /* unknown flags */
                cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
                              qi.flags);
@@ -1766,7 +1764,7 @@ smb2_ioctl_query_info(const unsigned int xid,
        }
 
        if (rc)
-               goto iqinf_exit;
+               goto free_open_req;
        smb2_set_next_command(tcon, &rqst[1]);
        smb2_set_related(&rqst[1]);
 
@@ -1777,14 +1775,14 @@ smb2_ioctl_query_info(const unsigned int xid,
        rc = SMB2_close_init(tcon, server,
                             &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
        if (rc)
-               goto iqinf_exit;
+               goto free_req_1;
        smb2_set_related(&rqst[2]);
 
        rc = compound_send_recv(xid, ses, server,
                                flags, 3, rqst,
                                resp_buftype, rsp_iov);
        if (rc)
-               goto iqinf_exit;
+               goto out;
 
        /* No need to bump num_remote_opens since handle immediately closed */
        if (qi.flags & PASSTHRU_FSCTL) {
@@ -1794,18 +1792,22 @@ smb2_ioctl_query_info(const unsigned int xid,
                        qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
                if (qi.input_buffer_length > 0 &&
                    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
-                   > rsp_iov[1].iov_len)
-                       goto e_fault;
+                   > rsp_iov[1].iov_len) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                if (copy_to_user(&pqi->input_buffer_length,
                                 &qi.input_buffer_length,
-                                sizeof(qi.input_buffer_length)))
-                       goto e_fault;
+                                sizeof(qi.input_buffer_length))) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
                                 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
                                 qi.input_buffer_length))
-                       goto e_fault;
+                       rc = -EFAULT;
        } else {
                pqi = (struct smb_query_info __user *)arg;
                qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
@@ -1813,28 +1815,30 @@ smb2_ioctl_query_info(const unsigned int xid,
                        qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
                if (copy_to_user(&pqi->input_buffer_length,
                                 &qi.input_buffer_length,
-                                sizeof(qi.input_buffer_length)))
-                       goto e_fault;
+                                sizeof(qi.input_buffer_length))) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                if (copy_to_user(pqi + 1, qi_rsp->Buffer,
                                 qi.input_buffer_length))
-                       goto e_fault;
+                       rc = -EFAULT;
        }
 
- iqinf_exit:
-       cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
-       cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
-       cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
+out:
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
-       kfree(vars);
+       SMB2_close_free(&rqst[2]);
+free_req_1:
+       free_req1_func(&rqst[1]);
+free_open_req:
+       SMB2_open_free(&rqst[0]);
+free_output_buffer:
        kfree(buffer);
+free_vars:
+       kfree(vars);
        return rc;
-
-e_fault:
-       rc = -EFAULT;
-       goto iqinf_exit;
 }
 
 static ssize_t
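
The rework of smb2_ioctl_query_info() above replaces the single iqinf_exit/e_fault exit with an ordered unwind: each allocation or request that was set up successfully gets its own label, errors jump to the label that frees exactly what exists so far, and free_req1_func records which cleanup routine matches whichever second request was initialised. A generic sketch of the idiom with placeholder names (not cifs code):

	static int example_compound(void)
	{
		void *a, *b;
		int rc;

		a = alloc_a();
		if (!a)
			return -ENOMEM;

		b = alloc_b();
		if (!b) {
			rc = -ENOMEM;
			goto out_free_a;	/* unwind only what exists so far */
		}

		rc = do_work(a, b);

		free_b(b);
	out_free_a:
		free_a(a);
		return rc;
	}
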
@@ -2407,8 +2411,8 @@ again:
                cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
                goto qdf_free;
        }
-       fid->persistent_fid = le64_to_cpu(op_rsp->PersistentFileId);
-       fid->volatile_fid = le64_to_cpu(op_rsp->VolatileFileId);
+       fid->persistent_fid = op_rsp->PersistentFileId;
+       fid->volatile_fid = op_rsp->VolatileFileId;
 
        /* Anything else than ENODATA means a genuine error */
        if (rc && rc != -ENODATA) {
@@ -2646,7 +2650,7 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
  */
 int
 smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
-                        __le16 *utf16_path, u32 desired_access,
+                        const char *path, u32 desired_access,
                         u32 class, u32 type, u32 output_len,
                         struct kvec *rsp, int *buftype,
                         struct cifs_sb_info *cifs_sb)
@@ -2664,6 +2668,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
        int rc;
+       __le16 *utf16_path;
+       struct cached_fid *cfid = NULL;
+
+       if (!path)
+               path = "";
+       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+       if (!utf16_path)
+               return -ENOMEM;
 
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
@@ -2672,6 +2684,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
        memset(rsp_iov, 0, sizeof(rsp_iov));
 
+       rc = open_cached_dir(xid, tcon, path, cifs_sb, &cfid);
+
        memset(&open_iov, 0, sizeof(open_iov));
        rqst[0].rq_iov = open_iov;
        rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
@@ -2693,15 +2707,29 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        rqst[1].rq_iov = qi_iov;
        rqst[1].rq_nvec = 1;
 
-       rc = SMB2_query_info_init(tcon, server,
-                                 &rqst[1], COMPOUND_FID, COMPOUND_FID,
-                                 class, type, 0,
-                                 output_len, 0,
-                                 NULL);
+       if (cfid) {
+               rc = SMB2_query_info_init(tcon, server,
+                                         &rqst[1],
+                                         cfid->fid->persistent_fid,
+                                         cfid->fid->volatile_fid,
+                                         class, type, 0,
+                                         output_len, 0,
+                                         NULL);
+       } else {
+               rc = SMB2_query_info_init(tcon, server,
+                                         &rqst[1],
+                                         COMPOUND_FID,
+                                         COMPOUND_FID,
+                                         class, type, 0,
+                                         output_len, 0,
+                                         NULL);
+       }
        if (rc)
                goto qic_exit;
-       smb2_set_next_command(tcon, &rqst[1]);
-       smb2_set_related(&rqst[1]);
+       if (!cfid) {
+               smb2_set_next_command(tcon, &rqst[1]);
+               smb2_set_related(&rqst[1]);
+       }
 
        memset(&close_iov, 0, sizeof(close_iov));
        rqst[2].rq_iov = close_iov;
@@ -2713,9 +2741,15 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
                goto qic_exit;
        smb2_set_related(&rqst[2]);
 
-       rc = compound_send_recv(xid, ses, server,
-                               flags, 3, rqst,
-                               resp_buftype, rsp_iov);
+       if (cfid) {
+               rc = compound_send_recv(xid, ses, server,
+                                       flags, 1, &rqst[1],
+                                       &resp_buftype[1], &rsp_iov[1]);
+       } else {
+               rc = compound_send_recv(xid, ses, server,
+                                       flags, 3, rqst,
+                                       resp_buftype, rsp_iov);
+       }
        if (rc) {
                free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
                if (rc == -EREMCHG) {
@@ -2729,11 +2763,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        *buftype = resp_buftype[1];
 
  qic_exit:
+       kfree(utf16_path);
        SMB2_open_free(&rqst[0]);
        SMB2_query_info_free(&rqst[1]);
        SMB2_close_free(&rqst[2]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+       if (cfid)
+               close_cached_dir(cfid);
        return rc;
 }
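
Taking a plain path instead of a pre-converted UTF-16 string lets smb2_query_info_compound() look up a cached directory handle itself. The two shapes the request can now take, summarised from the hunks above:

	/*
	 * no cached dir handle:  SMB2_CREATE + SMB2_QUERY_INFO + SMB2_CLOSE,
	 *                        sent as one three-op compound on COMPOUND_FID
	 *
	 * cached dir handle:     a single SMB2_QUERY_INFO against
	 *                        cfid->fid->{persistent,volatile}_fid,
	 *                        released afterwards with close_cached_dir()
	 */
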
 
@@ -2743,13 +2780,12 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
 {
        struct smb2_query_info_rsp *rsp;
        struct smb2_fs_full_size_info *info = NULL;
-       __le16 utf16_path = 0; /* Null - open root of share */
        struct kvec rsp_iov = {NULL, 0};
        int buftype = CIFS_NO_BUFFER;
        int rc;
 
 
-       rc = smb2_query_info_compound(xid, tcon, &utf16_path,
+       rc = smb2_query_info_compound(xid, tcon, "",
                                      FILE_READ_ATTRIBUTES,
                                      FS_FULL_SIZE_INFORMATION,
                                      SMB2_O_INFO_FILESYSTEM,
@@ -4293,12 +4329,12 @@ static __le32
 map_oplock_to_lease(u8 oplock)
 {
        if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
-               return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
+               return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
        else if (oplock == SMB2_OPLOCK_LEVEL_II)
-               return SMB2_LEASE_READ_CACHING;
+               return SMB2_LEASE_READ_CACHING_LE;
        else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
-               return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
-                      SMB2_LEASE_WRITE_CACHING;
+               return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
+                      SMB2_LEASE_WRITE_CACHING_LE;
        return 0;
 }
 
@@ -4360,7 +4396,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
        struct create_lease *lc = (struct create_lease *)buf;
 
        *epoch = 0; /* not used */
-       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
+       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
                return SMB2_OPLOCK_LEVEL_NOCHANGE;
        return le32_to_cpu(lc->lcontext.LeaseState);
 }
@@ -4371,7 +4407,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
        struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
 
        *epoch = le16_to_cpu(lc->lcontext.Epoch);
-       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
+       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
                return SMB2_OPLOCK_LEVEL_NOCHANGE;
        if (lease_key)
                memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
@@ -5814,8 +5850,8 @@ struct smb_version_values smb20_values = {
        .protocol_id = SMB20_PROT_ID,
        .req_capabilities = 0, /* MBZ */
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5835,8 +5871,8 @@ struct smb_version_values smb21_values = {
        .protocol_id = SMB21_PROT_ID,
        .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5856,8 +5892,8 @@ struct smb_version_values smb3any_values = {
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5877,8 +5913,8 @@ struct smb_version_values smbdefault_values = {
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5898,8 +5934,8 @@ struct smb_version_values smb30_values = {
        .protocol_id = SMB30_PROT_ID,
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5919,8 +5955,8 @@ struct smb_version_values smb302_values = {
        .protocol_id = SMB302_PROT_ID,
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5940,8 +5976,8 @@ struct smb_version_values smb311_values = {
        .protocol_id = SMB311_PROT_ID,
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
index 7e7909b..1b7ad0c 100644 (file)
@@ -163,7 +163,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
                return 0;
 
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->tidStatus == CifsExiting) {
+       if (tcon->status == TID_EXITING) {
                /*
                 * only tree disconnect, open, and write,
                 * (and ulogoff which does not have tcon)
@@ -2734,13 +2734,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
                goto err_free_req;
        }
 
-       trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
-                                   tcon->tid,
-                                   ses->Suid, CREATE_NOT_FILE,
-                                   FILE_WRITE_ATTRIBUTES);
+       trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+                                   CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
 
-       SMB2_close(xid, tcon, le64_to_cpu(rsp->PersistentFileId),
-                  le64_to_cpu(rsp->VolatileFileId));
+       SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
 
        /* Eventually save off posix specific response info and timestamps */
 
@@ -3009,14 +3006,12 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        } else if (rsp == NULL) /* unlikely to happen, but safer to check */
                goto creat_exit;
        else
-               trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
-                                    tcon->tid,
-                                    ses->Suid, oparms->create_options,
-                                    oparms->desired_access);
+               trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+                                    oparms->create_options, oparms->desired_access);
 
        atomic_inc(&tcon->num_remote_opens);
-       oparms->fid->persistent_fid = le64_to_cpu(rsp->PersistentFileId);
-       oparms->fid->volatile_fid = le64_to_cpu(rsp->VolatileFileId);
+       oparms->fid->persistent_fid = rsp->PersistentFileId;
+       oparms->fid->volatile_fid = rsp->VolatileFileId;
        oparms->fid->access = oparms->desired_access;
 #ifdef CONFIG_CIFS_DEBUG2
        oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
@@ -3313,8 +3308,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
        if (rc)
                return rc;
 
-       req->PersistentFileId = cpu_to_le64(persistent_fid);
-       req->VolatileFileId = cpu_to_le64(volatile_fid);
+       req->PersistentFileId = persistent_fid;
+       req->VolatileFileId = volatile_fid;
        if (query_attrs)
                req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
        else
@@ -3677,8 +3672,8 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
        if (rc)
                return rc;
 
-       req->PersistentFileId = cpu_to_le64(persistent_fid);
-       req->VolatileFileId = cpu_to_le64(volatile_fid);
+       req->PersistentFileId = persistent_fid;
+       req->VolatileFileId = volatile_fid;
        /* See note 354 of MS-SMB2, 64K max */
        req->OutputBufferLength =
                cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
@@ -3858,12 +3853,14 @@ void smb2_reconnect_server(struct work_struct *work)
        tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
        if (!tcon) {
                resched = true;
-               list_del_init(&ses->rlist);
-               cifs_put_smb_ses(ses);
+               list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+                       list_del_init(&ses->rlist);
+                       cifs_put_smb_ses(ses);
+               }
                goto done;
        }
 
-       tcon->tidStatus = CifsGood;
+       tcon->status = TID_GOOD;
        tcon->retry = false;
        tcon->need_reconnect = false;
 
@@ -3951,8 +3948,8 @@ SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
        if (rc)
                return rc;
 
-       req->PersistentFileId = cpu_to_le64(persistent_fid);
-       req->VolatileFileId = cpu_to_le64(volatile_fid);
+       req->PersistentFileId = persistent_fid;
+       req->VolatileFileId = volatile_fid;
 
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
@@ -4033,8 +4030,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
        shdr = &req->hdr;
        shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
 
-       req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
-       req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
+       req->PersistentFileId = io_parms->persistent_fid;
+       req->VolatileFileId = io_parms->volatile_fid;
        req->ReadChannelInfoOffset = 0; /* reserved */
        req->ReadChannelInfoLength = 0; /* reserved */
        req->Channel = 0; /* reserved */
@@ -4094,8 +4091,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
                         */
                        shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
                        shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
-                       req->PersistentFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
-                       req->VolatileFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
+                       req->PersistentFileId = (u64)-1;
+                       req->VolatileFileId = (u64)-1;
                }
        }
        if (remaining_bytes > io_parms->length)
@@ -4307,21 +4304,19 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
                        cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
                        cifs_dbg(VFS, "Send error in read = %d\n", rc);
                        trace_smb3_read_err(xid,
-                                           le64_to_cpu(req->PersistentFileId),
+                                           req->PersistentFileId,
                                            io_parms->tcon->tid, ses->Suid,
                                            io_parms->offset, io_parms->length,
                                            rc);
                } else
-                       trace_smb3_read_done(xid,
-                                            le64_to_cpu(req->PersistentFileId),
-                                            io_parms->tcon->tid, ses->Suid,
-                                            io_parms->offset, 0);
+                       trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid,
+                                            ses->Suid, io_parms->offset, 0);
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
                cifs_small_buf_release(req);
                return rc == -ENODATA ? 0 : rc;
        } else
                trace_smb3_read_done(xid,
-                                    le64_to_cpu(req->PersistentFileId),
+                                   req->PersistentFileId,
                                    io_parms->tcon->tid, ses->Suid,
                                    io_parms->offset, io_parms->length);
 
@@ -4463,8 +4458,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
        shdr = (struct smb2_hdr *)req;
        shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
 
-       req->PersistentFileId = cpu_to_le64(wdata->cfile->fid.persistent_fid);
-       req->VolatileFileId = cpu_to_le64(wdata->cfile->fid.volatile_fid);
+       req->PersistentFileId = wdata->cfile->fid.persistent_fid;
+       req->VolatileFileId = wdata->cfile->fid.volatile_fid;
        req->WriteChannelInfoOffset = 0;
        req->WriteChannelInfoLength = 0;
        req->Channel = 0;
@@ -4562,7 +4557,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
 
        if (rc) {
                trace_smb3_write_err(0 /* no xid */,
-                                    le64_to_cpu(req->PersistentFileId),
+                                    req->PersistentFileId,
                                     tcon->tid, tcon->ses->Suid, wdata->offset,
                                     wdata->bytes, rc);
                kref_put(&wdata->refcount, release);
@@ -4615,8 +4610,8 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
 
-       req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
-       req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
+       req->PersistentFileId = io_parms->persistent_fid;
+       req->VolatileFileId = io_parms->volatile_fid;
        req->WriteChannelInfoOffset = 0;
        req->WriteChannelInfoLength = 0;
        req->Channel = 0;
@@ -4645,7 +4640,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        if (rc) {
                trace_smb3_write_err(xid,
-                                    le64_to_cpu(req->PersistentFileId),
+                                    req->PersistentFileId,
                                     io_parms->tcon->tid,
                                     io_parms->tcon->ses->Suid,
                                     io_parms->offset, io_parms->length, rc);
@@ -4654,7 +4649,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        } else {
                *nbytes = le32_to_cpu(rsp->DataLength);
                trace_smb3_write_done(xid,
-                                     le64_to_cpu(req->PersistentFileId),
+                                     req->PersistentFileId,
                                      io_parms->tcon->tid,
                                      io_parms->tcon->ses->Suid,
                                      io_parms->offset, *nbytes);
index 33cfd0a..d8c4388 100644 (file)
@@ -56,16 +56,6 @@ struct smb2_rdma_crypto_transform {
 
 #define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
 
-#define SMB2_ERROR_STRUCTURE_SIZE2 cpu_to_le16(9)
-
-struct smb2_err_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;
-       __le16 Reserved; /* MBZ */
-       __le32 ByteCount;  /* even if zero, at least one byte follows */
-       __u8   ErrorData[1];  /* variable length */
-} __packed;
-
 #define SYMLINK_ERROR_TAG 0x4c4d5953
 
 struct smb2_symlink_err_rsp {
@@ -139,47 +129,6 @@ struct share_redirect_error_context_rsp {
 #define SMB2_LEASE_HANDLE_CACHING_HE   0x02
 #define SMB2_LEASE_WRITE_CACHING_HE    0x04
 
-#define SMB2_LEASE_NONE                        cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING                cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING      cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING       cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS cpu_to_le32(0x00000002)
-#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET cpu_to_le32(0x00000004)
-
-#define SMB2_LEASE_KEY_SIZE 16
-
-struct lease_context {
-       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
-       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-       __le64 ParentLeaseKeyLow;
-       __le64 ParentLeaseKeyHigh;
-       __le16 Epoch;
-       __le16 Reserved;
-} __packed;
-
-struct create_lease {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context_v2 lcontext;
-       __u8   Pad[4];
-} __packed;
-
 struct create_durable {
        struct create_context ccontext;
        __u8   Name[8];
@@ -192,13 +141,6 @@ struct create_durable {
        } Data;
 } __packed;
 
-struct create_posix {
-       struct create_context ccontext;
-       __u8    Name[16];
-       __le32  Mode;
-       __u32   Reserved;
-} __packed;
-
 /* See MS-SMB2 2.2.13.2.11 */
 /* Flags */
 #define SMB2_DHANDLE_FLAG_PERSISTENT   0x00000002
@@ -287,12 +229,6 @@ struct copychunk_ioctl {
        __u32 Reserved2;
 } __packed;
 
-/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
-struct file_zero_data_information {
-       __le64  FileOffset;
-       __le64  BeyondFinalZero;
-} __packed;
-
 struct copychunk_ioctl_rsp {
        __le32 ChunksWritten;
        __le32 ChunkBytesWritten;
@@ -338,11 +274,6 @@ struct fsctl_get_integrity_information_rsp {
        __le32  ClusterSizeInBytes;
 } __packed;
 
-struct file_allocated_range_buffer {
-       __le64  file_offset;
-       __le64  length;
-} __packed;
-
 /* Integrity ChecksumAlgorithm choices for above */
 #define        CHECKSUM_TYPE_NONE      0x0000
 #define        CHECKSUM_TYPE_CRC64     0x0002
@@ -351,53 +282,6 @@ struct file_allocated_range_buffer {
 /* Integrity flags for above */
 #define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF  0x00000001
 
-/* Reparse structures - see MS-FSCC 2.1.2 */
-
-/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
-
-struct reparse_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __u8    DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_guid_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __u8    ReparseGuid[16];
-       __u8    DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_mount_point_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __le16  SubstituteNameOffset;
-       __le16  SubstituteNameLength;
-       __le16  PrintNameOffset;
-       __le16  PrintNameLength;
-       __u8    PathBuffer[]; /* Variable Length */
-} __packed;
-
-#define SYMLINK_FLAG_RELATIVE 0x00000001
-
-struct reparse_symlink_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __le16  SubstituteNameOffset;
-       __le16  SubstituteNameLength;
-       __le16  PrintNameOffset;
-       __le16  PrintNameLength;
-       __le32  Flags;
-       __u8    PathBuffer[]; /* Variable Length */
-} __packed;
-
-/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
-
-
 /* See MS-DFSC 2.2.2 */
 struct fsctl_get_dfs_referral_req {
        __le16 MaxReferralLevel;
@@ -413,22 +297,6 @@ struct network_resiliency_req {
 } __packed;
 /* There is no buffer for the response ie no struct network_resiliency_rsp */
 
-
-struct validate_negotiate_info_req {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 DialectCount;
-       __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
-} __packed;
-
-struct validate_negotiate_info_rsp {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
 #define RSS_CAPABLE    cpu_to_le32(0x00000001)
 #define RDMA_CAPABLE   cpu_to_le32(0x00000002)
 
@@ -464,14 +332,6 @@ struct compress_ioctl {
        __le16 CompressionState; /* See cifspdu.h for possible flag values */
 } __packed;
 
-struct duplicate_extents_to_file {
-       __u64 PersistentFileHandle; /* source file handle, opaque endianness */
-       __u64 VolatileFileHandle;
-       __le64 SourceFileOffset;
-       __le64 TargetFileOffset;
-       __le64 ByteCount;  /* Bytes to be copied */
-} __packed;
-
 /*
  * Maximum number of iovs we need for an ioctl request.
  * [0] : struct smb2_ioctl_req
@@ -479,370 +339,11 @@ struct duplicate_extents_to_file {
  */
 #define SMB2_IOCTL_IOV_SIZE 2
 
-struct smb2_ioctl_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 57 */
-       __u16 Reserved;
-       __le32 CtlCode;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __le32 InputOffset;
-       __le32 InputCount;
-       __le32 MaxInputResponse;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 MaxOutputResponse;
-       __le32 Flags;
-       __u32  Reserved2;
-       __u8   Buffer[];
-} __packed;
-
-struct smb2_ioctl_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 57 */
-       __u16 Reserved;
-       __le32 CtlCode;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __le32 InputOffset;
-       __le32 InputCount;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 Flags;
-       __u32  Reserved2;
-       /* char * buffer[] */
-} __packed;
-
-#define SMB2_LOCKFLAG_SHARED_LOCK      0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE_LOCK   0x0002
-#define SMB2_LOCKFLAG_UNLOCK           0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-
-struct smb2_lock_element {
-       __le64 Offset;
-       __le64 Length;
-       __le32 Flags;
-       __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 48 */
-       __le16 LockCount;
-       /*
-        * The least significant four bits are the index, the other 28 bits are
-        * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
-        */
-       __le32 LockSequenceNumber;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       /* Followed by at least one */
-       struct smb2_lock_element locks[1];
-} __packed;
-
-struct smb2_lock_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 4 */
-       __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS             0x01
-#define SMB2_RETURN_SINGLE_ENTRY       0x02
-#define SMB2_INDEX_SPECIFIED           0x04
-#define SMB2_REOPEN                    0x10
-
-#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
-
-/*
- * Valid FileInformation classes.
- *
- * Note that these are a subset of the (file) QUERY_INFO levels defined
- * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
- * we do not redefine them here)
- *
- * FileDirectoryInfomation             0x01
- * FileFullDirectoryInformation                0x02
- * FileIdFullDirectoryInformation      0x26
- * FileBothDirectoryInformation                0x03
- * FileIdBothDirectoryInformation      0x25
- * FileNamesInformation                        0x0C
- * FileIdExtdDirectoryInformation      0x3C
- */
-
-struct smb2_query_directory_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   FileInformationClass;
-       __u8   Flags;
-       __le32 FileIndex;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __le16 FileNameOffset;
-       __le16 FileNameLength;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE       0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY   0x03
-#define SMB2_O_INFO_QUOTA      0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO   0x00000001
-#define GROUP_SECINFO   0x00000002
-#define DACL_SECINFO   0x00000004
-#define SACL_SECINFO   0x00000008
-#define LABEL_SECINFO   0x00000010
-#define ATTRIBUTE_SECINFO   0x00000020
-#define SCOPE_SECINFO   0x00000040
-#define BACKUP_SECINFO   0x00010000
-#define UNPROTECTED_SACL_SECINFO   0x10000000
-#define UNPROTECTED_DACL_SECINFO   0x20000000
-#define PROTECTED_SACL_SECINFO   0x40000000
-#define PROTECTED_DACL_SECINFO   0x80000000
-
-/* Flags used for FileFullEAinfo */
-#define SL_RESTART_SCAN                0x00000001
-#define SL_RETURN_SINGLE_ENTRY 0x00000002
-#define SL_INDEX_SPECIFIED     0x00000004
-
-struct smb2_query_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 41 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 OutputBufferLength;
-       __le16 InputBufferOffset;
-       __u16  Reserved;
-       __le32 InputBufferLength;
-       __le32 AdditionalInformation;
-       __le32 Flags;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-/*
- * Maximum number of iovs we need for a set-info request.
- * The largest one is rename/hardlink
- * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
- * [1] : path
- * [2] : compound padding
- */
-#define SMB2_SET_INFO_IOV_SIZE 3
-
-struct smb2_set_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 BufferLength;
-       __le16 BufferOffset;
-       __u16  Reserved;
-       __le32 AdditionalInformation;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 2 */
-} __packed;
-
-struct smb2_oplock_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 24 */
-       __u8   OplockLevel;
-       __u8   Reserved;
-       __le32 Reserved2;
-       __u64  PersistentFid;
-       __u64  VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 44 */
-       __le16 Epoch;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 CurrentLeaseState;
-       __le32 NewLeaseState;
-       __le32 BreakReason;
-       __le32 AccessMaskHint;
-       __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 36 */
-       __le16 Reserved;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 LeaseState;
-       __le64 LeaseDuration;
-} __packed;
-
 /*
- *     PDU infolevel structure definitions
+ *     PDU query infolevel structure definitions
  *     BB consider moving to a different header
  */
 
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION          1 /* Query */
-#define FS_LABEL_INFORMATION           2 /* Local only */
-#define FS_SIZE_INFORMATION            3 /* Query */
-#define FS_DEVICE_INFORMATION          4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION       5 /* Query */
-#define FS_CONTROL_INFORMATION         6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION       7 /* Query */
-#define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION     9 /* Local only */
-#define FS_VOLUME_FLAGS_INFORMATION    10 /* Local only */
-#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION           100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
-       __le64 TotalAllocationUnits;
-       __le64 CallerAvailableAllocationUnits;
-       __le64 ActualAvailableAllocationUnits;
-       __le32 SectorsPerAllocationUnit;
-       __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
-       __le32 LogicalBytesPerSector;
-       __le32 PhysicalBytesPerSectorForAtomicity;
-       __le32 PhysicalBytesPerSectorForPerf;
-       __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
-       __le32 Flags;
-       __le32 ByteOffsetForSectorAlignment;
-       __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* volume info struct - see MS-FSCC 2.5.9 */
-#define MAX_VOL_LABEL_LEN      32
-struct smb3_fs_vol_info {
-       __le64  VolumeCreationTime;
-       __u32   VolumeSerialNumber;
-       __le32  VolumeLabelLength; /* includes trailing null */
-       __u8    SupportsObjects; /* True if eg like NTFS, supports objects */
-       __u8    Reserved;
-       __u8    VolumeLabel[]; /* variable len */
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION     1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION         4
-#define FILE_STANDARD_INFORMATION      5
-#define FILE_INTERNAL_INFORMATION      6
-#define FILE_EA_INFORMATION            7
-#define FILE_ACCESS_INFORMATION                8
-#define FILE_NAME_INFORMATION          9
-#define FILE_RENAME_INFORMATION                10
-#define FILE_LINK_INFORMATION          11
-#define FILE_NAMES_INFORMATION         12
-#define FILE_DISPOSITION_INFORMATION   13
-#define FILE_POSITION_INFORMATION      14
-#define FILE_FULL_EA_INFORMATION       15
-#define FILE_MODE_INFORMATION          16
-#define FILE_ALIGNMENT_INFORMATION     17
-#define FILE_ALL_INFORMATION           18
-#define FILE_ALLOCATION_INFORMATION    19
-#define FILE_END_OF_FILE_INFORMATION   20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION                22
-#define FILE_PIPE_INFORMATION          23
-#define FILE_PIPE_LOCAL_INFORMATION    24
-#define FILE_PIPE_REMOTE_INFORMATION   25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION  27
-#define FILE_COMPRESSION_INFORMATION   28
-#define FILE_OBJECT_ID_INFORMATION     29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION  31
-#define FILE_QUOTA_INFORMATION         32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION  34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION      36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION    40
-#define FILE_SFIO_RESERVE_INFORMATION  44
-#define FILE_SFIO_VOLUME_INFORMATION   45
-#define FILE_HARD_LINK_INFORMATION     46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-#define FILE_ID_INFORMATION            59
-#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60
-
-struct smb2_file_internal_info {
-       __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
-       __u8   ReplaceIfExists; /* 1 = replace existing target with new */
-                               /* 0 = fail if target already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
-       __le32 FileNameLength;
-       char   FileName[];     /* New name to be assigned */
-       /* padding - overall struct size must be >= 24 so filename + pad >= 6 */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
-       __u8   ReplaceIfExists; /* 1 = replace existing link with new */
-                               /* 0 = fail if link already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
-       __le32 FileNameLength;
-       char   FileName[];     /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
 struct smb2_file_full_ea_info { /* encoding of response for level 15 */
        __le32 next_entry_offset;
        __u8   flags;
@@ -851,38 +352,6 @@ struct smb2_file_full_ea_info { /* encoding of response for level 15 */
        char   ea_data[]; /* \0 terminated name plus value */
 } __packed; /* level 15 Set */
 
-/*
- * This level 18, although with struct with same name is different from cifs
- * level 0x107. Level 0x107 has an extra u64 between AccessFlags and
- * CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
-       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le32 Attributes;
-       __u32  Pad1;            /* End of FILE_BASIC_INFO_INFO equivalent */
-       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
-       __le64 EndOfFile;       /* size ie offset to first free byte in file */
-       __le32 NumberOfLinks;   /* hard links */
-       __u8   DeletePending;
-       __u8   Directory;
-       __u16  Pad2;            /* End of FILE_STANDARD_INFO equivalent */
-       __le64 IndexNumber;
-       __le32 EASize;
-       __le32 AccessFlags;
-       __le64 CurrentByteOffset;
-       __le32 Mode;
-       __le32 AlignmentRequirement;
-       __le32 FileNameLength;
-       char   FileName[1];
-} __packed; /* level 18 Query */
-
-struct smb2_file_eof_info { /* encoding of request for level 10 */
-       __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
 struct smb2_file_reparse_point_info {
        __le64 IndexNumber;
        __le32 Tag;
@@ -935,6 +404,8 @@ struct create_posix_rsp {
        struct cifs_sid group; /* var-sized on the wire */
 } __packed;
 
+#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
+
 /*
  * SMB2-only POSIX info level for query dir
  *
@@ -966,31 +437,6 @@ struct smb2_posix_info {
         */
 } __packed;
 
-/* Level 100 query info */
-struct smb311_posix_qinfo {
-       __le64 CreationTime;
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le64 EndOfFile;
-       __le64 AllocationSize;
-       __le32 DosAttributes;
-       __le64 Inode;
-       __le32 DeviceId;
-       __le32 Zero;
-       /* beginning of POSIX Create Context Response */
-       __le32 HardLinks;
-       __le32 ReparseTag;
-       __le32 Mode;
-       u8     Sids[];
-       /*
-        * var sized owner SID
-        * var sized group SID
-        * le32 filenamelength
-        * u8  filename[]
-        */
-} __packed;
-
 /*
  * Parsed version of the above struct. Allows direct access to the
  * variable length fields
index 4a7062f..a69f1ee 100644 (file)
@@ -283,7 +283,7 @@ extern int smb311_update_preauth_hash(struct cifs_ses *ses,
                                      struct kvec *iov, int nvec);
 extern int smb2_query_info_compound(const unsigned int xid,
                                    struct cifs_tcon *tcon,
-                                   __le16 *utf16_path, u32 desired_access,
+                                   const char *path, u32 desired_access,
                                    u32 class, u32 type, u32 output_len,
                                    struct kvec *rsp, int *buftype,
                                    struct cifs_sb_info *cifs_sb);
index 4fcca79..526a4c1 100644 (file)
@@ -248,7 +248,7 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
  * which must still be locked and not uptodate.  Normally, blocksize ==
  * PAGE_SIZE and the whole page is decrypted at once.
  *
- * This is for use by the filesystem's ->readpages() method.
+ * This is for use by the filesystem's ->readahead() method.
  *
  * Return: 0 on success; -errno on failure
  */
index 619e5b4..c6800b8 100644 (file)
@@ -203,7 +203,8 @@ struct exfat_mount_options {
        /* on error: continue, panic, remount-ro */
        enum exfat_error_mode errors;
        unsigned utf8:1, /* Use of UTF-8 character set */
-                discard:1; /* Issue discard requests on deletions */
+                discard:1, /* Issue discard requests on deletions */
+                keep_last_dots:1; /* Keep trailing periods in paths */
        int time_offset; /* Offset of timestamps from UTC (in minutes) */
 };
 
index d890fd3..2f51300 100644 (file)
@@ -218,8 +218,6 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
        if (exfat_free_cluster(inode, &clu))
                return -EIO;
 
-       exfat_clear_volume_dirty(sb);
-
        return 0;
 }
 
index af4eb39..a02a04a 100644 (file)
@@ -65,11 +65,14 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
        return ret;
 }
 
-/* returns the length of a struct qstr, ignoring trailing dots */
-static unsigned int exfat_striptail_len(unsigned int len, const char *name)
+/* returns the length of a struct qstr, ignoring trailing dots if necessary */
+static unsigned int exfat_striptail_len(unsigned int len, const char *name,
+                                       bool keep_last_dots)
 {
-       while (len && name[len - 1] == '.')
-               len--;
+       if (!keep_last_dots) {
+               while (len && name[len - 1] == '.')
+                       len--;
+       }
        return len;
 }
 
@@ -83,7 +86,8 @@ static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
        struct super_block *sb = dentry->d_sb;
        struct nls_table *t = EXFAT_SB(sb)->nls_io;
        const unsigned char *name = qstr->name;
-       unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+       unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+                          EXFAT_SB(sb)->options.keep_last_dots);
        unsigned long hash = init_name_hash(dentry);
        int i, charlen;
        wchar_t c;
@@ -104,8 +108,10 @@ static int exfat_d_cmp(const struct dentry *dentry, unsigned int len,
 {
        struct super_block *sb = dentry->d_sb;
        struct nls_table *t = EXFAT_SB(sb)->nls_io;
-       unsigned int alen = exfat_striptail_len(name->len, name->name);
-       unsigned int blen = exfat_striptail_len(len, str);
+       unsigned int alen = exfat_striptail_len(name->len, name->name,
+                               EXFAT_SB(sb)->options.keep_last_dots);
+       unsigned int blen = exfat_striptail_len(len, str,
+                               EXFAT_SB(sb)->options.keep_last_dots);
        wchar_t c1, c2;
        int charlen, i;
 
@@ -136,7 +142,8 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr)
 {
        struct super_block *sb = dentry->d_sb;
        const unsigned char *name = qstr->name;
-       unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+       unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+                              EXFAT_SB(sb)->options.keep_last_dots);
        unsigned long hash = init_name_hash(dentry);
        int i, charlen;
        unicode_t u;
@@ -161,8 +168,11 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len,
                const char *str, const struct qstr *name)
 {
        struct super_block *sb = dentry->d_sb;
-       unsigned int alen = exfat_striptail_len(name->len, name->name);
-       unsigned int blen = exfat_striptail_len(len, str);
+       unsigned int alen = exfat_striptail_len(name->len, name->name,
+                               EXFAT_SB(sb)->options.keep_last_dots);
+       unsigned int blen = exfat_striptail_len(len, str,
+                               EXFAT_SB(sb)->options.keep_last_dots);
+
        unicode_t u_a, u_b;
        int charlen, i;
 
@@ -416,13 +426,25 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
        struct super_block *sb = inode->i_sb;
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct exfat_inode_info *ei = EXFAT_I(inode);
+       int pathlen = strlen(path);
 
-       /* strip all trailing periods */
-       namelen = exfat_striptail_len(strlen(path), path);
+       /*
+        * get the length of the pathname excluding
+        * trailing periods, if any.
+        */
+       namelen = exfat_striptail_len(pathlen, path, false);
+       if (EXFAT_SB(sb)->options.keep_last_dots) {
+               /*
+                * Do not allow the creation of files with names
+                * ending with period(s).
+                */
+               if (!lookup && (namelen < pathlen))
+                       return -EINVAL;
+               namelen = pathlen;
+       }
        if (!namelen)
                return -ENOENT;
-
-       if (strlen(path) > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
+       if (pathlen > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
                return -ENAMETOOLONG;
 
        /*
@@ -554,7 +576,6 @@ static int exfat_create(struct user_namespace *mnt_userns, struct inode *dir,
        exfat_set_volume_dirty(sb);
        err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
                &info);
-       exfat_clear_volume_dirty(sb);
        if (err)
                goto unlock;
 
@@ -812,7 +833,6 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
 
        /* This doesn't modify ei */
        ei->dir.dir = DIR_DELETED;
-       exfat_clear_volume_dirty(sb);
 
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_atime = current_time(dir);
@@ -846,7 +866,6 @@ static int exfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
        exfat_set_volume_dirty(sb);
        err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
                &info);
-       exfat_clear_volume_dirty(sb);
        if (err)
                goto unlock;
 
@@ -976,7 +995,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
                goto unlock;
        }
        ei->dir.dir = DIR_DELETED;
-       exfat_clear_volume_dirty(sb);
 
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_atime = current_time(dir);
@@ -1311,7 +1329,6 @@ del_out:
                 */
                new_ei->dir.dir = DIR_DELETED;
        }
-       exfat_clear_volume_dirty(sb);
 out:
        return ret;
 }
index 9f89290..8ca21e7 100644 (file)
@@ -100,7 +100,6 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
 {
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
-       bool sync;
 
        /* retain persistent-flags */
        new_flags |= sbi->vol_flags_persistent;
@@ -119,16 +118,11 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
 
        p_boot->vol_flags = cpu_to_le16(new_flags);
 
-       if ((new_flags & VOLUME_DIRTY) && !buffer_dirty(sbi->boot_bh))
-               sync = true;
-       else
-               sync = false;
-
        set_buffer_uptodate(sbi->boot_bh);
        mark_buffer_dirty(sbi->boot_bh);
 
-       if (sync)
-               sync_dirty_buffer(sbi->boot_bh);
+       __sync_dirty_buffer(sbi->boot_bh, REQ_SYNC | REQ_FUA | REQ_PREFLUSH);
+
        return 0;
 }
 
@@ -174,6 +168,8 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",errors=remount-ro");
        if (opts->discard)
                seq_puts(m, ",discard");
+       if (opts->keep_last_dots)
+               seq_puts(m, ",keep_last_dots");
        if (opts->time_offset)
                seq_printf(m, ",time_offset=%d", opts->time_offset);
        return 0;
@@ -217,6 +213,7 @@ enum {
        Opt_charset,
        Opt_errors,
        Opt_discard,
+       Opt_keep_last_dots,
        Opt_time_offset,
 
        /* Deprecated options */
@@ -243,6 +240,7 @@ static const struct fs_parameter_spec exfat_parameters[] = {
        fsparam_string("iocharset",             Opt_charset),
        fsparam_enum("errors",                  Opt_errors, exfat_param_enums),
        fsparam_flag("discard",                 Opt_discard),
+       fsparam_flag("keep_last_dots",          Opt_keep_last_dots),
        fsparam_s32("time_offset",              Opt_time_offset),
        __fsparam(NULL, "utf8",                 Opt_utf8, fs_param_deprecated,
                  NULL),
@@ -297,6 +295,9 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_discard:
                opts->discard = 1;
                break;
+       case Opt_keep_last_dots:
+               opts->keep_last_dots = 1;
+               break;
        case Opt_time_offset:
                /*
                 * Make the limit 24 just in case someone invents something
index 8bd66cd..6feb07e 100644 (file)
@@ -267,7 +267,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
                goto out;
 
        current->backing_dev_info = inode_to_bdi(inode);
-       ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
+       ret = generic_perform_write(iocb, from);
        current->backing_dev_info = NULL;
 
 out:
index 1ce13f6..13740f2 100644 (file)
@@ -3589,7 +3589,7 @@ const struct iomap_ops ext4_iomap_report_ops = {
 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
                struct folio *folio)
 {
-       WARN_ON_ONCE(!page_has_buffers(&folio->page));
+       WARN_ON_ONCE(!folio_buffers(folio));
        folio_set_checked(folio);
        return filemap_dirty_folio(mapping, folio);
 }
index 1aa26d6..af491e1 100644 (file)
@@ -109,7 +109,7 @@ static void verity_work(struct work_struct *work)
        struct bio *bio = ctx->bio;
 
        /*
-        * fsverity_verify_bio() may call readpages() again, and although verity
+        * fsverity_verify_bio() may call readahead() again, and although verity
         * will be disabled for that, decryption may still be needed, causing
         * another bio_post_read_ctx to be allocated.  So to guarantee that
         * mempool_alloc() never deadlocks we must free the current ctx first.
index a8fc4fa..f5366fe 100644 (file)
@@ -456,7 +456,7 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping,
                folio_mark_uptodate(folio);
        if (!folio_test_dirty(folio)) {
                filemap_dirty_folio(mapping, folio);
-               inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_META);
+               inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
                set_page_private_reference(&folio->page);
                return true;
        }
index f8fcbe9..8e0c2e7 100644 (file)
@@ -164,7 +164,7 @@ static void f2fs_verify_bio(struct work_struct *work)
        bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
 
        /*
-        * fsverity_verify_bio() may call readpages() again, and while verity
+        * fsverity_verify_bio() may call readahead() again, and while verity
         * will be disabled for this, decryption and/or decompression may still
         * be needed, resulting in another bio_post_read_ctx being allocated.
         * So to prevent deadlocks we need to release the current ctx to the
@@ -2392,7 +2392,7 @@ static void f2fs_readahead(struct readahead_control *rac)
        if (!f2fs_is_compress_backend_ready(inode))
                return;
 
-       /* If the file has inline data, skip readpages */
+       /* If the file has inline data, skip readahead */
        if (f2fs_has_inline_data(inode))
                return;
 
@@ -3571,7 +3571,7 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
                f2fs_update_dirty_folio(inode, folio);
                return true;
        }
-       return true;
+       return false;
 }
 
 
index d3f39a7..5b89af0 100644 (file)
@@ -4448,7 +4448,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
                return -EOPNOTSUPP;
 
        current->backing_dev_info = inode_to_bdi(inode);
-       ret = generic_perform_write(file, from, iocb->ki_pos);
+       ret = generic_perform_write(iocb, from);
        current->backing_dev_info = NULL;
 
        if (ret > 0) {
index 0b6e741..c45d341 100644 (file)
@@ -2146,11 +2146,11 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
                folio_mark_uptodate(folio);
 #ifdef CONFIG_F2FS_CHECK_FS
        if (IS_INODE(&folio->page))
-               f2fs_inode_chksum_set(F2FS_P_SB(&folio->page), &folio->page);
+               f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
 #endif
        if (!folio_test_dirty(folio)) {
                filemap_dirty_folio(mapping, folio);
-               inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_NODES);
+               inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
                set_page_private_reference(&folio->page);
                return true;
        }
index f121c21..ed1c9ed 100644 (file)
@@ -71,17 +71,6 @@ static inline void fscache_see_cookie(struct fscache_cookie *cookie,
 }
 
 /*
- * io.c
- */
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
-/*
  * main.c
  */
 extern unsigned fscache_debug;
index eac4984..488b460 100644 (file)
@@ -627,7 +627,7 @@ struct fuse_conn {
        /** Connection successful.  Only set in INIT */
        unsigned conn_init:1;
 
-       /** Do readpages asynchronously?  Only set in INIT */
+       /** Do readahead asynchronously?  Only set in INIT */
        unsigned async_read:1;
 
        /** Return an unique read error after abort.  Only set in INIT */
index d671084..39080b2 100644 (file)
@@ -606,9 +606,9 @@ out:
        return ret;
 }
 
-static inline __be64 *gfs2_indirect_init(struct metapath *mp,
-                                        struct gfs2_glock *gl, unsigned int i,
-                                        unsigned offset, u64 bn)
+static inline void gfs2_indirect_init(struct metapath *mp,
+                                     struct gfs2_glock *gl, unsigned int i,
+                                     unsigned offset, u64 bn)
 {
        __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
                       ((i > 1) ? sizeof(struct gfs2_meta_header) :
@@ -621,7 +621,6 @@ static inline __be64 *gfs2_indirect_init(struct metapath *mp,
        gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
        ptr += offset;
        *ptr = cpu_to_be64(bn);
-       return ptr;
 }
 
 enum alloc_state {
@@ -2146,7 +2145,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 
        ret = do_shrink(inode, newsize);
 out:
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_delete(ip);
        gfs2_qa_put(ip);
        return ret;
 }
index 8c39a85..22b41ac 100644 (file)
@@ -706,7 +706,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
 
        if (file->f_mode & FMODE_WRITE) {
                if (gfs2_rs_active(&ip->i_res))
-                       gfs2_rs_delete(ip, &inode->i_writecount);
+                       gfs2_rs_delete(ip);
                gfs2_qa_put(ip);
        }
        return 0;
@@ -775,8 +775,7 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
                                         size_t *window_size)
 {
        size_t count = iov_iter_count(i);
-       char __user *p;
-       int pages = 1;
+       size_t size, offs;
 
        if (likely(!count))
                return false;
@@ -785,18 +784,20 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
        if (!iter_is_iovec(i))
                return false;
 
+       size = PAGE_SIZE;
+       offs = offset_in_page(i->iov[0].iov_base + i->iov_offset);
        if (*prev_count != count || !*window_size) {
-               int pages, nr_dirtied;
+               size_t nr_dirtied;
 
-               pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
+               size = ALIGN(offs + count, PAGE_SIZE);
+               size = min_t(size_t, size, SZ_1M);
                nr_dirtied = max(current->nr_dirtied_pause -
-                                current->nr_dirtied, 1);
-               pages = min(pages, nr_dirtied);
+                                current->nr_dirtied, 8);
+               size = min(size, nr_dirtied << PAGE_SHIFT);
        }
 
        *prev_count = count;
-       p = i->iov[0].iov_base + i->iov_offset;
-       *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
+       *window_size = size - offs;
        return true;
 }
 
@@ -851,9 +852,9 @@ retry_under_glock:
                leftover = fault_in_iov_iter_writeable(to, window_size);
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(gh))
-                               goto retry;
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       goto retry;
                }
        }
        if (gfs2_holder_queued(gh))
@@ -920,9 +921,9 @@ retry_under_glock:
                leftover = fault_in_iov_iter_readable(from, window_size);
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(gh))
-                               goto retry;
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       goto retry;
                }
        }
 out:
@@ -950,20 +951,19 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
         * and retry.
         */
 
-       if (iocb->ki_flags & IOCB_DIRECT) {
-               ret = gfs2_file_direct_read(iocb, to, &gh);
-               if (likely(ret != -ENOTBLK))
-                       return ret;
-               iocb->ki_flags &= ~IOCB_DIRECT;
-       }
+       if (iocb->ki_flags & IOCB_DIRECT)
+               return gfs2_file_direct_read(iocb, to, &gh);
+
+       pagefault_disable();
        iocb->ki_flags |= IOCB_NOIO;
        ret = generic_file_read_iter(iocb, to);
        iocb->ki_flags &= ~IOCB_NOIO;
+       pagefault_enable();
        if (ret >= 0) {
                if (!iov_iter_count(to))
                        return ret;
                written = ret;
-       } else {
+       } else if (ret != -EFAULT) {
                if (ret != -EAGAIN)
                        return ret;
                if (iocb->ki_flags & IOCB_NOWAIT)
@@ -989,12 +989,11 @@ retry_under_glock:
                leftover = fault_in_iov_iter_writeable(to, window_size);
                gfs2_holder_disallow_demote(&gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(&gh)) {
-                               if (written)
-                                       goto out_uninit;
-                               goto retry;
-                       }
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(&gh))
+                               goto retry_under_glock;
+                       if (written)
+                               goto out_uninit;
+                       goto retry;
                }
        }
        if (gfs2_holder_queued(&gh))
@@ -1068,12 +1067,11 @@ retry_under_glock:
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
                        from->count = min(from->count, window_size - leftover);
-                       if (!gfs2_holder_queued(gh)) {
-                               if (read)
-                                       goto out_uninit;
-                               goto retry;
-                       }
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       if (read && !(iocb->ki_flags & IOCB_DIRECT))
+                               goto out_uninit;
+                       goto retry;
                }
        }
 out_unlock:
@@ -1083,6 +1081,7 @@ out_uninit:
        gfs2_holder_uninit(gh);
        if (statfs_gh)
                kfree(statfs_gh);
+       from->count = orig_count - read;
        return read ? read : ret;
 }
 
@@ -1497,7 +1496,6 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                if (error != GLR_TRYFAILED)
                        break;
                fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
-               fl_gh->gh_error = 0;
                msleep(sleeptime);
        }
        if (error) {
index 6b23399..630c655 100644 (file)
@@ -542,7 +542,7 @@ restart:
                         * some reason. If this holder is the head of the list, it
                         * means we have a blocked holder at the head, so return 1.
                         */
-                       if (gh->gh_list.prev == &gl->gl_holders)
+                       if (list_is_first(&gh->gh_list, &gl->gl_holders))
                                return 1;
                        do_error(gl, 0);
                        break;
@@ -669,6 +669,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 
        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
+               if (gh && (ret & LM_OUT_CANCELED))
+                       gfs2_holder_wake(gh);
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
@@ -1259,7 +1261,6 @@ void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
-       gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
 }
@@ -1565,6 +1566,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);
 
+       gh->gh_error = 0;
        spin_lock(&gl->gl_lockref.lock);
        add_to_queue(gh);
        if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1691,6 +1693,14 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
        struct gfs2_glock *gl = gh->gh_gl;
 
        spin_lock(&gl->gl_lockref.lock);
+       if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
+           !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+               spin_unlock(&gl->gl_lockref.lock);
+               gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
+               wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
+               spin_lock(&gl->gl_lockref.lock);
+       }
+
        __gfs2_glock_dq(gh);
        spin_unlock(&gl->gl_lockref.lock);
 }
index 89905f4..c8ec876 100644 (file)
@@ -131,7 +131,21 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                struct gfs2_sbd *sdp = GFS2_SB(inode);
                struct gfs2_glock *io_gl;
 
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
+                                      &ip->i_gl);
+               if (unlikely(error))
+                       goto fail;
+
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE,
+                                      &io_gl);
+               if (unlikely(error))
+                       goto fail;
+
+               if (blktype != GFS2_BLKST_UNLINKED)
+                       gfs2_cancel_delete_work(io_gl);
+               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT,
+                                          &ip->i_iopen_gh);
+               gfs2_glock_put(io_gl);
                if (unlikely(error))
                        goto fail;
 
@@ -161,16 +175,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
                set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
 
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-               if (unlikely(error))
-                       goto fail;
-               if (blktype != GFS2_BLKST_UNLINKED)
-                       gfs2_cancel_delete_work(io_gl);
-               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
-               gfs2_glock_put(io_gl);
-               if (unlikely(error))
-                       goto fail;
-
                /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
                inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
                inode->i_atime.tv_nsec = 0;
@@ -716,13 +720,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
        BUG_ON(error);
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
        if (error)
                goto fail_gunlock2;
 
+       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       if (error)
+               goto fail_gunlock3;
+
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
-               goto fail_gunlock2;
+               goto fail_gunlock3;
 
        if (blocks > 1) {
                ip->i_eattr = ip->i_no_addr + 1;
@@ -731,10 +739,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        init_dinode(dip, ip, symname);
        gfs2_trans_end(sdp);
 
-       error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
-       if (error)
-               goto fail_gunlock2;
-
        glock_set_object(ip->i_gl, ip);
        glock_set_object(io_gl, ip);
        gfs2_set_iop(inode);
@@ -745,14 +749,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (default_acl) {
                error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
                if (error)
-                       goto fail_gunlock3;
+                       goto fail_gunlock4;
                posix_acl_release(default_acl);
                default_acl = NULL;
        }
        if (acl) {
                error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
                if (error)
-                       goto fail_gunlock3;
+                       goto fail_gunlock4;
                posix_acl_release(acl);
                acl = NULL;
        }
@@ -760,11 +764,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
                                             &gfs2_initxattrs, NULL);
        if (error)
-               goto fail_gunlock3;
+               goto fail_gunlock4;
 
        error = link_dinode(dip, name, ip, &da);
        if (error)
-               goto fail_gunlock3;
+               goto fail_gunlock4;
 
        mark_inode_dirty(inode);
        d_instantiate(dentry, inode);
@@ -782,9 +786,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        unlock_new_inode(inode);
        return error;
 
-fail_gunlock3:
+fail_gunlock4:
        glock_clear_object(ip->i_gl, ip);
        glock_clear_object(io_gl, ip);
+fail_gunlock3:
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
        gfs2_glock_put(io_gl);
@@ -793,7 +798,7 @@ fail_free_inode:
                if (free_vfs_inode) /* else evict will do the put for us */
                        gfs2_glock_put(ip->i_gl);
        }
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_deltree(&ip->i_res);
        gfs2_qa_put(ip);
 fail_free_acls:
        posix_acl_release(default_acl);
index 50578f8..2559a79 100644 (file)
@@ -261,6 +261,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
        int req;
        u32 lkf;
        char strname[GDLM_STRNAME_BYTES] = "";
+       int error;
 
        req = make_mode(gl->gl_name.ln_sbd, req_state);
        lkf = make_flags(gl, flags, req);
@@ -279,8 +280,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
         * Submit the actual lock request.
         */
 
-       return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+again:
+       error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
                        GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+       if (error == -EBUSY) {
+               msleep(20);
+               goto again;
+       }
+       return error;
 }
 
 static void gdlm_put_lock(struct gfs2_glock *gl)
@@ -312,8 +319,14 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
                return;
        }
 
+again:
        error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
                           NULL, gl);
+       if (error == -EBUSY) {
+               msleep(20);
+               goto again;
+       }
+
        if (error) {
                fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
                       gl->gl_name.ln_type,
index 0fb3c01..801ad9f 100644 (file)
@@ -680,13 +680,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 /**
  * gfs2_rs_delete - delete a multi-block reservation
  * @ip: The inode for this reservation
- * @wcount: The inode's write count, or NULL
  *
  */
-void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
+void gfs2_rs_delete(struct gfs2_inode *ip)
 {
+       struct inode *inode = &ip->i_inode;
+
        down_write(&ip->i_rw_mutex);
-       if ((wcount == NULL) || (atomic_read(wcount) <= 1))
+       if (atomic_read(&inode->i_writecount) <= 1)
                gfs2_rs_deltree(&ip->i_res);
        up_write(&ip->i_rw_mutex);
 }
@@ -922,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        spin_lock_init(&rgd->rd_rsspin);
        mutex_init(&rgd->rd_mutex);
 
-       error = compute_bitstructs(rgd);
-       if (error)
-               goto fail;
-
        error = gfs2_glock_get(sdp, rgd->rd_addr,
                               &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
        if (error)
                goto fail;
 
+       error = compute_bitstructs(rgd);
+       if (error)
+               goto fail_glock;
+
        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
        rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
        if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -944,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        }
 
        error = 0; /* someone else read in the rgrp; free it and ignore it */
+fail_glock:
        gfs2_glock_put(rgd->rd_gl);
 
 fail:
@@ -1415,7 +1417,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 
        start = r.start >> bs_shift;
        end = start + (r.len >> bs_shift);
-       minlen = max_t(u64, r.minlen,
+       minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
+       minlen = max_t(u64, minlen,
                       q->limits.discard_granularity) >> bs_shift;
 
        if (end <= start || minlen > sdp->sd_max_rg_data)
index 3e2ca1f..46dd94e 100644 (file)
@@ -45,7 +45,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
                             bool dinode, u64 *generation);
 
 extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
+extern void gfs2_rs_delete(struct gfs2_inode *ip);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
                               u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
index cf9cf66..bdb773e 100644 (file)
@@ -1396,7 +1396,7 @@ out:
        truncate_inode_pages_final(&inode->i_data);
        if (ip->i_qadata)
                gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_deltree(&ip->i_res);
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
index fb2c2ea..08503dc 100644 (file)
@@ -74,7 +74,7 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
  * namespace.c
  */
 extern struct vfsmount *lookup_mnt(const struct path *);
-extern int finish_automount(struct vfsmount *, struct path *);
+extern int finish_automount(struct vfsmount *, const struct path *);
 
 extern int sb_prepare_remount_readonly(struct super_block *);
 
index b94d57c..a8413f0 100644 (file)
@@ -611,6 +611,7 @@ struct io_sr_msg {
        int                             msg_flags;
        int                             bgid;
        size_t                          len;
+       size_t                          done_io;
 };
 
 struct io_open {
@@ -781,6 +782,7 @@ enum {
        REQ_F_SKIP_LINK_CQES_BIT,
        REQ_F_SINGLE_POLL_BIT,
        REQ_F_DOUBLE_POLL_BIT,
+       REQ_F_PARTIAL_IO_BIT,
        /* keep async read/write and isreg together and in order */
        REQ_F_SUPPORT_NOWAIT_BIT,
        REQ_F_ISREG_BIT,
@@ -843,6 +845,8 @@ enum {
        REQ_F_SINGLE_POLL       = BIT(REQ_F_SINGLE_POLL_BIT),
        /* double poll may active */
        REQ_F_DOUBLE_POLL       = BIT(REQ_F_DOUBLE_POLL_BIT),
+       /* request has already done partial IO */
+       REQ_F_PARTIAL_IO        = BIT(REQ_F_PARTIAL_IO_BIT),
 };
 
 struct async_poll {
@@ -923,7 +927,6 @@ struct io_kiocb {
        struct io_wq_work_node          comp_list;
        atomic_t                        refs;
        atomic_t                        poll_refs;
-       struct io_kiocb                 *link;
        struct io_task_work             io_task_work;
        /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
        struct hlist_node               hash_node;
@@ -931,9 +934,11 @@ struct io_kiocb {
        struct async_poll               *apoll;
        /* opcode allocated if it needs to store data for async defer */
        void                            *async_data;
-       /* custom credentials, valid IFF REQ_F_CREDS is set */
        /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
        struct io_buffer                *kbuf;
+       /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+       struct io_kiocb                 *link;
+       /* custom credentials, valid IFF REQ_F_CREDS is set */
        const struct cred               *creds;
        struct io_wq_work               work;
 };
@@ -962,6 +967,7 @@ struct io_op_def {
        /* set if opcode supports polled "wait" */
        unsigned                pollin : 1;
        unsigned                pollout : 1;
+       unsigned                poll_exclusive : 1;
        /* op supports buffer selection */
        unsigned                buffer_select : 1;
        /* do prep async if is going to be punted */
@@ -1056,6 +1062,7 @@ static const struct io_op_def io_op_defs[] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
+               .poll_exclusive         = 1,
        },
        [IORING_OP_ASYNC_CANCEL] = {
                .audit_skip             = 1,
@@ -1330,6 +1337,8 @@ static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
 
 static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 {
+       lockdep_assert_held(&req->ctx->completion_lock);
+
        if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
                return 0;
        return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
@@ -1362,6 +1371,8 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req,
                cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
                spin_unlock(&ctx->completion_lock);
        } else {
+               lockdep_assert_held(&req->ctx->uring_lock);
+
                cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
        }
 
@@ -1382,7 +1393,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
        return NULL;
 }
 
-static void io_kbuf_recycle(struct io_kiocb *req)
+static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
@@ -1390,6 +1401,12 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 
        if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
                return;
+       /* don't recycle if we already did IO to this buffer */
+       if (req->flags & REQ_F_PARTIAL_IO)
+               return;
+
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_lock(&ctx->uring_lock);
 
        lockdep_assert_held(&ctx->uring_lock);
 
@@ -1398,6 +1415,9 @@ static void io_kbuf_recycle(struct io_kiocb *req)
        list_add(&buf->list, &bl->buf_list);
        req->flags &= ~REQ_F_BUFFER_SELECTED;
        req->kbuf = NULL;
+
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_unlock(&ctx->uring_lock);
 }
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
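
The recycle path above can now be entered with or without ->uring_lock held; the new issue_flags argument (IO_URING_F_UNLOCKED) tells it which case applies, and the lock is taken only when the caller did not already hold it. A minimal standalone sketch of that conditional-locking idiom, with illustrative names rather than the io_uring types:

    #include <pthread.h>

    struct buf_cache {
            pthread_mutex_t lock;
            int nr_cached;
    };

    /*
     * Illustrative only: grab the lock just when the caller did not already
     * hold it, mirroring how io_kbuf_recycle() keys off IO_URING_F_UNLOCKED.
     */
    static void cache_put(struct buf_cache *c, int caller_unlocked)
    {
            if (caller_unlocked)
                    pthread_mutex_lock(&c->lock);

            c->nr_cached++;         /* the critical section runs locked either way */

            if (caller_unlocked)
                    pthread_mutex_unlock(&c->lock);
    }
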
@@ -2104,6 +2124,12 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
                        }
                }
                io_req_put_rsrc(req, ctx);
+               /*
+                * Selected buffer deallocation in io_clean_op() assumes that
+                * we don't hold ->completion_lock. Clean them here to avoid
+                * deadlocks.
+                */
+               io_put_kbuf_comp(req);
                io_dismantle_req(req);
                io_put_task(req->task, 1);
                wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
@@ -2148,7 +2174,7 @@ static inline void io_req_complete(struct io_kiocb *req, s32 res)
 static void io_req_complete_failed(struct io_kiocb *req, s32 res)
 {
        req_set_fail(req);
-       io_req_complete_post(req, res, io_put_kbuf(req, 0));
+       io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 }
 
 static void io_req_complete_fail_submit(struct io_kiocb *req)
@@ -2437,6 +2463,8 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    io_task_work.node);
 
+               prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
                if (req->ctx != *ctx) {
                        if (unlikely(!*uring_locked && *ctx))
                                ctx_commit_and_unlock(*ctx);
@@ -2469,6 +2497,8 @@ static void handle_tw_list(struct io_wq_work_node *node,
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    io_task_work.node);
 
+               prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
                if (req->ctx != *ctx) {
                        ctx_flush_and_put(*ctx, locked);
                        *ctx = req->ctx;
@@ -2974,8 +3004,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 
 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 {
-       if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+       if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
                kiocb_end_write(req);
+               fsnotify_modify(req->file);
+       } else {
+               fsnotify_access(req->file);
+       }
        if (unlikely(res != req->result)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
@@ -4439,9 +4473,6 @@ static int io_msg_ring_prep(struct io_kiocb *req,
                     sqe->splice_fd_in || sqe->buf_index || sqe->personality))
                return -EINVAL;
 
-       if (req->file->f_op != &io_uring_fops)
-               return -EBADFD;
-
        req->msg.user_data = READ_ONCE(sqe->off);
        req->msg.len = READ_ONCE(sqe->len);
        return 0;
@@ -4451,14 +4482,18 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *target_ctx;
        struct io_msg *msg = &req->msg;
-       int ret = -EOVERFLOW;
        bool filled;
+       int ret;
+
+       ret = -EBADFD;
+       if (req->file->f_op != &io_uring_fops)
+               goto done;
 
+       ret = -EOVERFLOW;
        target_ctx = req->file->private_data;
 
        spin_lock(&target_ctx->completion_lock);
-       filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len,
-                                       IORING_CQE_F_MSG);
+       filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
        io_commit_cqring(target_ctx);
        spin_unlock(&target_ctx->completion_lock);
 
@@ -4467,6 +4502,9 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
                ret = 0;
        }
 
+done:
+       if (ret < 0)
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -4537,6 +4575,8 @@ static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
                                req->sync.len);
        if (ret < 0)
                req_set_fail(req);
+       else
+               fsnotify_modify(req->file);
        io_req_complete(req, ret);
        return 0;
 }
@@ -5419,12 +5459,21 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
+       sr->done_io = 0;
        return 0;
 }
 
+static bool io_net_retry(struct socket *sock, int flags)
+{
+       if (!(flags & MSG_WAITALL))
+               return false;
+       return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
+}
+
 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_async_msghdr iomsg, *kmsg;
+       struct io_sr_msg *sr = &req->sr_msg;
        struct socket *sock;
        struct io_buffer *kbuf;
        unsigned flags;
@@ -5467,6 +5516,11 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
                        return io_setup_async_msg(req, kmsg);
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
+               if (ret > 0 && io_net_retry(sock, flags)) {
+                       sr->done_io += ret;
+                       req->flags |= REQ_F_PARTIAL_IO;
+                       return io_setup_async_msg(req, kmsg);
+               }
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
                req_set_fail(req);
@@ -5476,6 +5530,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret >= 0)
+               ret += sr->done_io;
+       else if (sr->done_io)
+               ret = sr->done_io;
        __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
        return 0;
 }
@@ -5526,12 +5584,23 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
                        return -EAGAIN;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
+               if (ret > 0 && io_net_retry(sock, flags)) {
+                       sr->len -= ret;
+                       sr->buf += ret;
+                       sr->done_io += ret;
+                       req->flags |= REQ_F_PARTIAL_IO;
+                       return -EAGAIN;
+               }
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 out_free:
                req_set_fail(req);
        }
 
+       if (ret >= 0)
+               ret += sr->done_io;
+       else if (sr->done_io)
+               ret = sr->done_io;
        __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
        return 0;
 }
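
Both receive paths above follow the same pattern: if a stream-like socket made partial progress under MSG_WAITALL, the bytes are banked in sr->done_io, REQ_F_PARTIAL_IO is set, and the request is retried; the final completion then folds done_io back into the result. A hedged userspace illustration of that accumulate-then-report pattern (illustrative helpers, not the io_uring code itself):

    #include <stdbool.h>
    #include <sys/socket.h>

    /* Retry only stream-like sockets, and only when MSG_WAITALL was requested. */
    static bool net_should_retry(int socktype, int msg_flags)
    {
            if (!(msg_flags & MSG_WAITALL))
                    return false;
            return socktype == SOCK_STREAM || socktype == SOCK_SEQPACKET;
    }

    /* Fold bytes completed by earlier attempts (done_io) into the final result. */
    static long recv_result(long ret, long done_io)
    {
            if (ret >= 0)
                    return ret + done_io;
            return done_io ? done_io : ret;
    }
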
@@ -5569,9 +5638,6 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
        struct file *file;
        int ret, fd;
 
-       if (req->file->f_flags & O_NONBLOCK)
-               req->flags |= REQ_F_NOWAIT;
-
        if (!fixed) {
                fd = __get_unused_fd_flags(accept->flags, accept->nofile);
                if (unlikely(fd < 0))
@@ -5801,7 +5867,7 @@ struct io_poll_table {
 };
 
 #define IO_POLL_CANCEL_FLAG    BIT(31)
-#define IO_POLL_REF_MASK       ((1u << 20)-1)
+#define IO_POLL_REF_MASK       GENMASK(30, 0)
 
 /*
  * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
@@ -6035,10 +6101,13 @@ static void io_poll_cancel_req(struct io_kiocb *req)
        io_poll_execute(req, 0, 0);
 }
 
+#define wqe_to_req(wait)       ((void *)((unsigned long) (wait)->private & ~1))
+#define wqe_is_double(wait)    ((unsigned long) (wait)->private & 1)
+
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
 {
-       struct io_kiocb *req = wait->private;
+       struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
                                                 wait);
        __poll_t mask = key_to_poll(key);
@@ -6076,7 +6145,10 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
-                       req->flags &= ~REQ_F_SINGLE_POLL;
+                       if (wqe_is_double(wait))
+                               req->flags &= ~REQ_F_DOUBLE_POLL;
+                       else
+                               req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask, poll->events);
        }
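
wqe_to_req() and wqe_is_double() above recover a request pointer and a one-bit "double entry" tag from the same wait->private word, relying on the pointer having at least 2-byte alignment so its low bit is free. A standalone sketch of that pointer-tagging idiom (userspace C, illustrative names):

    #include <assert.h>
    #include <stdint.h>

    struct request { int data; };

    /* Pack a request pointer and a one-bit tag into a single word;
     * the low bit is free because the struct is at least 2-byte aligned. */
    static void *wqe_pack(struct request *req, int is_double)
    {
            return (void *)((uintptr_t)req | (is_double ? 1UL : 0UL));
    }

    static struct request *wqe_req(void *priv)
    {
            return (struct request *)((uintptr_t)priv & ~(uintptr_t)1);
    }

    static int wqe_double(void *priv)
    {
            return (uintptr_t)priv & 1;
    }

    int main(void)
    {
            struct request r = { 0 };
            void *priv = wqe_pack(&r, 1);

            assert(wqe_req(priv) == &r && wqe_double(priv));
            return 0;
    }
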
@@ -6088,6 +6160,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                            struct io_poll_iocb **poll_ptr)
 {
        struct io_kiocb *req = pt->req;
+       unsigned long wqe_private = (unsigned long) req;
 
        /*
         * The file being polled uses multiple waitqueues for poll handling
@@ -6113,6 +6186,8 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                        pt->error = -ENOMEM;
                        return;
                }
+               /* mark as double wq entry */
+               wqe_private |= 1;
                req->flags |= REQ_F_DOUBLE_POLL;
                io_init_poll_iocb(poll, first->events, first->wait.func);
                *poll_ptr = poll;
@@ -6123,7 +6198,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
        req->flags |= REQ_F_SINGLE_POLL;
        pt->nr_entries++;
        poll->head = head;
-       poll->wait.private = req;
+       poll->wait.private = (void *) wqe_private;
 
        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
@@ -6150,7 +6225,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
        INIT_HLIST_NODE(&req->hash_node);
        io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;
-       poll->wait.private = req;
 
        ipt->pt._key = mask;
        ipt->req = req;
@@ -6238,7 +6312,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
        } else {
                mask |= POLLOUT | POLLWRNORM;
        }
-
+       if (def->poll_exclusive)
+               mask |= EPOLLEXCLUSIVE;
        if (!(issue_flags & IO_URING_F_UNLOCKED) &&
            !list_empty(&ctx->apoll_cache)) {
                apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
@@ -6254,6 +6329,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;
 
+       io_kbuf_recycle(req, issue_flags);
+
        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
        if (ret || ipt.error)
                return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
@@ -6281,6 +6358,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
                list = &ctx->cancel_hash[i];
                hlist_for_each_entry_safe(req, tmp, list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
+                               hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
@@ -7075,8 +7153,11 @@ fail:
 
 static void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & REQ_F_BUFFER_SELECTED)
+       if (req->flags & REQ_F_BUFFER_SELECTED) {
+               spin_lock(&req->ctx->completion_lock);
                io_put_kbuf_comp(req);
+               spin_unlock(&req->ctx->completion_lock);
+       }
 
        if (req->flags & REQ_F_NEED_CLEANUP) {
                switch (req->opcode) {
@@ -7505,11 +7586,9 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
                 * Queued up for async execution, worker will release
                 * submit reference when the iocb is actually submitted.
                 */
-               io_kbuf_recycle(req);
                io_queue_async_work(req, NULL);
                break;
        case IO_APOLL_OK:
-               io_kbuf_recycle(req);
                break;
        }
 
@@ -8053,6 +8132,13 @@ static int io_sq_thread(void *data)
                                        needs_sched = false;
                                        break;
                                }
+
+                               /*
+                                * Ensure the store of the wakeup flag is not
+                                * reordered with the load of the SQ tail
+                                */
+                               smp_mb();
+
                                if (io_sqring_entries(ctx)) {
                                        needs_sched = false;
                                        break;
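
The smp_mb() added above keeps the store of the wakeup flag from being reordered past the subsequent load of the SQ tail, so a submission racing with the thread going idle is never missed. A hedged userspace sketch of the same store-then-load fence pattern using C11 atomics (these are not the kernel primitives, just an analogy):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool need_wakeup;
    static atomic_uint sq_tail;

    /*
     * Poller side: publish the "wake me" flag, then re-read the tail. The full
     * fence keeps the store from moving after the load; submitters would do the
     * mirror image (store the new tail, fence, then load need_wakeup).
     */
    static bool safe_to_sleep(unsigned int seen_tail)
    {
            atomic_store_explicit(&need_wakeup, true, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);      /* plays the role of smp_mb() */
            return atomic_load_explicit(&sq_tail, memory_order_relaxed) == seen_tail;
    }
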
@@ -8782,6 +8868,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
                        fput(fpl->fp[i]);
        } else {
                kfree_skb(skb);
+               free_uid(fpl->user);
                kfree(fpl);
        }
 
index 090bf47..80ac36a 100644 (file)
@@ -173,7 +173,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
        if (*len == 0)
                return -EINVAL;
-       if (start > maxbytes)
+       if (start >= maxbytes)
                return -EFBIG;
 
        /*
index 49dccd9..8ce8720 100644 (file)
@@ -435,18 +435,17 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 {
        struct iomap_page *iop = to_iomap_page(folio);
        struct inode *inode = folio->mapping->host;
-       size_t len;
        unsigned first, last, i;
 
        if (!iop)
                return false;
 
-       /* Limit range to this folio */
-       len = min(folio_size(folio) - from, count);
+       /* Caller's range may extend past the end of this folio */
+       count = min(folio_size(folio) - from, count);
 
-       /* First and last blocks in range within page */
+       /* First and last blocks in range within folio */
        first = from >> inode->i_blkbits;
-       last = (from + len - 1) >> inode->i_blkbits;
+       last = (from + count - 1) >> inode->i_blkbits;
 
        for (i = first; i <= last; i++)
                if (!test_bit(i, iop->uptodate))
index b288c8a..837cd55 100644 (file)
@@ -415,13 +415,15 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
                jffs2_free_ino_caches(c);
                jffs2_free_raw_node_refs(c);
                ret = -EIO;
-               goto out_free;
+               goto out_sum_exit;
        }
 
        jffs2_calc_trigger_levels(c);
 
        return 0;
 
+ out_sum_exit:
+       jffs2_sum_exit(c);
  out_free:
        kvfree(c->blocks);
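
The new out_sum_exit label restores the matched-unwind shape of the error path: each failure jumps to the label that releases exactly what had been set up so far, in reverse order. A small self-contained sketch of that goto-ladder convention (illustrative function, not the jffs2 code):

    #include <stdlib.h>

    static int setup_two(void **a, void **b)
    {
            *a = malloc(32);
            if (!*a)
                    goto out;

            *b = malloc(32);
            if (!*b)
                    goto out_free_a;

            return 0;

    out_free_a:
            free(*a);
            *a = NULL;
    out:
            return -1;
    }
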
 
index 2ac4104..71f03a5 100644 (file)
@@ -603,8 +603,8 @@ out_root:
        jffs2_free_ino_caches(c);
        jffs2_free_raw_node_refs(c);
        kvfree(c->blocks);
- out_inohash:
        jffs2_clear_xattr_subsystem(c);
+ out_inohash:
        kfree(c->inocache_list);
  out_wbuf:
        jffs2_flash_cleanup(c);
index 2e4a867..93a2951 100644 (file)
 #include <linux/mutex.h>
 
 struct jffs2_inode_info {
-       /* We need an internal mutex similar to inode->i_mutex.
+       /* We need an internal mutex similar to inode->i_rwsem.
           Unfortunately, we can't use the existing one, because
           either the GC would deadlock, or we'd have to release it
           before letting GC proceed. Or we'd have to put ugliness
-          into the GC code so it didn't attempt to obtain the i_mutex
+          into the GC code so it didn't attempt to obtain the i_rwsem
           for the inode(s) which are already locked */
        struct mutex sem;
 
index b676056..29671e3 100644 (file)
@@ -136,7 +136,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
                if (!s) {
                        JFFS2_WARNING("Can't allocate memory for summary\n");
                        ret = -ENOMEM;
-                       goto out;
+                       goto out_buf;
                }
        }
 
@@ -275,13 +275,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
        }
        ret = 0;
  out:
+       jffs2_sum_reset_collected(s);
+       kfree(s);
+ out_buf:
        if (buf_size)
                kfree(flashbuf);
 #ifndef __ECOS
        else
                mtd_unpoint(c->mtd, 0, c->mtd->size);
 #endif
-       kfree(s);
        return ret;
 }
 
index 74067a7..8842306 100644 (file)
@@ -120,13 +120,8 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
-       } else {
-               /*
-                * The same behavior and code as single_open().  Returns
-                * !NULL if pos is at the beginning; otherwise, NULL.
-                */
-               return NULL + !*ppos;
        }
+       return single_start(sf, ppos);
 }
 
 static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
index 077b876..23871b1 100644 (file)
@@ -656,8 +656,8 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
                rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
        rsp->Reserved = 0;
        rsp->Reserved2 = 0;
-       rsp->PersistentFid = cpu_to_le64(fp->persistent_id);
-       rsp->VolatileFid = cpu_to_le64(fp->volatile_id);
+       rsp->PersistentFid = fp->persistent_id;
+       rsp->VolatileFid = fp->volatile_id;
 
        inc_rfc1001_len(work->response_buf, 24);
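
This hunk, and several ksmbd hunks below, drop the cpu_to_le64()/le64_to_cpu() round trips around PersistentFileId/VolatileFileId: the IDs are opaque tokens the server generates and the client echoes back verbatim, so no byte-order conversion is meaningful and they are carried as plain __u64. A hedged sketch of that opaque-token convention (illustrative struct, not the SMB2 wire format):

    #include <stdint.h>

    /*
     * An opaque 64-bit handle is generated by the server and merely echoed
     * back by the client; neither side interprets its bytes, so it is stored
     * as a plain 64-bit value rather than a little-endian wire integer.
     */
    struct handle_pair {
            uint64_t persistent_id;
            uint64_t volatile_id;
    };

    static void echo_handles(struct handle_pair *rsp, const struct handle_pair *req)
    {
            /* no cpu_to_le64()/le64_to_cpu() needed for opaque tokens */
            rsp->persistent_id = req->persistent_id;
            rsp->volatile_id = req->volatile_id;
    }
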
 
index 2e12f6d..4cd03d6 100644 (file)
@@ -585,7 +585,7 @@ static int __init ksmbd_server_init(void)
        if (ret)
                goto err_crypto_destroy;
 
-       pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");
+       pr_warn_once("The ksmbd server is experimental\n");
 
        return 0;
 
index 67e8e28..3bf6c56 100644 (file)
@@ -377,12 +377,8 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
         * command in the compound request
         */
        if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) {
-               work->compound_fid =
-                       le64_to_cpu(((struct smb2_create_rsp *)rsp)->
-                               VolatileFileId);
-               work->compound_pfid =
-                       le64_to_cpu(((struct smb2_create_rsp *)rsp)->
-                               PersistentFileId);
+               work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId;
+               work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId;
                work->compound_sid = le64_to_cpu(rsp->SessionId);
        }
 
@@ -2129,7 +2125,7 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
        rsp->EndofFile = cpu_to_le64(0);
        rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE;
        rsp->Reserved2 = 0;
-       rsp->VolatileFileId = cpu_to_le64(id);
+       rsp->VolatileFileId = id;
        rsp->PersistentFileId = 0;
        rsp->CreateContextsOffset = 0;
        rsp->CreateContextsLength = 0;
@@ -3157,8 +3153,8 @@ int smb2_open(struct ksmbd_work *work)
 
        rsp->Reserved2 = 0;
 
-       rsp->PersistentFileId = cpu_to_le64(fp->persistent_id);
-       rsp->VolatileFileId = cpu_to_le64(fp->volatile_id);
+       rsp->PersistentFileId = fp->persistent_id;
+       rsp->VolatileFileId = fp->volatile_id;
 
        rsp->CreateContextsOffset = 0;
        rsp->CreateContextsLength = 0;
@@ -3865,9 +3861,7 @@ int smb2_query_dir(struct ksmbd_work *work)
                goto err_out2;
        }
 
-       dir_fp = ksmbd_lookup_fd_slow(work,
-                                     le64_to_cpu(req->VolatileFileId),
-                                     le64_to_cpu(req->PersistentFileId));
+       dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!dir_fp) {
                rc = -EBADF;
                goto err_out2;
@@ -4088,12 +4082,12 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
         * Windows can sometime send query file info request on
         * pipe without opening it, checking error condition here
         */
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
        if (!ksmbd_session_rpc_method(sess, id))
                return -ENOENT;
 
        ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
-                   req->FileInfoClass, le64_to_cpu(req->VolatileFileId));
+                   req->FileInfoClass, req->VolatileFileId);
 
        switch (req->FileInfoClass) {
        case FILE_STANDARD_INFORMATION:
@@ -4738,7 +4732,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
        }
 
        if (work->next_smb2_rcv_hdr_off) {
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -4747,8 +4741,8 @@ static int smb2_get_info_file(struct ksmbd_work *work,
        }
 
        if (!has_file_id(id)) {
-               id = le64_to_cpu(req->VolatileFileId);
-               pid = le64_to_cpu(req->PersistentFileId);
+               id = req->VolatileFileId;
+               pid = req->PersistentFileId;
        }
 
        fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -5113,7 +5107,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
        }
 
        if (work->next_smb2_rcv_hdr_off) {
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -5122,8 +5116,8 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
        }
 
        if (!has_file_id(id)) {
-               id = le64_to_cpu(req->VolatileFileId);
-               pid = le64_to_cpu(req->PersistentFileId);
+               id = req->VolatileFileId;
+               pid = req->PersistentFileId;
        }
 
        fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -5221,7 +5215,7 @@ static noinline int smb2_close_pipe(struct ksmbd_work *work)
        struct smb2_close_req *req = smb2_get_msg(work->request_buf);
        struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
 
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
        ksmbd_session_rpc_close(work->sess, id);
 
        rsp->StructureSize = cpu_to_le16(60);
@@ -5280,7 +5274,7 @@ int smb2_close(struct ksmbd_work *work)
        }
 
        if (work->next_smb2_rcv_hdr_off &&
-           !has_file_id(le64_to_cpu(req->VolatileFileId))) {
+           !has_file_id(req->VolatileFileId)) {
                if (!has_file_id(work->compound_fid)) {
                        /* file already closed, return FILE_CLOSED */
                        ksmbd_debug(SMB, "file already closed\n");
@@ -5299,7 +5293,7 @@ int smb2_close(struct ksmbd_work *work)
                        work->compound_pfid = KSMBD_NO_FID;
                }
        } else {
-               volatile_id = le64_to_cpu(req->VolatileFileId);
+               volatile_id = req->VolatileFileId;
        }
        ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id);
 
@@ -5988,7 +5982,7 @@ int smb2_set_info(struct ksmbd_work *work)
        if (work->next_smb2_rcv_hdr_off) {
                req = ksmbd_req_buf_next(work);
                rsp = ksmbd_resp_buf_next(work);
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -6000,8 +5994,8 @@ int smb2_set_info(struct ksmbd_work *work)
        }
 
        if (!has_file_id(id)) {
-               id = le64_to_cpu(req->VolatileFileId);
-               pid = le64_to_cpu(req->PersistentFileId);
+               id = req->VolatileFileId;
+               pid = req->PersistentFileId;
        }
 
        fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -6079,7 +6073,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
        struct smb2_read_req *req = smb2_get_msg(work->request_buf);
        struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
 
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
 
        inc_rfc1001_len(work->response_buf, 16);
        rpc_resp = ksmbd_rpc_read(work->sess, id);
@@ -6215,8 +6209,7 @@ int smb2_read(struct ksmbd_work *work)
                        goto out;
        }
 
-       fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp) {
                err = -ENOENT;
                goto out;
@@ -6335,7 +6328,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
        size_t length;
 
        length = le32_to_cpu(req->Length);
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
 
        if (le16_to_cpu(req->DataOffset) ==
            offsetof(struct smb2_write_req, Buffer)) {
@@ -6471,8 +6464,7 @@ int smb2_write(struct ksmbd_work *work)
                goto out;
        }
 
-       fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp) {
                err = -ENOENT;
                goto out;
@@ -6584,12 +6576,9 @@ int smb2_flush(struct ksmbd_work *work)
 
        WORK_BUFFERS(work, req, rsp);
 
-       ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n",
-                   le64_to_cpu(req->VolatileFileId));
+       ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", req->VolatileFileId);
 
-       err = ksmbd_vfs_fsync(work,
-                             le64_to_cpu(req->VolatileFileId),
-                             le64_to_cpu(req->PersistentFileId));
+       err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId);
        if (err)
                goto out;
 
@@ -6618,8 +6607,7 @@ int smb2_cancel(struct ksmbd_work *work)
        struct ksmbd_conn *conn = work->conn;
        struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
        struct smb2_hdr *chdr;
-       struct ksmbd_work *cancel_work = NULL;
-       int canceled = 0;
+       struct ksmbd_work *cancel_work = NULL, *iter;
        struct list_head *command_list;
 
        ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
@@ -6629,11 +6617,11 @@ int smb2_cancel(struct ksmbd_work *work)
                command_list = &conn->async_requests;
 
                spin_lock(&conn->request_lock);
-               list_for_each_entry(cancel_work, command_list,
+               list_for_each_entry(iter, command_list,
                                    async_request_entry) {
-                       chdr = smb2_get_msg(cancel_work->request_buf);
+                       chdr = smb2_get_msg(iter->request_buf);
 
-                       if (cancel_work->async_id !=
+                       if (iter->async_id !=
                            le64_to_cpu(hdr->Id.AsyncId))
                                continue;
 
@@ -6641,7 +6629,7 @@ int smb2_cancel(struct ksmbd_work *work)
                                    "smb2 with AsyncId %llu cancelled command = 0x%x\n",
                                    le64_to_cpu(hdr->Id.AsyncId),
                                    le16_to_cpu(chdr->Command));
-                       canceled = 1;
+                       cancel_work = iter;
                        break;
                }
                spin_unlock(&conn->request_lock);
@@ -6649,24 +6637,24 @@ int smb2_cancel(struct ksmbd_work *work)
                command_list = &conn->requests;
 
                spin_lock(&conn->request_lock);
-               list_for_each_entry(cancel_work, command_list, request_entry) {
-                       chdr = smb2_get_msg(cancel_work->request_buf);
+               list_for_each_entry(iter, command_list, request_entry) {
+                       chdr = smb2_get_msg(iter->request_buf);
 
                        if (chdr->MessageId != hdr->MessageId ||
-                           cancel_work == work)
+                           iter == work)
                                continue;
 
                        ksmbd_debug(SMB,
                                    "smb2 with mid %llu cancelled command = 0x%x\n",
                                    le64_to_cpu(hdr->MessageId),
                                    le16_to_cpu(chdr->Command));
-                       canceled = 1;
+                       cancel_work = iter;
                        break;
                }
                spin_unlock(&conn->request_lock);
        }
 
-       if (canceled) {
+       if (cancel_work) {
                cancel_work->state = KSMBD_WORK_CANCELLED;
                if (cancel_work->cancel_fn)
                        cancel_work->cancel_fn(cancel_work->cancel_argv);
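
The rewrite above stops reusing the list_for_each_entry() cursor after the loop: with that style of iteration the cursor never becomes NULL when nothing matched, so a separate result pointer (cancel_work) is set only on a hit and tested afterwards. A minimal sketch of the safe pattern (array-based stand-in, illustrative types rather than the ksmbd structures):

    #include <stddef.h>

    struct pending_work {
            unsigned long long id;
            int state;
    };

    /* Keep a dedicated "found" pointer instead of reusing the loop cursor
     * after the loop ends. */
    static struct pending_work *find_pending(struct pending_work *arr, size_t n,
                                             unsigned long long id)
    {
            struct pending_work *found = NULL;

            for (size_t i = 0; i < n; i++) {
                    if (arr[i].id == id) {
                            found = &arr[i];
                            break;
                    }
            }
            return found;
    }
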
@@ -6804,12 +6792,9 @@ int smb2_lock(struct ksmbd_work *work)
        int prior_lock = 0;
 
        ksmbd_debug(SMB, "Received lock request\n");
-       fp = ksmbd_lookup_fd_slow(work,
-                                 le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp) {
-               ksmbd_debug(SMB, "Invalid file id for lock : %llu\n",
-                           le64_to_cpu(req->VolatileFileId));
+               ksmbd_debug(SMB, "Invalid file id for lock : %llu\n", req->VolatileFileId);
                err = -ENOENT;
                goto out2;
        }
@@ -7164,8 +7149,8 @@ static int fsctl_copychunk(struct ksmbd_work *work,
 
        ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
 
-       rsp->VolatileFileId = cpu_to_le64(volatile_id);
-       rsp->PersistentFileId = cpu_to_le64(persistent_id);
+       rsp->VolatileFileId = volatile_id;
+       rsp->PersistentFileId = persistent_id;
        ci_rsp->ChunksWritten =
                cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
        ci_rsp->ChunkBytesWritten =
@@ -7379,8 +7364,8 @@ ipv6_retry:
        if (nii_rsp)
                nii_rsp->Next = 0;
 
-       rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
-       rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+       rsp->PersistentFileId = SMB2_NO_FID;
+       rsp->VolatileFileId = SMB2_NO_FID;
        return nbytes;
 }
 
@@ -7547,9 +7532,7 @@ static int fsctl_request_resume_key(struct ksmbd_work *work,
 {
        struct ksmbd_file *fp;
 
-       fp = ksmbd_lookup_fd_slow(work,
-                                 le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp)
                return -ENOENT;
 
@@ -7579,7 +7562,7 @@ int smb2_ioctl(struct ksmbd_work *work)
        if (work->next_smb2_rcv_hdr_off) {
                req = ksmbd_req_buf_next(work);
                rsp = ksmbd_resp_buf_next(work);
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -7590,14 +7573,14 @@ int smb2_ioctl(struct ksmbd_work *work)
        }
 
        if (!has_file_id(id))
-               id = le64_to_cpu(req->VolatileFileId);
+               id = req->VolatileFileId;
 
        if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) {
                rsp->hdr.Status = STATUS_NOT_SUPPORTED;
                goto out;
        }
 
-       cnt_code = le32_to_cpu(req->CntCode);
+       cnt_code = le32_to_cpu(req->CtlCode);
        ret = smb2_calc_max_out_buf_len(work, 48,
                                        le32_to_cpu(req->MaxOutputResponse));
        if (ret < 0) {
@@ -7656,8 +7639,8 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
 
                nbytes = sizeof(struct validate_negotiate_info_rsp);
-               rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
-               rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+               rsp->PersistentFileId = SMB2_NO_FID;
+               rsp->VolatileFileId = SMB2_NO_FID;
                break;
        case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
                ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
@@ -7703,10 +7686,10 @@ int smb2_ioctl(struct ksmbd_work *work)
                rsp->PersistentFileId = req->PersistentFileId;
                fsctl_copychunk(work,
                                (struct copychunk_ioctl_req *)&req->Buffer[0],
-                               le32_to_cpu(req->CntCode),
+                               le32_to_cpu(req->CtlCode),
                                le32_to_cpu(req->InputCount),
-                               le64_to_cpu(req->VolatileFileId),
-                               le64_to_cpu(req->PersistentFileId),
+                               req->VolatileFileId,
+                               req->PersistentFileId,
                                rsp);
                break;
        case FSCTL_SET_SPARSE:
@@ -7857,7 +7840,7 @@ dup_ext_out:
                goto out;
        }
 
-       rsp->CntCode = cpu_to_le32(cnt_code);
+       rsp->CtlCode = cpu_to_le32(cnt_code);
        rsp->InputCount = cpu_to_le32(0);
        rsp->InputOffset = cpu_to_le32(112);
        rsp->OutputOffset = cpu_to_le32(112);
@@ -7903,8 +7886,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
        char req_oplevel = 0, rsp_oplevel = 0;
        unsigned int oplock_change_type;
 
-       volatile_id = le64_to_cpu(req->VolatileFid);
-       persistent_id = le64_to_cpu(req->PersistentFid);
+       volatile_id = req->VolatileFid;
+       persistent_id = req->PersistentFid;
        req_oplevel = req->OplockLevel;
        ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n",
                    volatile_id, persistent_id, req_oplevel);
@@ -7999,8 +7982,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
        rsp->OplockLevel = rsp_oplevel;
        rsp->Reserved = 0;
        rsp->Reserved2 = 0;
-       rsp->VolatileFid = cpu_to_le64(volatile_id);
-       rsp->PersistentFid = cpu_to_le64(persistent_id);
+       rsp->VolatileFid = volatile_id;
+       rsp->PersistentFid = persistent_id;
        inc_rfc1001_len(work->response_buf, 24);
        return;
 
@@ -8500,7 +8483,7 @@ static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
        struct smb2_hdr *hdr = smb2_get_msg(old_buf);
        unsigned int orig_len = get_rfc1002_len(old_buf);
 
-       memset(tr_buf, 0, sizeof(struct smb2_transform_hdr) + 4);
+       /* tr_buf must be cleared by the caller */
        tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
        tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
        tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED);
index d494684..af45527 100644 (file)
 #define FILE_CREATED           0x00000002
 #define FILE_OVERWRITTEN       0x00000003
 
-/*
- * Size of the session key (crypto key encrypted with the password
- */
-#define SMB2_NTLMV2_SESSKEY_SIZE       16
-#define SMB2_SIGNATURE_SIZE            16
-#define SMB2_HMACSHA256_SIZE           32
-#define SMB2_CMACAES_SIZE              16
-#define SMB3_GCM128_CRYPTKEY_SIZE      16
-#define SMB3_GCM256_CRYPTKEY_SIZE      32
-
-/*
- * Size of the smb3 encryption/decryption keys
- */
-#define SMB3_ENC_DEC_KEY_SIZE          32
-
-/*
- * Size of the smb3 signing key
- */
-#define SMB3_SIGN_KEY_SIZE             16
-
-#define CIFS_CLIENT_CHALLENGE_SIZE     8
-#define SMB_SERVER_CHALLENGE_SIZE      8
-
 /* SMB2 Max Credits */
 #define SMB2_MAX_CREDITS               8192
 
-/* Maximum buffer size value we can send with 1 credit */
-#define SMB2_MAX_BUFFER_SIZE 65536
-
-#define NUMBER_OF_SMB2_COMMANDS        0x0013
-
 /* BB FIXME - analyze following length BB */
 #define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
 
 #define SMB21_DEFAULT_IOSIZE   (1024 * 1024)
-#define SMB3_DEFAULT_IOSIZE    (4 * 1024 * 1024)
 #define SMB3_DEFAULT_TRANS_SIZE        (1024 * 1024)
 #define SMB3_MIN_IOSIZE        (64 * 1024)
 #define SMB3_MAX_IOSIZE        (8 * 1024 * 1024)
  *
  */
 
-#define SMB2_ERROR_STRUCTURE_SIZE2     9
-#define SMB2_ERROR_STRUCTURE_SIZE2_LE  cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
-
-struct smb2_err_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;
-       __u8   ErrorContextCount;
-       __u8   Reserved;
-       __le32 ByteCount;  /* even if zero, at least one byte follows */
-       __u8   ErrorData[1];  /* variable length */
-} __packed;
-
 struct preauth_integrity_info {
        /* PreAuth integrity Hash ID */
        __le16                  Preauth_HashId;
@@ -116,8 +75,8 @@ struct create_durable_reconn_req {
        union {
                __u8  Reserved[16];
                struct {
-                       __le64 PersistentFileId;
-                       __le64 VolatileFileId;
+                       __u64 PersistentFileId;
+                       __u64 VolatileFileId;
                } Fid;
        } Data;
 } __packed;
@@ -126,8 +85,8 @@ struct create_durable_reconn_v2_req {
        struct create_context ccontext;
        __u8   Name[8];
        struct {
-               __le64 PersistentFileId;
-               __le64 VolatileFileId;
+               __u64 PersistentFileId;
+               __u64 VolatileFileId;
        } Fid;
        __u8 CreateGuid[16];
        __le32 Flags;
@@ -161,13 +120,6 @@ struct create_alloc_size_req {
        __le64 AllocationSize;
 } __packed;
 
-struct create_posix {
-       struct create_context ccontext;
-       __u8    Name[16];
-       __le32  Mode;
-       __u32   Reserved;
-} __packed;
-
 struct create_durable_rsp {
        struct create_context ccontext;
        __u8   Name[8];
@@ -209,45 +161,6 @@ struct create_posix_rsp {
        u8 SidBuffer[40];
 } __packed;
 
-#define SMB2_LEASE_NONE_LE                     cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING_LE             cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING_LE           cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING_LE            cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE   cpu_to_le32(0x02)
-
-#define SMB2_LEASE_KEY_SIZE                    16
-
-struct lease_context {
-       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
-       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-       __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le16 Epoch;
-       __le16 Reserved;
-} __packed;
-
-struct create_lease {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context_v2 lcontext;
-       __u8   Pad[4];
-} __packed;
-
 struct smb2_buffer_desc_v1 {
        __le64 offset;
        __le32 token;
@@ -256,63 +169,6 @@ struct smb2_buffer_desc_v1 {
 
 #define SMB2_0_IOCTL_IS_FSCTL 0x00000001
 
-struct duplicate_extents_to_file {
-       __u64 PersistentFileHandle; /* source file handle, opaque endianness */
-       __u64 VolatileFileHandle;
-       __le64 SourceFileOffset;
-       __le64 TargetFileOffset;
-       __le64 ByteCount;  /* Bytes to be copied */
-} __packed;
-
-struct smb2_ioctl_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 57 */
-       __le16 Reserved; /* offset from start of SMB2 header to write data */
-       __le32 CntCode;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __le32 InputOffset; /* Reserved MBZ */
-       __le32 InputCount;
-       __le32 MaxInputResponse;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 MaxOutputResponse;
-       __le32 Flags;
-       __le32 Reserved2;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_ioctl_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 49 */
-       __le16 Reserved; /* offset from start of SMB2 header to write data */
-       __le32 CntCode;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __le32 InputOffset; /* Reserved MBZ */
-       __le32 InputCount;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 Flags;
-       __le32 Reserved2;
-       __u8   Buffer[1];
-} __packed;
-
-struct validate_negotiate_info_req {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 DialectCount;
-       __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
-} __packed;
-
-struct validate_negotiate_info_rsp {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
 struct smb_sockaddr_in {
        __be16 Port;
        __be32 IPv4address;
@@ -357,7 +213,7 @@ struct file_object_buf_type1_ioctl_rsp {
 } __packed;
 
 struct resume_key_ioctl_rsp {
-       __le64 ResumeKey[3];
+       __u64 ResumeKey[3];
        __le32 ContextLength;
        __u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */
 } __packed;
@@ -386,167 +242,6 @@ struct file_sparse {
        __u8    SetSparse;
 } __packed;
 
-struct file_zero_data_information {
-       __le64  FileOffset;
-       __le64  BeyondFinalZero;
-} __packed;
-
-struct file_allocated_range_buffer {
-       __le64  file_offset;
-       __le64  length;
-} __packed;
-
-struct reparse_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __u8    DataBuffer[]; /* Variable Length */
-} __packed;
-
-/* SMB2 Notify Action Flags */
-#define FILE_ACTION_ADDED              0x00000001
-#define FILE_ACTION_REMOVED            0x00000002
-#define FILE_ACTION_MODIFIED           0x00000003
-#define FILE_ACTION_RENAMED_OLD_NAME   0x00000004
-#define FILE_ACTION_RENAMED_NEW_NAME   0x00000005
-#define FILE_ACTION_ADDED_STREAM       0x00000006
-#define FILE_ACTION_REMOVED_STREAM     0x00000007
-#define FILE_ACTION_MODIFIED_STREAM    0x00000008
-#define FILE_ACTION_REMOVED_BY_DELETE  0x00000009
-
-#define SMB2_LOCKFLAG_SHARED           0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE                0x0002
-#define SMB2_LOCKFLAG_UNLOCK           0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-#define SMB2_LOCKFLAG_MASK             0x0007
-
-struct smb2_lock_element {
-       __le64 Offset;
-       __le64 Length;
-       __le32 Flags;
-       __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 48 */
-       __le16 LockCount;
-       __le32 Reserved;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       /* Followed by at least one */
-       struct smb2_lock_element locks[1];
-} __packed;
-
-struct smb2_lock_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 4 */
-       __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS             0x01
-#define SMB2_RETURN_SINGLE_ENTRY       0x02
-#define SMB2_INDEX_SPECIFIED           0x04
-#define SMB2_REOPEN                    0x10
-
-struct smb2_query_directory_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   FileInformationClass;
-       __u8   Flags;
-       __le32 FileIndex;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __le16 FileNameOffset;
-       __le16 FileNameLength;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE       0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY   0x03
-#define SMB2_O_INFO_QUOTA      0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO   0x00000001
-#define GROUP_SECINFO   0x00000002
-#define DACL_SECINFO   0x00000004
-#define SACL_SECINFO   0x00000008
-#define LABEL_SECINFO   0x00000010
-#define ATTRIBUTE_SECINFO   0x00000020
-#define SCOPE_SECINFO   0x00000040
-#define BACKUP_SECINFO   0x00010000
-#define UNPROTECTED_SACL_SECINFO   0x10000000
-#define UNPROTECTED_DACL_SECINFO   0x20000000
-#define PROTECTED_SACL_SECINFO   0x40000000
-#define PROTECTED_DACL_SECINFO   0x80000000
-
-struct smb2_query_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 41 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 OutputBufferLength;
-       __le16 InputBufferOffset;
-       __u16  Reserved;
-       __le32 InputBufferLength;
-       __le32 AdditionalInformation;
-       __le32 Flags;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_set_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 BufferLength;
-       __le16 BufferOffset;
-       __u16  Reserved;
-       __le32 AdditionalInformation;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 2 */
-} __packed;
-
 /* FILE Info response size */
 #define FILE_DIRECTORY_INFORMATION_SIZE       1
 #define FILE_FULL_DIRECTORY_INFORMATION_SIZE  2
@@ -602,145 +297,11 @@ struct fs_type_info {
        long            magic_number;
 } __packed;
 
-struct smb2_oplock_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 24 */
-       __u8   OplockLevel;
-       __u8   Reserved;
-       __le32 Reserved2;
-       __le64  PersistentFid;
-       __le64  VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 44 */
-       __le16 Epoch;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 CurrentLeaseState;
-       __le32 NewLeaseState;
-       __le32 BreakReason;
-       __le32 AccessMaskHint;
-       __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 36 */
-       __le16 Reserved;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 LeaseState;
-       __le64 LeaseDuration;
-} __packed;
-
 /*
- *     PDU infolevel structure definitions
+ *     PDU query infolevel structure definitions
  *     BB consider moving to a different header
  */
 
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION          1 /* Query */
-#define FS_LABEL_INFORMATION           2 /* Set */
-#define FS_SIZE_INFORMATION            3 /* Query */
-#define FS_DEVICE_INFORMATION          4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION       5 /* Query */
-#define FS_CONTROL_INFORMATION         6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION       7 /* Query */
-#define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION     9 /* Query */
-#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION           100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
-       __le64 TotalAllocationUnits;
-       __le64 CallerAvailableAllocationUnits;
-       __le64 ActualAvailableAllocationUnits;
-       __le32 SectorsPerAllocationUnit;
-       __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
-       __le32 LogicalBytesPerSector;
-       __le32 PhysicalBytesPerSectorForAtomicity;
-       __le32 PhysicalBytesPerSectorForPerf;
-       __le32 FSEffPhysicalBytesPerSectorForAtomicity;
-       __le32 Flags;
-       __le32 ByteOffsetForSectorAlignment;
-       __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* File System Control Information */
-struct smb2_fs_control_info {
-       __le64 FreeSpaceStartFiltering;
-       __le64 FreeSpaceThreshold;
-       __le64 FreeSpaceStopFiltering;
-       __le64 DefaultQuotaThreshold;
-       __le64 DefaultQuotaLimit;
-       __le32 FileSystemControlFlags;
-       __le32 Padding;
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION     1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION         4
-#define FILE_STANDARD_INFORMATION      5
-#define FILE_INTERNAL_INFORMATION      6
-#define FILE_EA_INFORMATION            7
-#define FILE_ACCESS_INFORMATION                8
-#define FILE_NAME_INFORMATION          9
-#define FILE_RENAME_INFORMATION                10
-#define FILE_LINK_INFORMATION          11
-#define FILE_NAMES_INFORMATION         12
-#define FILE_DISPOSITION_INFORMATION   13
-#define FILE_POSITION_INFORMATION      14
-#define FILE_FULL_EA_INFORMATION       15
-#define FILE_MODE_INFORMATION          16
-#define FILE_ALIGNMENT_INFORMATION     17
-#define FILE_ALL_INFORMATION           18
-#define FILE_ALLOCATION_INFORMATION    19
-#define FILE_END_OF_FILE_INFORMATION   20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION                22
-#define FILE_PIPE_INFORMATION          23
-#define FILE_PIPE_LOCAL_INFORMATION    24
-#define FILE_PIPE_REMOTE_INFORMATION   25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION  27
-#define FILE_COMPRESSION_INFORMATION   28
-#define FILE_OBJECT_ID_INFORMATION     29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION  31
-#define FILE_QUOTA_INFORMATION         32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION  34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION      36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION    40
-#define FILE_SFIO_RESERVE_INFORMATION  44
-#define FILE_SFIO_VOLUME_INFORMATION   45
-#define FILE_HARD_LINK_INFORMATION     46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-
-#define OP_BREAK_STRUCT_SIZE_20                24
-#define OP_BREAK_STRUCT_SIZE_21                36
-
 struct smb2_file_access_info {
        __le32 AccessFlags;
 } __packed;
@@ -749,56 +310,6 @@ struct smb2_file_alignment_info {
        __le32 AlignmentRequirement;
 } __packed;
 
-struct smb2_file_internal_info {
-       __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
-       __u8   ReplaceIfExists; /* 1 = replace existing target with new */
-                               /* 0 = fail if target already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
-       __le32 FileNameLength;
-       char   FileName[];     /* New name to be assigned */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
-       __u8   ReplaceIfExists; /* 1 = replace existing link with new */
-                               /* 0 = fail if link already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
-       __le32 FileNameLength;
-       char   FileName[];     /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
-/*
- * This level 18, although with struct with same name is different from cifs
- * level 0x107. Level 0x107 has an extra u64 between AccessFlags and
- * CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
-       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le32 Attributes;
-       __u32  Pad1;            /* End of FILE_BASIC_INFO_INFO equivalent */
-       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
-       __le64 EndOfFile;       /* size ie offset to first free byte in file */
-       __le32 NumberOfLinks;   /* hard links */
-       __u8   DeletePending;
-       __u8   Directory;
-       __u16  Pad2;            /* End of FILE_STANDARD_INFO equivalent */
-       __le64 IndexNumber;
-       __le32 EASize;
-       __le32 AccessFlags;
-       __le64 CurrentByteOffset;
-       __le32 Mode;
-       __le32 AlignmentRequirement;
-       __le32 FileNameLength;
-       char   FileName[1];
-} __packed; /* level 18 Query */
-
 struct smb2_file_basic_info { /* data block encoding of response to level 18 */
        __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
        __le64 LastAccessTime;
@@ -821,10 +332,6 @@ struct smb2_file_stream_info {
        char   StreamName[];
 } __packed;
 
-struct smb2_file_eof_info { /* encoding of request for level 10 */
-       __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
 struct smb2_file_ntwrk_info {
        __le64 CreationTime;
        __le64 LastAccessTime;
@@ -915,34 +422,6 @@ struct create_sd_buf_req {
        struct smb_ntsd ntsd;
 } __packed;
 
-/* Find File infolevels */
-#define SMB_FIND_FILE_POSIX_INFO       0x064
-
-/* Level 100 query info */
-struct smb311_posix_qinfo {
-       __le64 CreationTime;
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le64 EndOfFile;
-       __le64 AllocationSize;
-       __le32 DosAttributes;
-       __le64 Inode;
-       __le32 DeviceId;
-       __le32 Zero;
-       /* beginning of POSIX Create Context Response */
-       __le32 HardLinks;
-       __le32 ReparseTag;
-       __le32 Mode;
-       u8     Sids[];
-       /*
-        * var sized owner SID
-        * var sized group SID
-        * le32 filenamelength
-        * u8  filename[]
-        */
-} __packed;
-
 struct smb2_posix_info {
        __le32 NextEntryOffset;
        __u32 Ignored;
index 82a1429..8fef9de 100644 (file)
@@ -476,7 +476,7 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
 
        switch (event) {
        case NETDEV_UP:
-               if (netdev->priv_flags & IFF_BRIDGE_PORT)
+               if (netif_is_bridge_port(netdev))
                        return NOTIFY_OK;
 
                list_for_each_entry(iface, &iface_list, entry) {
@@ -585,7 +585,7 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
 
                rtnl_lock();
                for_each_netdev(&init_net, netdev) {
-                       if (netdev->priv_flags & IFF_BRIDGE_PORT)
+                       if (netif_is_bridge_port(netdev))
                                continue;
                        if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
                                return -ENOMEM;
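
Both hunks switch from open-coding the flag test to the existing helper. For reference only (not part of this patch), netif_is_bridge_port() in include/linux/netdevice.h is equivalent to the following sketch:

	static inline bool netif_is_bridge_port(const struct net_device *dev)
	{
		return dev->priv_flags & IFF_BRIDGE_PORT;
	}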
index 6e9844b..a0a36bf 100644 (file)
@@ -2112,22 +2112,23 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
 {
        unsigned int max = READ_ONCE(sysctl_mount_max);
-       unsigned int mounts = 0, old, pending, sum;
+       unsigned int mounts = 0;
        struct mount *p;
 
+       if (ns->mounts >= max)
+               return -ENOSPC;
+       max -= ns->mounts;
+       if (ns->pending_mounts >= max)
+               return -ENOSPC;
+       max -= ns->pending_mounts;
+
        for (p = mnt; p; p = next_mnt(p, mnt))
                mounts++;
 
-       old = ns->mounts;
-       pending = ns->pending_mounts;
-       sum = old + pending;
-       if ((old > sum) ||
-           (pending > sum) ||
-           (max < sum) ||
-           (mounts > (max - sum)))
+       if (mounts > max)
                return -ENOSPC;
 
-       ns->pending_mounts = pending + mounts;
+       ns->pending_mounts += mounts;
        return 0;
 }
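
As a worked example of the simplified accounting: with sysctl_mount_max at 100000, ns->mounts at 99990 and ns->pending_mounts at 5, the two early checks leave a budget of 5, so a propagated tree of 6 mounts is rejected with -ENOSPC while a tree of 5 is admitted and pending_mounts becomes 10, the same outcome as the old overflow-guarded sum but without the intermediate additions.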
 
@@ -2921,7 +2922,7 @@ static int do_move_mount_old(struct path *path, const char *old_name)
  * add a mount into a namespace's mount tree
  */
 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
-                       struct path *path, int mnt_flags)
+                       const struct path *path, int mnt_flags)
 {
        struct mount *parent = real_mount(path->mnt);
 
@@ -3044,7 +3045,7 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
        return err;
 }
 
-int finish_automount(struct vfsmount *m, struct path *path)
+int finish_automount(struct vfsmount *m, const struct path *path)
 {
        struct dentry *dentry = path->dentry;
        struct mountpoint *mp;
index c15bfc9..f684c0c 100644 (file)
@@ -1,5 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 
-netfs-y := read_helper.o stats.o
+netfs-y := \
+       buffered_read.o \
+       io.o \
+       main.o \
+       objects.o
+
+netfs-$(CONFIG_NETFS_STATS) += stats.o
 
 obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
new file mode 100644 (file)
index 0000000..281a88a
--- /dev/null
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level buffered read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Unlock the folios in a read operation.  We need to set PG_fscache on any
+ * folios we're going to write back before we unlock them.
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+       struct folio *folio;
+       unsigned int iopos, account = 0;
+       pgoff_t start_page = rreq->start / PAGE_SIZE;
+       pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+       bool subreq_failed = false;
+
+       XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+       if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
+               __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+               list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+                       __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
+               }
+       }
+
+       /* Walk through the pagecache and the I/O request lists simultaneously.
+        * We may have a mixture of cached and uncached sections and we only
+        * really want to write out the uncached sections.  This is slightly
+        * complicated by the possibility that we might have huge pages with a
+        * mixture inside.
+        */
+       subreq = list_first_entry(&rreq->subrequests,
+                                 struct netfs_io_subrequest, rreq_link);
+       iopos = 0;
+       subreq_failed = (subreq->error < 0);
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
+
+       rcu_read_lock();
+       xas_for_each(&xas, folio, last_page) {
+               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+               unsigned int pgend = pgpos + folio_size(folio);
+               bool pg_failed = false;
+
+               for (;;) {
+                       if (!subreq) {
+                               pg_failed = true;
+                               break;
+                       }
+                       if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+                               folio_start_fscache(folio);
+                       pg_failed |= subreq_failed;
+                       if (pgend < iopos + subreq->len)
+                               break;
+
+                       account += subreq->transferred;
+                       iopos += subreq->len;
+                       if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+                               subreq = list_next_entry(subreq, rreq_link);
+                               subreq_failed = (subreq->error < 0);
+                       } else {
+                               subreq = NULL;
+                               subreq_failed = false;
+                       }
+                       if (pgend == iopos)
+                               break;
+               }
+
+               if (!pg_failed) {
+                       flush_dcache_folio(folio);
+                       folio_mark_uptodate(folio);
+               }
+
+               if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+                       if (folio_index(folio) == rreq->no_unlock_folio &&
+                           test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
+                               _debug("no unlock");
+                       else
+                               folio_unlock(folio);
+               }
+       }
+       rcu_read_unlock();
+
+       task_io_account_read(account);
+       if (rreq->netfs_ops->done)
+               rreq->netfs_ops->done(rreq);
+}
+
+static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
+                                        loff_t *_start, size_t *_len, loff_t i_size)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+       if (cres->ops && cres->ops->expand_readahead)
+               cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
+
+static void netfs_rreq_expand(struct netfs_io_request *rreq,
+                             struct readahead_control *ractl)
+{
+       /* Give the cache a chance to change the request parameters.  The
+        * resultant request must contain the original region.
+        */
+       netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+
+       /* Give the netfs a chance to change the request parameters.  The
+        * resultant request must contain the original region.
+        */
+       if (rreq->netfs_ops->expand_readahead)
+               rreq->netfs_ops->expand_readahead(rreq);
+
+       /* Expand the request if the cache wants it to start earlier.  Note
+        * that the expansion may get further extended if the VM wishes to
+        * insert THPs and the preferred start and/or end wind up in the middle
+        * of THPs.
+        *
+        * If this is the case, however, the THP size should be an integer
+        * multiple of the cache granule size, so we get a whole number of
+        * granules to deal with.
+        */
+       if (rreq->start  != readahead_pos(ractl) ||
+           rreq->len != readahead_length(ractl)) {
+               readahead_expand(ractl, rreq->start, rreq->len);
+               rreq->start  = readahead_pos(ractl);
+               rreq->len = readahead_length(ractl);
+
+               trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+                                netfs_read_trace_expanded);
+       }
+}
+
+/**
+ * netfs_readahead - Helper to manage a read request
+ * @ractl: The description of the readahead request
+ *
+ * Fulfil a readahead request by drawing data from the cache if possible, or
+ * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
+ * requests from different sources will get munged together.  If necessary, the
+ * readahead window can be expanded in either direction to a more convenient
+ * alignment for RPC efficiency or to make storage in the cache feasible.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+void netfs_readahead(struct readahead_control *ractl)
+{
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
+       int ret;
+
+       _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+
+       if (readahead_count(ractl) == 0)
+               return;
+
+       rreq = netfs_alloc_request(ractl->mapping, ractl->file,
+                                  readahead_pos(ractl),
+                                  readahead_length(ractl),
+                                  NETFS_READAHEAD);
+       if (IS_ERR(rreq))
+               return;
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto cleanup_free;
+       }
+
+       netfs_stat(&netfs_n_rh_readahead);
+       trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+                        netfs_read_trace_readahead);
+
+       netfs_rreq_expand(rreq, ractl);
+
+       /* Drop the refs on the folios here rather than in the cache or
+        * filesystem.  The locks will be dropped in netfs_rreq_unlock().
+        */
+       while (readahead_folio(ractl))
+               ;
+
+       netfs_begin_read(rreq, false);
+       return;
+
+cleanup_free:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+       return;
+}
+EXPORT_SYMBOL(netfs_readahead);
+
+/**
+ * netfs_readpage - Helper to manage a readpage request
+ * @file: The file to read from
+ * @subpage: A subpage of the folio to read
+ *
+ * Fulfil a readpage request by drawing data from the cache if possible, or the
+ * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
+ * from different sources will get munged together.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_readpage(struct file *file, struct page *subpage)
+{
+       struct folio *folio = page_folio(subpage);
+       struct address_space *mapping = folio_file_mapping(folio);
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(mapping->host);
+       int ret;
+
+       _enter("%lx", folio_index(folio));
+
+       rreq = netfs_alloc_request(mapping, file,
+                                  folio_file_pos(folio), folio_size(folio),
+                                  NETFS_READPAGE);
+       if (IS_ERR(rreq)) {
+               ret = PTR_ERR(rreq);
+               goto alloc_error;
+       }
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto discard;
+       }
+
+       netfs_stat(&netfs_n_rh_readpage);
+       trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+       return netfs_begin_read(rreq, true);
+
+discard:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+alloc_error:
+       folio_unlock(folio);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_readpage);
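
As a usage illustration (a hedged sketch, not part of this patch; "myfs" and its aops table are hypothetical), a filesystem that has attached a netfs context to its inodes and filled in ctx->ops can point its address_space_operations straight at these helpers:

	/* Hypothetical wiring of the read helpers into a filesystem's aops.
	 * Assumes the inode carries a netfs_i_context whose ->ops table
	 * supplies at least issue_read().
	 */
	static const struct address_space_operations myfs_aops = {
		.readahead	= netfs_readahead,
		.readpage	= netfs_readpage,
		/* plus the filesystem's usual write-side and misc aops */
	};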
+
+/*
+ * Prepare a folio for writing without reading first
+ * @folio: The folio being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ * @always_fill: T if the folio should always be completely filled/cleared
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full folio write
+ * - write that lies in a folio that is completely beyond EOF
+ * - write that covers the folio from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the folio and return true. Otherwise, return false.
+ */
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
+                                bool always_fill)
+{
+       struct inode *inode = folio_inode(folio);
+       loff_t i_size = i_size_read(inode);
+       size_t offset = offset_in_folio(folio, pos);
+       size_t plen = folio_size(folio);
+
+       if (unlikely(always_fill)) {
+               if (pos - offset + len <= i_size)
+                       return false; /* Page entirely before EOF */
+               zero_user_segment(&folio->page, 0, plen);
+               folio_mark_uptodate(folio);
+               return true;
+       }
+
+       /* Full folio write */
+       if (offset == 0 && len >= plen)
+               return true;
+
+       /* Page entirely beyond the end of the file */
+       if (pos - offset >= i_size)
+               goto zero_out;
+
+       /* Write that covers from the start of the folio to EOF or beyond */
+       if (offset == 0 && (pos + len) >= i_size)
+               goto zero_out;
+
+       return false;
+zero_out:
+       zero_user_segments(&folio->page, 0, offset, offset + len, plen);
+       return true;
+}
+
+/**
+ * netfs_write_begin - Helper to prepare for writing
+ * @file: The file to read from
+ * @mapping: The mapping to read from
+ * @pos: File position at which the write will begin
+ * @len: The length of the write (may extend beyond the end of the folio chosen)
+ * @aop_flags: AOP_* flags
+ * @_folio: Where to put the resultant folio
+ * @_fsdata: Place for the netfs to store a cookie
+ *
+ * Pre-read data for a write-begin request by drawing data from the cache if
+ * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.  If
+ * necessary, the readahead window can be expanded in either direction to a
+ * more convenient alignment for RPC efficiency or to make storage in the cache
+ * feasible.
+ *
+ * The calling netfs must provide a table of operations, only one of which,
+ * issue_op, is mandatory.
+ *
+ * The check_write_begin() operation can be provided to check for and flush
+ * conflicting writes once the folio is grabbed and locked.  It is passed a
+ * pointer to the fsdata cookie that gets returned to the VM to be passed to
+ * write_end.  It is permitted to sleep.  It should return 0 if the request
+ * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
+ * be re-obtained; or return an error.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_write_begin(struct file *file, struct address_space *mapping,
+                     loff_t pos, unsigned int len, unsigned int aop_flags,
+                     struct folio **_folio, void **_fsdata)
+{
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
+       struct folio *folio;
+       unsigned int fgp_flags;
+       pgoff_t index = pos >> PAGE_SHIFT;
+       int ret;
+
+       DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
+
+retry:
+       fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+       if (aop_flags & AOP_FLAG_NOFS)
+               fgp_flags |= FGP_NOFS;
+       folio = __filemap_get_folio(mapping, index, fgp_flags,
+                                   mapping_gfp_mask(mapping));
+       if (!folio)
+               return -ENOMEM;
+
+       if (ctx->ops->check_write_begin) {
+               /* Allow the netfs (eg. ceph) to flush conflicts. */
+               ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
+               if (ret < 0) {
+                       trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
+                       if (ret == -EAGAIN)
+                               goto retry;
+                       goto error;
+               }
+       }
+
+       if (folio_test_uptodate(folio))
+               goto have_folio;
+
+       /* If the page is beyond the EOF, we want to clear it - unless it's
+        * within the cache granule containing the EOF, in which case we need
+        * to preload the granule.
+        */
+       if (!netfs_is_cache_enabled(ctx) &&
+           netfs_skip_folio_read(folio, pos, len, false)) {
+               netfs_stat(&netfs_n_rh_write_zskip);
+               goto have_folio_no_wait;
+       }
+
+       rreq = netfs_alloc_request(mapping, file,
+                                  folio_file_pos(folio), folio_size(folio),
+                                  NETFS_READ_FOR_WRITE);
+       if (IS_ERR(rreq)) {
+               ret = PTR_ERR(rreq);
+               goto error;
+       }
+       rreq->no_unlock_folio   = folio_index(folio);
+       __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto error_put;
+       }
+
+       netfs_stat(&netfs_n_rh_write_begin);
+       trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
+
+       /* Expand the request to meet caching requirements and download
+        * preferences.
+        */
+       ractl._nr_pages = folio_nr_pages(folio);
+       netfs_rreq_expand(rreq, &ractl);
+
+       /* We hold the folio locks, so we can drop the references */
+       folio_get(folio);
+       while (readahead_folio(&ractl))
+               ;
+
+       ret = netfs_begin_read(rreq, true);
+       if (ret < 0)
+               goto error;
+
+have_folio:
+       ret = folio_wait_fscache_killable(folio);
+       if (ret < 0)
+               goto error;
+have_folio_no_wait:
+       *_folio = folio;
+       _leave(" = 0");
+       return 0;
+
+error_put:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+error:
+       folio_unlock(folio);
+       folio_put(folio);
+       _leave(" = %d", ret);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_write_begin);
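
A sketch of the calling side (hypothetical "myfs"; assumes the 5.18 ->write_begin prototype, which still passes struct page back while this helper returns a folio):

	static int myfs_write_begin(struct file *file, struct address_space *mapping,
				    loff_t pos, unsigned int len, unsigned int flags,
				    struct page **pagep, void **fsdata)
	{
		struct folio *folio;
		int ret;

		/* Lock the folio and pre-read it from the cache/server if needed. */
		ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata);
		if (ret < 0)
			return ret;

		/* Hand back the page covering @pos to the generic write path. */
		*pagep = folio_file_page(folio, pos / PAGE_SIZE);
		return 0;
	}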
index b7f2c44..b7b0e3d 100644 (file)
@@ -5,6 +5,10 @@
  * Written by David Howells (dhowells@redhat.com)
  */
 
+#include <linux/netfs.h>
+#include <linux/fscache.h>
+#include <trace/events/netfs.h>
+
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
 #define pr_fmt(fmt) "netfs: " fmt
 
 /*
- * read_helper.c
+ * buffered_read.c
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
+
+/*
+ * io.c
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
+
+/*
+ * main.c
  */
 extern unsigned int netfs_debug;
 
 /*
+ * objects.c
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+                                            struct file *file,
+                                            loff_t start, size_t len,
+                                            enum netfs_io_origin origin);
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+                      enum netfs_rreq_ref_trace what);
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
+
+static inline void netfs_see_request(struct netfs_io_request *rreq,
+                                    enum netfs_rreq_ref_trace what)
+{
+       trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
+}
+
+/*
  * stats.c
  */
 #ifdef CONFIG_NETFS_STATS
@@ -55,6 +88,21 @@ static inline void netfs_stat_d(atomic_t *stat)
 #define netfs_stat_d(x) do {} while(0)
 #endif
 
+/*
+ * Miscellaneous functions.
+ */
+static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct fscache_cookie *cookie = ctx->cache;
+
+       return fscache_cookie_valid(cookie) && cookie->cache_priv &&
+               fscache_cookie_enabled(cookie);
+#else
+       return false;
+#endif
+}
+
 /*****************************************************************************/
 /*
  * debug tracing
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
new file mode 100644 (file)
index 0000000..4289258
--- /dev/null
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Clear the unread part of an I/O request.
+ */
+static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
+{
+       struct iov_iter iter;
+
+       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
+                       subreq->start + subreq->transferred,
+                       subreq->len   - subreq->transferred);
+       iov_iter_zero(iov_iter_count(&iter), &iter);
+}
+
+static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
+                                       bool was_async)
+{
+       struct netfs_io_subrequest *subreq = priv;
+
+       netfs_subreq_terminated(subreq, transferred_or_error, was_async);
+}
+
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_from_cache(struct netfs_io_request *rreq,
+                                 struct netfs_io_subrequest *subreq,
+                                 enum netfs_read_from_hole read_hole)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+       struct iov_iter iter;
+
+       netfs_stat(&netfs_n_rh_read);
+       iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+                       subreq->start + subreq->transferred,
+                       subreq->len   - subreq->transferred);
+
+       cres->ops->read(cres, subreq->start, &iter, read_hole,
+                       netfs_cache_read_terminated, subreq);
+}
+
+/*
+ * Fill a subrequest region with zeroes.
+ */
+static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
+                                  struct netfs_io_subrequest *subreq)
+{
+       netfs_stat(&netfs_n_rh_zero);
+       __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+       netfs_subreq_terminated(subreq, 0, false);
+}
+
+/*
+ * Ask the netfs to issue a read request to the server for us.
+ *
+ * The netfs is expected to read from subreq->pos + subreq->transferred to
+ * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
+ * buffer prior to the transferred point as it might clobber dirty data
+ * obtained from the cache.
+ *
+ * Alternatively, the netfs is allowed to indicate one of two things:
+ *
+ * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
+ *   make progress.
+ *
+ * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
+ *   cleared.
+ */
+static void netfs_read_from_server(struct netfs_io_request *rreq,
+                                  struct netfs_io_subrequest *subreq)
+{
+       netfs_stat(&netfs_n_rh_download);
+       rreq->netfs_ops->issue_read(subreq);
+}
+
+/*
+ * Release those waiting.
+ */
+static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
+{
+       trace_netfs_rreq(rreq, netfs_rreq_trace_done);
+       netfs_clear_subrequests(rreq, was_async);
+       netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
+}
+
+/*
+ * Deal with the completion of writing the data to the cache.  We have to clear
+ * the PG_fscache bits on the folios involved and release the caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+                                         bool was_async)
+{
+       struct netfs_io_subrequest *subreq;
+       struct folio *folio;
+       pgoff_t unlocked = 0;
+       bool have_unlocked = false;
+
+       rcu_read_lock();
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+               xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+                       /* We might have multiple writes from the same huge
+                        * folio, but we mustn't unlock a folio more than once.
+                        */
+                       if (have_unlocked && folio_index(folio) <= unlocked)
+                               continue;
+                       unlocked = folio_index(folio);
+                       folio_end_fscache(folio);
+                       have_unlocked = true;
+               }
+       }
+
+       rcu_read_unlock();
+       netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+                                      bool was_async)
+{
+       struct netfs_io_subrequest *subreq = priv;
+       struct netfs_io_request *rreq = subreq->rreq;
+
+       if (IS_ERR_VALUE(transferred_or_error)) {
+               netfs_stat(&netfs_n_rh_write_failed);
+               trace_netfs_failure(rreq, subreq, transferred_or_error,
+                                   netfs_fail_copy_to_cache);
+       } else {
+               netfs_stat(&netfs_n_rh_write_done);
+       }
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+       /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_copy_ops))
+               netfs_rreq_unmark_after_write(rreq, was_async);
+
+       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * Perform any outstanding writes to the cache.  We inherit a ref from the
+ * caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+       struct netfs_io_subrequest *subreq, *next, *p;
+       struct iov_iter iter;
+       int ret;
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+       /* We don't want terminating writes trying to wake us up whilst we're
+        * still going through the list.
+        */
+       atomic_inc(&rreq->nr_copy_ops);
+
+       list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+               if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+                       list_del_init(&subreq->rreq_link);
+                       netfs_put_subrequest(subreq, false,
+                                            netfs_sreq_trace_put_no_copy);
+               }
+       }
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               /* Amalgamate adjacent writes */
+               while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+                       next = list_next_entry(subreq, rreq_link);
+                       if (next->start != subreq->start + subreq->len)
+                               break;
+                       subreq->len += next->len;
+                       list_del_init(&next->rreq_link);
+                       netfs_put_subrequest(next, false,
+                                            netfs_sreq_trace_put_merged);
+               }
+
+               ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+                                              rreq->i_size, true);
+               if (ret < 0) {
+                       trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+                       trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+                       continue;
+               }
+
+               iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
+                               subreq->start, subreq->len);
+
+               atomic_inc(&rreq->nr_copy_ops);
+               netfs_stat(&netfs_n_rh_write);
+               netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+               trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+               cres->ops->write(cres, subreq->start, &iter,
+                                netfs_rreq_copy_terminated, subreq);
+       }
+
+       /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_copy_ops))
+               netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+
+       netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
+{
+       rreq->work.func = netfs_rreq_write_to_cache_work;
+       if (!queue_work(system_unbound_wq, &rreq->work))
+               BUG();
+}
+
+/*
+ * Handle a short read.
+ */
+static void netfs_rreq_short_read(struct netfs_io_request *rreq,
+                                 struct netfs_io_subrequest *subreq)
+{
+       __clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+       __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
+
+       netfs_stat(&netfs_n_rh_short_read);
+       trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
+
+       netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
+       atomic_inc(&rreq->nr_outstanding);
+       if (subreq->source == NETFS_READ_FROM_CACHE)
+               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
+       else
+               netfs_read_from_server(rreq, subreq);
+}
+
+/*
+ * Resubmit any short or failed operations.  Returns true if we got the rreq
+ * ref back.
+ */
+static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       WARN_ON(in_interrupt());
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+
+       /* We don't want terminating submissions trying to wake us up whilst
+        * we're still going through the list.
+        */
+       atomic_inc(&rreq->nr_outstanding);
+
+       __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               if (subreq->error) {
+                       if (subreq->source != NETFS_READ_FROM_CACHE)
+                               break;
+                       subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+                       subreq->error = 0;
+                       netfs_stat(&netfs_n_rh_download_instead);
+                       trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
+                       netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+                       atomic_inc(&rreq->nr_outstanding);
+                       netfs_read_from_server(rreq, subreq);
+               } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
+                       netfs_rreq_short_read(rreq, subreq);
+               }
+       }
+
+       /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_outstanding))
+               return true;
+
+       wake_up_var(&rreq->nr_outstanding);
+       return false;
+}
+
+/*
+ * Check to see if the data read is still valid.
+ */
+static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       if (!rreq->netfs_ops->is_still_valid ||
+           rreq->netfs_ops->is_still_valid(rreq))
+               return;
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               if (subreq->source == NETFS_READ_FROM_CACHE) {
+                       subreq->error = -ESTALE;
+                       __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+               }
+       }
+}
+
+/*
+ * Assess the state of a read request and decide what to do next.
+ *
+ * Note that we could be in an ordinary kernel thread, on a workqueue or in
+ * softirq context at this point.  We inherit a ref from the caller.
+ */
+static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
+{
+       trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
+
+again:
+       netfs_rreq_is_still_valid(rreq);
+
+       if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
+           test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
+               if (netfs_rreq_perform_resubmissions(rreq))
+                       goto again;
+               return;
+       }
+
+       netfs_rreq_unlock_folios(rreq);
+
+       clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+       wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+       if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
+               return netfs_rreq_write_to_cache(rreq);
+
+       netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_work(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+       netfs_rreq_assess(rreq, false);
+}
+
+/*
+ * Handle the completion of all outstanding I/O operations on a read request.
+ * We inherit a ref from the caller.
+ */
+static void netfs_rreq_terminated(struct netfs_io_request *rreq,
+                                 bool was_async)
+{
+       if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
+           was_async) {
+               if (!queue_work(system_unbound_wq, &rreq->work))
+                       BUG();
+       } else {
+               netfs_rreq_assess(rreq, was_async);
+       }
+}
+
+/**
+ * netfs_subreq_terminated - Note the termination of an I/O operation.
+ * @subreq: The I/O request that has terminated.
+ * @transferred_or_error: The amount of data transferred or an error code.
+ * @was_async: The termination was asynchronous
+ *
+ * This tells the read helper that a contributory I/O operation has terminated,
+ * one way or another, and that it should integrate the results.
+ *
+ * The caller indicates in @transferred_or_error the outcome of the operation,
+ * supplying a positive value to indicate the number of bytes transferred, 0 to
+ * indicate a failure to transfer anything that should be retried or a negative
+ * error code.  The helper will look after reissuing I/O operations as
+ * appropriate and writing downloaded data to the cache.
+ *
+ * If @was_async is true, the caller might be running in softirq or interrupt
+ * context and we can't sleep.
+ */
+void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
+                            ssize_t transferred_or_error,
+                            bool was_async)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       int u;
+
+       _enter("[%u]{%llx,%lx},%zd",
+              subreq->debug_index, subreq->start, subreq->flags,
+              transferred_or_error);
+
+       switch (subreq->source) {
+       case NETFS_READ_FROM_CACHE:
+               netfs_stat(&netfs_n_rh_read_done);
+               break;
+       case NETFS_DOWNLOAD_FROM_SERVER:
+               netfs_stat(&netfs_n_rh_download_done);
+               break;
+       default:
+               break;
+       }
+
+       if (IS_ERR_VALUE(transferred_or_error)) {
+               subreq->error = transferred_or_error;
+               trace_netfs_failure(rreq, subreq, transferred_or_error,
+                                   netfs_fail_read);
+               goto failed;
+       }
+
+       if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+                "Subreq overread: R%x[%x] %zd > %zu - %zu",
+                rreq->debug_id, subreq->debug_index,
+                transferred_or_error, subreq->len, subreq->transferred))
+               transferred_or_error = subreq->len - subreq->transferred;
+
+       subreq->error = 0;
+       subreq->transferred += transferred_or_error;
+       if (subreq->transferred < subreq->len)
+               goto incomplete;
+
+complete:
+       __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+       if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+               set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+
+out:
+       trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+       /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+       u = atomic_dec_return(&rreq->nr_outstanding);
+       if (u == 0)
+               netfs_rreq_terminated(rreq, was_async);
+       else if (u == 1)
+               wake_up_var(&rreq->nr_outstanding);
+
+       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+       return;
+
+incomplete:
+       if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
+               netfs_clear_unread(subreq);
+               subreq->transferred = subreq->len;
+               goto complete;
+       }
+
+       if (transferred_or_error == 0) {
+               if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+                       subreq->error = -ENODATA;
+                       goto failed;
+               }
+       } else {
+               __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+       }
+
+       __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+       set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       goto out;
+
+failed:
+       if (subreq->source == NETFS_READ_FROM_CACHE) {
+               netfs_stat(&netfs_n_rh_read_failed);
+               set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       } else {
+               netfs_stat(&netfs_n_rh_download_failed);
+               set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+               rreq->error = subreq->error;
+       }
+       goto out;
+}
+EXPORT_SYMBOL(netfs_subreq_terminated);
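
For context, a minimal sketch of the producer side of this call (hypothetical "myfs"; myfs_fetch() is a placeholder for the filesystem's actual transport, not a real API):

	/* Hypothetical netfs_request_ops::issue_read implementation.  It reads
	 * the remaining bytes of the slice and reports the outcome back to the
	 * helper library, which handles retries, zero-filling and cache writeback.
	 */
	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
	{
		ssize_t ret;

		/* Read subreq->len - subreq->transferred bytes starting at
		 * subreq->start + subreq->transferred into the pagecache of
		 * subreq->rreq->mapping (typically via iov_iter_xarray()).
		 */
		ret = myfs_fetch(subreq);	/* placeholder RPC */

		netfs_subreq_terminated(subreq, ret, false);
	}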
+
+static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
+                                                      loff_t i_size)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+       if (cres->ops)
+               return cres->ops->prepare_read(subreq, i_size);
+       if (subreq->start >= rreq->i_size)
+               return NETFS_FILL_WITH_ZEROES;
+       return NETFS_DOWNLOAD_FROM_SERVER;
+}
+
+/*
+ * Work out what sort of subrequest the next one will be.
+ */
+static enum netfs_io_source
+netfs_rreq_prepare_read(struct netfs_io_request *rreq,
+                       struct netfs_io_subrequest *subreq)
+{
+       enum netfs_io_source source;
+
+       _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+
+       source = netfs_cache_prepare_read(subreq, rreq->i_size);
+       if (source == NETFS_INVALID_READ)
+               goto out;
+
+       if (source == NETFS_DOWNLOAD_FROM_SERVER) {
+               /* Call out to the netfs to let it shrink the request to fit
+                * its own I/O sizes and boundaries.  If it shrinks it here, it
+                * will be called again to make simultaneous calls; if it wants
+                * to make serial calls, it can indicate a short read and then
+                * we will call it again.
+                */
+               if (subreq->len > rreq->i_size - subreq->start)
+                       subreq->len = rreq->i_size - subreq->start;
+
+               if (rreq->netfs_ops->clamp_length &&
+                   !rreq->netfs_ops->clamp_length(subreq)) {
+                       source = NETFS_INVALID_READ;
+                       goto out;
+               }
+       }
+
+       if (WARN_ON(subreq->len == 0))
+               source = NETFS_INVALID_READ;
+
+out:
+       subreq->source = source;
+       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+       return source;
+}
+
+/*
+ * Slice off a piece of a read request and submit an I/O request for it.
+ */
+static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
+                                   unsigned int *_debug_index)
+{
+       struct netfs_io_subrequest *subreq;
+       enum netfs_io_source source;
+
+       subreq = netfs_alloc_subrequest(rreq);
+       if (!subreq)
+               return false;
+
+       subreq->debug_index     = (*_debug_index)++;
+       subreq->start           = rreq->start + rreq->submitted;
+       subreq->len             = rreq->len   - rreq->submitted;
+
+       _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+       list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+
+       /* Call out to the cache to find out what it can do with the remaining
+        * subset.  It tells us in subreq->flags what it decided should be done
+        * and adjusts subreq->len down if the subset crosses a cache boundary.
+        *
+        * Then when we hand the subset, it can choose to take a subset of that
+        * (the starts must coincide), in which case, we go around the loop
+        * again and ask it to download the next piece.
+        */
+       source = netfs_rreq_prepare_read(rreq, subreq);
+       if (source == NETFS_INVALID_READ)
+               goto subreq_failed;
+
+       atomic_inc(&rreq->nr_outstanding);
+
+       rreq->submitted += subreq->len;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+       switch (source) {
+       case NETFS_FILL_WITH_ZEROES:
+               netfs_fill_with_zeroes(rreq, subreq);
+               break;
+       case NETFS_DOWNLOAD_FROM_SERVER:
+               netfs_read_from_server(rreq, subreq);
+               break;
+       case NETFS_READ_FROM_CACHE:
+               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
+               break;
+       default:
+               BUG();
+       }
+
+       return true;
+
+subreq_failed:
+       rreq->error = subreq->error;
+       netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
+       return false;
+}
+
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+       unsigned int debug_index = 0;
+       int ret;
+
+       _enter("R=%x %llx-%llx",
+              rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+       if (rreq->len == 0) {
+               pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+               return -EIO;
+       }
+
+       INIT_WORK(&rreq->work, netfs_rreq_work);
+
+       if (sync)
+               netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+       /* Chop the read into slices according to what the cache and the netfs
+        * want and submit each one.
+        */
+       atomic_set(&rreq->nr_outstanding, 1);
+       do {
+               if (!netfs_rreq_submit_slice(rreq, &debug_index))
+                       break;
+
+       } while (rreq->submitted < rreq->len);
+
+       if (sync) {
+               /* Keep nr_outstanding incremented so that the ref always belongs to
+                * us, and the service code isn't punted off to a random thread pool to
+                * process.
+                */
+               for (;;) {
+                       wait_var_event(&rreq->nr_outstanding,
+                                      atomic_read(&rreq->nr_outstanding) == 1);
+                       netfs_rreq_assess(rreq, false);
+                       if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+                               break;
+                       cond_resched();
+               }
+
+               ret = rreq->error;
+               if (ret == 0 && rreq->submitted < rreq->len) {
+                       trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+                       ret = -EIO;
+               }
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+       } else {
+               /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+               if (atomic_dec_and_test(&rreq->nr_outstanding))
+                       netfs_rreq_assess(rreq, false);
+               ret = 0;
+       }
+       return ret;
+}
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
new file mode 100644 (file)
index 0000000..0685687
--- /dev/null
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Miscellaneous bits for the netfs support library.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/netfs.h>
+
+MODULE_DESCRIPTION("Network fs support");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+unsigned netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
new file mode 100644 (file)
index 0000000..e86107b
--- /dev/null
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Object lifetime handling and tracing.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Allocate an I/O request and initialise it.
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+                                            struct file *file,
+                                            loff_t start, size_t len,
+                                            enum netfs_io_origin origin)
+{
+       static atomic_t debug_ids;
+       struct inode *inode = file ? file_inode(file) : mapping->host;
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+       struct netfs_io_request *rreq;
+       int ret;
+
+       rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+       if (!rreq)
+               return ERR_PTR(-ENOMEM);
+
+       rreq->start     = start;
+       rreq->len       = len;
+       rreq->origin    = origin;
+       rreq->netfs_ops = ctx->ops;
+       rreq->mapping   = mapping;
+       rreq->inode     = inode;
+       rreq->i_size    = i_size_read(inode);
+       rreq->debug_id  = atomic_inc_return(&debug_ids);
+       INIT_LIST_HEAD(&rreq->subrequests);
+       refcount_set(&rreq->ref, 1);
+       __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+       if (rreq->netfs_ops->init_request) {
+               ret = rreq->netfs_ops->init_request(rreq, file);
+               if (ret < 0) {
+                       kfree(rreq);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       netfs_stat(&netfs_n_rh_rreq);
+       return rreq;
+}
+
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
+{
+       int r;
+
+       __refcount_inc(&rreq->ref, &r);
+       trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
+}
+
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
+{
+       struct netfs_io_subrequest *subreq;
+
+       while (!list_empty(&rreq->subrequests)) {
+               subreq = list_first_entry(&rreq->subrequests,
+                                         struct netfs_io_subrequest, rreq_link);
+               list_del(&subreq->rreq_link);
+               netfs_put_subrequest(subreq, was_async,
+                                    netfs_sreq_trace_put_clear);
+       }
+}
+
+static void netfs_free_request(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+
+       netfs_clear_subrequests(rreq, false);
+       if (rreq->netfs_priv)
+               rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
+       trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+       if (rreq->cache_resources.ops)
+               rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+       kfree(rreq);
+       netfs_stat_d(&netfs_n_rh_rreq);
+}
+
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+                      enum netfs_rreq_ref_trace what)
+{
+       unsigned int debug_id = rreq->debug_id;
+       bool dead;
+       int r;
+
+       dead = __refcount_dec_and_test(&rreq->ref, &r);
+       trace_netfs_rreq_ref(debug_id, r - 1, what);
+       if (dead) {
+               if (was_async) {
+                       rreq->work.func = netfs_free_request;
+                       if (!queue_work(system_unbound_wq, &rreq->work))
+                               BUG();
+               } else {
+                       netfs_free_request(&rreq->work);
+               }
+       }
+}
+
+/*
+ * Allocate and partially initialise an I/O request structure.
+ */
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
+       if (subreq) {
+               INIT_LIST_HEAD(&subreq->rreq_link);
+               refcount_set(&subreq->ref, 2);
+               subreq->rreq = rreq;
+               netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+               netfs_stat(&netfs_n_rh_sreq);
+       }
+
+       return subreq;
+}
+
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+                         enum netfs_sreq_ref_trace what)
+{
+       int r;
+
+       __refcount_inc(&subreq->ref, &r);
+       trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
+                            what);
+}
+
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
+                                 bool was_async)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_free);
+       kfree(subreq);
+       netfs_stat_d(&netfs_n_rh_sreq);
+       netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
+}
+
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
+                         enum netfs_sreq_ref_trace what)
+{
+       unsigned int debug_index = subreq->debug_index;
+       unsigned int debug_id = subreq->rreq->debug_id;
+       bool dead;
+       int r;
+
+       dead = __refcount_dec_and_test(&subreq->ref, &r);
+       trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
+       if (dead)
+               netfs_free_subrequest(subreq, was_async);
+}
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
deleted file mode 100644 (file)
index 501da99..0000000
+++ /dev/null
@@ -1,1205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Network filesystem high-level read support.
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/sched/mm.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/netfs.h>
-#include "internal.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/netfs.h>
-
-MODULE_DESCRIPTION("Network fs support");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned netfs_debug;
-module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
-
-static void netfs_rreq_work(struct work_struct *);
-static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);
-
-static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
-                                bool was_async)
-{
-       if (refcount_dec_and_test(&subreq->usage))
-               __netfs_put_subrequest(subreq, was_async);
-}
-
-static struct netfs_read_request *netfs_alloc_read_request(
-       const struct netfs_read_request_ops *ops, void *netfs_priv,
-       struct file *file)
-{
-       static atomic_t debug_ids;
-       struct netfs_read_request *rreq;
-
-       rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
-       if (rreq) {
-               rreq->netfs_ops = ops;
-               rreq->netfs_priv = netfs_priv;
-               rreq->inode     = file_inode(file);
-               rreq->i_size    = i_size_read(rreq->inode);
-               rreq->debug_id  = atomic_inc_return(&debug_ids);
-               INIT_LIST_HEAD(&rreq->subrequests);
-               INIT_WORK(&rreq->work, netfs_rreq_work);
-               refcount_set(&rreq->usage, 1);
-               __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-               if (ops->init_rreq)
-                       ops->init_rreq(rreq, file);
-               netfs_stat(&netfs_n_rh_rreq);
-       }
-
-       return rreq;
-}
-
-static void netfs_get_read_request(struct netfs_read_request *rreq)
-{
-       refcount_inc(&rreq->usage);
-}
-
-static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
-                                    bool was_async)
-{
-       struct netfs_read_subrequest *subreq;
-
-       while (!list_empty(&rreq->subrequests)) {
-               subreq = list_first_entry(&rreq->subrequests,
-                                         struct netfs_read_subrequest, rreq_link);
-               list_del(&subreq->rreq_link);
-               netfs_put_subrequest(subreq, was_async);
-       }
-}
-
-static void netfs_free_read_request(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-       netfs_rreq_clear_subreqs(rreq, false);
-       if (rreq->netfs_priv)
-               rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
-       trace_netfs_rreq(rreq, netfs_rreq_trace_free);
-       if (rreq->cache_resources.ops)
-               rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
-       kfree(rreq);
-       netfs_stat_d(&netfs_n_rh_rreq);
-}
-
-static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
-{
-       if (refcount_dec_and_test(&rreq->usage)) {
-               if (was_async) {
-                       rreq->work.func = netfs_free_read_request;
-                       if (!queue_work(system_unbound_wq, &rreq->work))
-                               BUG();
-               } else {
-                       netfs_free_read_request(&rreq->work);
-               }
-       }
-}
-
-/*
- * Allocate and partially initialise an I/O subrequest structure.
- */
-static struct netfs_read_subrequest *netfs_alloc_subrequest(
-       struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
-       if (subreq) {
-               INIT_LIST_HEAD(&subreq->rreq_link);
-               refcount_set(&subreq->usage, 2);
-               subreq->rreq = rreq;
-               netfs_get_read_request(rreq);
-               netfs_stat(&netfs_n_rh_sreq);
-       }
-
-       return subreq;
-}
-
-static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
-{
-       refcount_inc(&subreq->usage);
-}
-
-static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
-                                  bool was_async)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_free);
-       kfree(subreq);
-       netfs_stat_d(&netfs_n_rh_sreq);
-       netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Clear the unread part of an I/O request.
- */
-static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
-{
-       struct iov_iter iter;
-
-       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-       iov_iter_zero(iov_iter_count(&iter), &iter);
-}
-
-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
-                                       bool was_async)
-{
-       struct netfs_read_subrequest *subreq = priv;
-
-       netfs_subreq_terminated(subreq, transferred_or_error, was_async);
-}
-
-/*
- * Issue a read against the cache.
- * - Eats the caller's ref on subreq.
- */
-static void netfs_read_from_cache(struct netfs_read_request *rreq,
-                                 struct netfs_read_subrequest *subreq,
-                                 enum netfs_read_from_hole read_hole)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct iov_iter iter;
-
-       netfs_stat(&netfs_n_rh_read);
-       iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-
-       cres->ops->read(cres, subreq->start, &iter, read_hole,
-                       netfs_cache_read_terminated, subreq);
-}
-
-/*
- * Fill a subrequest region with zeroes.
- */
-static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
-                                  struct netfs_read_subrequest *subreq)
-{
-       netfs_stat(&netfs_n_rh_zero);
-       __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-       netfs_subreq_terminated(subreq, 0, false);
-}
-
-/*
- * Ask the netfs to issue a read request to the server for us.
- *
- * The netfs is expected to read from subreq->pos + subreq->transferred to
- * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
- * buffer prior to the transferred point as it might clobber dirty data
- * obtained from the cache.
- *
- * Alternatively, the netfs is allowed to indicate one of two things:
- *
- * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
- *   make progress.
- *
- * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
- *   cleared.
- */
-static void netfs_read_from_server(struct netfs_read_request *rreq,
-                                  struct netfs_read_subrequest *subreq)
-{
-       netfs_stat(&netfs_n_rh_download);
-       rreq->netfs_ops->issue_op(subreq);
-}
-
-/*
- * Release those waiting.
- */
-static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
-{
-       trace_netfs_rreq(rreq, netfs_rreq_trace_done);
-       netfs_rreq_clear_subreqs(rreq, was_async);
-       netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Deal with the completion of writing the data to the cache.  We have to clear
- * the PG_fscache bits on the folios involved and release the caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
-                                         bool was_async)
-{
-       struct netfs_read_subrequest *subreq;
-       struct folio *folio;
-       pgoff_t unlocked = 0;
-       bool have_unlocked = false;
-
-       rcu_read_lock();
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
-               xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
-                       /* We might have multiple writes from the same huge
-                        * folio, but we mustn't unlock a folio more than once.
-                        */
-                       if (have_unlocked && folio_index(folio) <= unlocked)
-                               continue;
-                       unlocked = folio_index(folio);
-                       folio_end_fscache(folio);
-                       have_unlocked = true;
-               }
-       }
-
-       rcu_read_unlock();
-       netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
-                                      bool was_async)
-{
-       struct netfs_read_subrequest *subreq = priv;
-       struct netfs_read_request *rreq = subreq->rreq;
-
-       if (IS_ERR_VALUE(transferred_or_error)) {
-               netfs_stat(&netfs_n_rh_write_failed);
-               trace_netfs_failure(rreq, subreq, transferred_or_error,
-                                   netfs_fail_copy_to_cache);
-       } else {
-               netfs_stat(&netfs_n_rh_write_done);
-       }
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
-       /* If we decrement nr_wr_ops to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_wr_ops))
-               netfs_rreq_unmark_after_write(rreq, was_async);
-
-       netfs_put_subrequest(subreq, was_async);
-}
-
-/*
- * Perform any outstanding writes to the cache.  We inherit a ref from the
- * caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct netfs_read_subrequest *subreq, *next, *p;
-       struct iov_iter iter;
-       int ret;
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_write);
-
-       /* We don't want terminating writes trying to wake us up whilst we're
-        * still going through the list.
-        */
-       atomic_inc(&rreq->nr_wr_ops);
-
-       list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
-               if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
-                       list_del_init(&subreq->rreq_link);
-                       netfs_put_subrequest(subreq, false);
-               }
-       }
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               /* Amalgamate adjacent writes */
-               while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-                       next = list_next_entry(subreq, rreq_link);
-                       if (next->start != subreq->start + subreq->len)
-                               break;
-                       subreq->len += next->len;
-                       list_del_init(&next->rreq_link);
-                       netfs_put_subrequest(next, false);
-               }
-
-               ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
-                                              rreq->i_size, true);
-               if (ret < 0) {
-                       trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
-                       trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
-                       continue;
-               }
-
-               iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
-                               subreq->start, subreq->len);
-
-               atomic_inc(&rreq->nr_wr_ops);
-               netfs_stat(&netfs_n_rh_write);
-               netfs_get_read_subrequest(subreq);
-               trace_netfs_sreq(subreq, netfs_sreq_trace_write);
-               cres->ops->write(cres, subreq->start, &iter,
-                                netfs_rreq_copy_terminated, subreq);
-       }
-
-       /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_wr_ops))
-               netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-
-       netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
-{
-       rreq->work.func = netfs_rreq_write_to_cache_work;
-       if (!queue_work(system_unbound_wq, &rreq->work))
-               BUG();
-}
-
-/*
- * Unlock the folios in a read operation.  We need to set PG_fscache on any
- * folios we're going to write back before we unlock them.
- */
-static void netfs_rreq_unlock(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-       struct folio *folio;
-       unsigned int iopos, account = 0;
-       pgoff_t start_page = rreq->start / PAGE_SIZE;
-       pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
-       bool subreq_failed = false;
-
-       XA_STATE(xas, &rreq->mapping->i_pages, start_page);
-
-       if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
-               __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-               list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-                       __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
-               }
-       }
-
-       /* Walk through the pagecache and the I/O request lists simultaneously.
-        * We may have a mixture of cached and uncached sections and we only
-        * really want to write out the uncached sections.  This is slightly
-        * complicated by the possibility that we might have huge pages with a
-        * mixture inside.
-        */
-       subreq = list_first_entry(&rreq->subrequests,
-                                 struct netfs_read_subrequest, rreq_link);
-       iopos = 0;
-       subreq_failed = (subreq->error < 0);
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
-
-       rcu_read_lock();
-       xas_for_each(&xas, folio, last_page) {
-               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-               unsigned int pgend = pgpos + folio_size(folio);
-               bool pg_failed = false;
-
-               for (;;) {
-                       if (!subreq) {
-                               pg_failed = true;
-                               break;
-                       }
-                       if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
-                               folio_start_fscache(folio);
-                       pg_failed |= subreq_failed;
-                       if (pgend < iopos + subreq->len)
-                               break;
-
-                       account += subreq->transferred;
-                       iopos += subreq->len;
-                       if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-                               subreq = list_next_entry(subreq, rreq_link);
-                               subreq_failed = (subreq->error < 0);
-                       } else {
-                               subreq = NULL;
-                               subreq_failed = false;
-                       }
-                       if (pgend == iopos)
-                               break;
-               }
-
-               if (!pg_failed) {
-                       flush_dcache_folio(folio);
-                       folio_mark_uptodate(folio);
-               }
-
-               if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
-                       if (folio_index(folio) == rreq->no_unlock_folio &&
-                           test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
-                               _debug("no unlock");
-                       else
-                               folio_unlock(folio);
-               }
-       }
-       rcu_read_unlock();
-
-       task_io_account_read(account);
-       if (rreq->netfs_ops->done)
-               rreq->netfs_ops->done(rreq);
-}
-
-/*
- * Handle a short read.
- */
-static void netfs_rreq_short_read(struct netfs_read_request *rreq,
-                                 struct netfs_read_subrequest *subreq)
-{
-       __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
-       __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
-
-       netfs_stat(&netfs_n_rh_short_read);
-       trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
-
-       netfs_get_read_subrequest(subreq);
-       atomic_inc(&rreq->nr_rd_ops);
-       if (subreq->source == NETFS_READ_FROM_CACHE)
-               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
-       else
-               netfs_read_from_server(rreq, subreq);
-}
-
-/*
- * Resubmit any short or failed operations.  Returns true if we got the rreq
- * ref back.
- */
-static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       WARN_ON(in_interrupt());
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
-
-       /* We don't want terminating submissions trying to wake us up whilst
-        * we're still going through the list.
-        */
-       atomic_inc(&rreq->nr_rd_ops);
-
-       __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               if (subreq->error) {
-                       if (subreq->source != NETFS_READ_FROM_CACHE)
-                               break;
-                       subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
-                       subreq->error = 0;
-                       netfs_stat(&netfs_n_rh_download_instead);
-                       trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
-                       netfs_get_read_subrequest(subreq);
-                       atomic_inc(&rreq->nr_rd_ops);
-                       netfs_read_from_server(rreq, subreq);
-               } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
-                       netfs_rreq_short_read(rreq, subreq);
-               }
-       }
-
-       /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_rd_ops))
-               return true;
-
-       wake_up_var(&rreq->nr_rd_ops);
-       return false;
-}
-
-/*
- * Check to see if the data read is still valid.
- */
-static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       if (!rreq->netfs_ops->is_still_valid ||
-           rreq->netfs_ops->is_still_valid(rreq))
-               return;
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               if (subreq->source == NETFS_READ_FROM_CACHE) {
-                       subreq->error = -ESTALE;
-                       __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-               }
-       }
-}
-
-/*
- * Assess the state of a read request and decide what to do next.
- *
- * Note that we could be in an ordinary kernel thread, on a workqueue or in
- * softirq context at this point.  We inherit a ref from the caller.
- */
-static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
-{
-       trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
-
-again:
-       netfs_rreq_is_still_valid(rreq);
-
-       if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
-           test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
-               if (netfs_rreq_perform_resubmissions(rreq))
-                       goto again;
-               return;
-       }
-
-       netfs_rreq_unlock(rreq);
-
-       clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-       wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
-
-       if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
-               return netfs_rreq_write_to_cache(rreq);
-
-       netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_work(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-       netfs_rreq_assess(rreq, false);
-}
-
-/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
- */
-static void netfs_rreq_terminated(struct netfs_read_request *rreq,
-                                 bool was_async)
-{
-       if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
-           was_async) {
-               if (!queue_work(system_unbound_wq, &rreq->work))
-                       BUG();
-       } else {
-               netfs_rreq_assess(rreq, was_async);
-       }
-}
-
-/**
- * netfs_subreq_terminated - Note the termination of an I/O operation.
- * @subreq: The I/O request that has terminated.
- * @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
- *
- * This tells the read helper that a contributory I/O operation has terminated,
- * one way or another, and that it should integrate the results.
- *
- * The caller indicates in @transferred_or_error the outcome of the operation,
- * supplying a positive value to indicate the number of bytes transferred, 0 to
- * indicate a retryable failure to transfer anything, or a negative
- * error code.  The helper will look after reissuing I/O operations as
- * appropriate and writing downloaded data to the cache.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- */
-void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
-                            ssize_t transferred_or_error,
-                            bool was_async)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-       int u;
-
-       _enter("[%u]{%llx,%lx},%zd",
-              subreq->debug_index, subreq->start, subreq->flags,
-              transferred_or_error);
-
-       switch (subreq->source) {
-       case NETFS_READ_FROM_CACHE:
-               netfs_stat(&netfs_n_rh_read_done);
-               break;
-       case NETFS_DOWNLOAD_FROM_SERVER:
-               netfs_stat(&netfs_n_rh_download_done);
-               break;
-       default:
-               break;
-       }
-
-       if (IS_ERR_VALUE(transferred_or_error)) {
-               subreq->error = transferred_or_error;
-               trace_netfs_failure(rreq, subreq, transferred_or_error,
-                                   netfs_fail_read);
-               goto failed;
-       }
-
-       if (WARN(transferred_or_error > subreq->len - subreq->transferred,
-                "Subreq overread: R%x[%x] %zd > %zu - %zu",
-                rreq->debug_id, subreq->debug_index,
-                transferred_or_error, subreq->len, subreq->transferred))
-               transferred_or_error = subreq->len - subreq->transferred;
-
-       subreq->error = 0;
-       subreq->transferred += transferred_or_error;
-       if (subreq->transferred < subreq->len)
-               goto incomplete;
-
-complete:
-       __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-       if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
-               set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-
-out:
-       trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
-       /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
-       u = atomic_dec_return(&rreq->nr_rd_ops);
-       if (u == 0)
-               netfs_rreq_terminated(rreq, was_async);
-       else if (u == 1)
-               wake_up_var(&rreq->nr_rd_ops);
-
-       netfs_put_subrequest(subreq, was_async);
-       return;
-
-incomplete:
-       if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
-               netfs_clear_unread(subreq);
-               subreq->transferred = subreq->len;
-               goto complete;
-       }
-
-       if (transferred_or_error == 0) {
-               if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
-                       subreq->error = -ENODATA;
-                       goto failed;
-               }
-       } else {
-               __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-       }
-
-       __set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
-       set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       goto out;
-
-failed:
-       if (subreq->source == NETFS_READ_FROM_CACHE) {
-               netfs_stat(&netfs_n_rh_read_failed);
-               set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       } else {
-               netfs_stat(&netfs_n_rh_download_failed);
-               set_bit(NETFS_RREQ_FAILED, &rreq->flags);
-               rreq->error = subreq->error;
-       }
-       goto out;
-}
-EXPORT_SYMBOL(netfs_subreq_terminated);
-
-static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
-                                                      loff_t i_size)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-
-       if (cres->ops)
-               return cres->ops->prepare_read(subreq, i_size);
-       if (subreq->start >= rreq->i_size)
-               return NETFS_FILL_WITH_ZEROES;
-       return NETFS_DOWNLOAD_FROM_SERVER;
-}
-
-/*
- * Work out what sort of subrequest the next one will be.
- */
-static enum netfs_read_source
-netfs_rreq_prepare_read(struct netfs_read_request *rreq,
-                       struct netfs_read_subrequest *subreq)
-{
-       enum netfs_read_source source;
-
-       _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
-
-       source = netfs_cache_prepare_read(subreq, rreq->i_size);
-       if (source == NETFS_INVALID_READ)
-               goto out;
-
-       if (source == NETFS_DOWNLOAD_FROM_SERVER) {
-               /* Call out to the netfs to let it shrink the request to fit
-                * its own I/O sizes and boundaries.  If it shrinks it here, it
-                * will be called again to make simultaneous calls; if it wants
-                * to make serial calls, it can indicate a short read and then
-                * we will call it again.
-                */
-               if (subreq->len > rreq->i_size - subreq->start)
-                       subreq->len = rreq->i_size - subreq->start;
-
-               if (rreq->netfs_ops->clamp_length &&
-                   !rreq->netfs_ops->clamp_length(subreq)) {
-                       source = NETFS_INVALID_READ;
-                       goto out;
-               }
-       }
-
-       if (WARN_ON(subreq->len == 0))
-               source = NETFS_INVALID_READ;
-
-out:
-       subreq->source = source;
-       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
-       return source;
-}
-
-/*
- * Slice off a piece of a read request and submit an I/O request for it.
- */
-static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
-                                   unsigned int *_debug_index)
-{
-       struct netfs_read_subrequest *subreq;
-       enum netfs_read_source source;
-
-       subreq = netfs_alloc_subrequest(rreq);
-       if (!subreq)
-               return false;
-
-       subreq->debug_index     = (*_debug_index)++;
-       subreq->start           = rreq->start + rreq->submitted;
-       subreq->len             = rreq->len   - rreq->submitted;
-
-       _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
-       list_add_tail(&subreq->rreq_link, &rreq->subrequests);
-
-       /* Call out to the cache to find out what it can do with the remaining
-        * subset.  It tells us in subreq->flags what it decided should be done
-        * and adjusts subreq->len down if the subset crosses a cache boundary.
-        *
-        * Then when we hand the subset over, it can choose to take a subset of that
-        * (the starts must coincide), in which case, we go around the loop
-        * again and ask it to download the next piece.
-        */
-       source = netfs_rreq_prepare_read(rreq, subreq);
-       if (source == NETFS_INVALID_READ)
-               goto subreq_failed;
-
-       atomic_inc(&rreq->nr_rd_ops);
-
-       rreq->submitted += subreq->len;
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
-       switch (source) {
-       case NETFS_FILL_WITH_ZEROES:
-               netfs_fill_with_zeroes(rreq, subreq);
-               break;
-       case NETFS_DOWNLOAD_FROM_SERVER:
-               netfs_read_from_server(rreq, subreq);
-               break;
-       case NETFS_READ_FROM_CACHE:
-               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
-               break;
-       default:
-               BUG();
-       }
-
-       return true;
-
-subreq_failed:
-       rreq->error = subreq->error;
-       netfs_put_subrequest(subreq, false);
-       return false;
-}
-
-static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
-                                        loff_t *_start, size_t *_len, loff_t i_size)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-
-       if (cres->ops && cres->ops->expand_readahead)
-               cres->ops->expand_readahead(cres, _start, _len, i_size);
-}
-
-static void netfs_rreq_expand(struct netfs_read_request *rreq,
-                             struct readahead_control *ractl)
-{
-       /* Give the cache a chance to change the request parameters.  The
-        * resultant request must contain the original region.
-        */
-       netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
-
-       /* Give the netfs a chance to change the request parameters.  The
-        * resultant request must contain the original region.
-        */
-       if (rreq->netfs_ops->expand_readahead)
-               rreq->netfs_ops->expand_readahead(rreq);
-
-       /* Expand the request if the cache wants it to start earlier.  Note
-        * that the expansion may get further extended if the VM wishes to
-        * insert THPs and the preferred start and/or end wind up in the middle
-        * of THPs.
-        *
-        * If this is the case, however, the THP size should be an integer
-        * multiple of the cache granule size, so we get a whole number of
-        * granules to deal with.
-        */
-       if (rreq->start  != readahead_pos(ractl) ||
-           rreq->len != readahead_length(ractl)) {
-               readahead_expand(ractl, rreq->start, rreq->len);
-               rreq->start  = readahead_pos(ractl);
-               rreq->len = readahead_length(ractl);
-
-               trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
-                                netfs_read_trace_expanded);
-       }
-}
-
-/**
- * netfs_readahead - Helper to manage a read request
- * @ractl: The description of the readahead request
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readahead request by drawing data from the cache if possible, or
- * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
- * requests from different sources will get munged together.  If necessary, the
- * readahead window can be expanded in either direction to a more convenient
- * alignment for RPC efficiency or to make storage in the cache feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.  It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-void netfs_readahead(struct readahead_control *ractl,
-                    const struct netfs_read_request_ops *ops,
-                    void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       unsigned int debug_index = 0;
-       int ret;
-
-       _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
-
-       if (readahead_count(ractl) == 0)
-               goto cleanup;
-
-       rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
-       if (!rreq)
-               goto cleanup;
-       rreq->mapping   = ractl->mapping;
-       rreq->start     = readahead_pos(ractl);
-       rreq->len       = readahead_length(ractl);
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto cleanup_free;
-       }
-
-       netfs_stat(&netfs_n_rh_readahead);
-       trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
-                        netfs_read_trace_readahead);
-
-       netfs_rreq_expand(rreq, ractl);
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Drop the refs on the folios here rather than in the cache or
-        * filesystem.  The locks will be dropped in netfs_rreq_unlock().
-        */
-       while (readahead_folio(ractl))
-               ;
-
-       /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_rd_ops))
-               netfs_rreq_assess(rreq, false);
-       return;
-
-cleanup_free:
-       netfs_put_read_request(rreq, false);
-       return;
-cleanup:
-       if (netfs_priv)
-               ops->cleanup(ractl->mapping, netfs_priv);
-       return;
-}
-EXPORT_SYMBOL(netfs_readahead);
-
-/**
- * netfs_readpage - Helper to manage a readpage request
- * @file: The file to read from
- * @folio: The folio to read
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readpage request by drawing data from the cache if possible, or the
- * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
- * from different sources will get munged together.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.  It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_readpage(struct file *file,
-                  struct folio *folio,
-                  const struct netfs_read_request_ops *ops,
-                  void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       unsigned int debug_index = 0;
-       int ret;
-
-       _enter("%lx", folio_index(folio));
-
-       rreq = netfs_alloc_read_request(ops, netfs_priv, file);
-       if (!rreq) {
-               if (netfs_priv)
-                       ops->cleanup(folio_file_mapping(folio), netfs_priv);
-               folio_unlock(folio);
-               return -ENOMEM;
-       }
-       rreq->mapping   = folio_file_mapping(folio);
-       rreq->start     = folio_file_pos(folio);
-       rreq->len       = folio_size(folio);
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
-                       folio_unlock(folio);
-                       goto out;
-               }
-       }
-
-       netfs_stat(&netfs_n_rh_readpage);
-       trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
-
-       netfs_get_read_request(rreq);
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
-        * the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       do {
-               wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
-               netfs_rreq_assess(rreq, false);
-       } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
-               ret = -EIO;
-       }
-out:
-       netfs_put_read_request(rreq, false);
-       return ret;
-}
-EXPORT_SYMBOL(netfs_readpage);
-
-/*
- * Prepare a folio for writing without reading first
- * @folio: The folio being prepared
- * @pos: starting position for the write
- * @len: length of write
- *
- * In some cases, write_begin doesn't need to read at all:
- * - full folio write
- * - write that lies in a folio that is completely beyond EOF
- * - write that covers the folio from start to EOF or beyond it
- *
- * If any of these criteria are met, then zero out the unwritten parts
- * of the folio and return true. Otherwise, return false.
- */
-static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
-{
-       struct inode *inode = folio_inode(folio);
-       loff_t i_size = i_size_read(inode);
-       size_t offset = offset_in_folio(folio, pos);
-
-       /* Full folio write */
-       if (offset == 0 && len >= folio_size(folio))
-               return true;
-
-       /* pos beyond last folio in the file */
-       if (pos - offset >= i_size)
-               goto zero_out;
-
-       /* Write that covers from the start of the folio to EOF or beyond */
-       if (offset == 0 && (pos + len) >= i_size)
-               goto zero_out;
-
-       return false;
-zero_out:
-       zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
-       return true;
-}
-
-/**
- * netfs_write_begin - Helper to prepare for writing
- * @file: The file to read from
- * @mapping: The mapping to read from
- * @pos: File position at which the write will begin
- * @len: The length of the write (may extend beyond the end of the folio chosen)
- * @aop_flags: AOP_* flags
- * @_folio: Where to put the resultant folio
- * @_fsdata: Place for the netfs to store a cookie
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Pre-read data for a write-begin request by drawing data from the cache if
- * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
- * Multiple I/O requests from different sources will get munged together.  If
- * necessary, the readahead window can be expanded in either direction to a
- * more convenient alignment for RPC efficiency or to make storage in the cache
- * feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.
- *
- * The check_write_begin() operation can be provided to check for and flush
- * conflicting writes once the folio is grabbed and locked.  It is passed a
- * pointer to the fsdata cookie that gets returned to the VM to be passed to
- * write_end.  It is permitted to sleep.  It should return 0 if the request
- * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
- * be re-obtained; or return an error.
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_write_begin(struct file *file, struct address_space *mapping,
-                     loff_t pos, unsigned int len, unsigned int aop_flags,
-                     struct folio **_folio, void **_fsdata,
-                     const struct netfs_read_request_ops *ops,
-                     void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       struct folio *folio;
-       struct inode *inode = file_inode(file);
-       unsigned int debug_index = 0, fgp_flags;
-       pgoff_t index = pos >> PAGE_SHIFT;
-       int ret;
-
-       DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
-
-retry:
-       fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
-       if (aop_flags & AOP_FLAG_NOFS)
-               fgp_flags |= FGP_NOFS;
-       folio = __filemap_get_folio(mapping, index, fgp_flags,
-                                   mapping_gfp_mask(mapping));
-       if (!folio)
-               return -ENOMEM;
-
-       if (ops->check_write_begin) {
-               /* Allow the netfs (eg. ceph) to flush conflicts. */
-               ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
-               if (ret < 0) {
-                       trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
-                       if (ret == -EAGAIN)
-                               goto retry;
-                       goto error;
-               }
-       }
-
-       if (folio_test_uptodate(folio))
-               goto have_folio;
-
-       /* If the page is beyond the EOF, we want to clear it - unless it's
-        * within the cache granule containing the EOF, in which case we need
-        * to preload the granule.
-        */
-       if (!ops->is_cache_enabled(inode) &&
-           netfs_skip_folio_read(folio, pos, len)) {
-               netfs_stat(&netfs_n_rh_write_zskip);
-               goto have_folio_no_wait;
-       }
-
-       ret = -ENOMEM;
-       rreq = netfs_alloc_read_request(ops, netfs_priv, file);
-       if (!rreq)
-               goto error;
-       rreq->mapping           = folio_file_mapping(folio);
-       rreq->start             = folio_file_pos(folio);
-       rreq->len               = folio_size(folio);
-       rreq->no_unlock_folio   = folio_index(folio);
-       __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
-       netfs_priv = NULL;
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto error_put;
-       }
-
-       netfs_stat(&netfs_n_rh_write_begin);
-       trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
-
-       /* Expand the request to meet caching requirements and download
-        * preferences.
-        */
-       ractl._nr_pages = folio_nr_pages(folio);
-       netfs_rreq_expand(rreq, &ractl);
-       netfs_get_read_request(rreq);
-
-       /* We hold the folio locks, so we can drop the references */
-       folio_get(folio);
-       while (readahead_folio(&ractl))
-               ;
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
-        * the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       for (;;) {
-               wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
-               netfs_rreq_assess(rreq, false);
-               if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                       break;
-               cond_resched();
-       }
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
-               ret = -EIO;
-       }
-       netfs_put_read_request(rreq, false);
-       if (ret < 0)
-               goto error;
-
-have_folio:
-       ret = folio_wait_fscache_killable(folio);
-       if (ret < 0)
-               goto error;
-have_folio_no_wait:
-       if (netfs_priv)
-               ops->cleanup(mapping, netfs_priv);
-       *_folio = folio;
-       _leave(" = 0");
-       return 0;
-
-error_put:
-       netfs_put_read_request(rreq, false);
-error:
-       folio_unlock(folio);
-       folio_put(folio);
-       if (netfs_priv)
-               ops->cleanup(mapping, netfs_priv);
-       _leave(" = %d", ret);
-       return ret;
-}
-EXPORT_SYMBOL(netfs_write_begin);
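
For orientation, a rough sketch of how a network filesystem consumed the read-helper interface deleted above: only issue_op is mandatory, the helper slices each request into subrequests, and every completion is reported back through netfs_subreq_terminated().  The myfs_* names and myfs_fetch() are hypothetical; the netfs_* calls, ops fields and their signatures are taken from the removed code.

#include <linux/netfs.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

/* Hypothetical transport routine, defined elsewhere; returns the number of
 * bytes read into the pagecache or a negative error. */
ssize_t myfs_fetch(struct netfs_read_subrequest *subreq);

/* Read subreq->len bytes at subreq->start from the server into the pagecache
 * of subreq->rreq->mapping, then report the outcome to the helper. */
static void myfs_req_issue_op(struct netfs_read_subrequest *subreq)
{
        netfs_subreq_terminated(subreq, myfs_fetch(subreq), false);
}

/* Dispose of the private token passed to the helper, if any. */
static void myfs_req_cleanup(struct address_space *mapping, void *netfs_priv)
{
        kfree(netfs_priv);
}

static const struct netfs_read_request_ops myfs_req_ops = {
        .issue_op       = myfs_req_issue_op,    /* the only mandatory op */
        .cleanup        = myfs_req_cleanup,
};

/* A ->readpage implementation then simply delegates to the helper. */
static int myfs_readpage(struct file *file, struct page *page)
{
        return netfs_readpage(file, page_folio(page), &myfs_req_ops, NULL);
}
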
index 9ae538c..5510a7a 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/export.h>
 #include <linux/seq_file.h>
-#include <linux/netfs.h>
 #include "internal.h"
 
 atomic_t netfs_n_rh_readahead;
index b0ca244..150b7fa 100644 (file)
@@ -646,7 +646,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        result = generic_write_checks(iocb, from);
        if (result > 0) {
                current->backing_dev_info = inode_to_bdi(inode);
-               result = generic_perform_write(file, from, iocb->ki_pos);
+               result = generic_perform_write(iocb, from);
                current->backing_dev_info = NULL;
        }
        nfs_end_io_write(inode);
index 4dee53c..f73c09a 100644 (file)
@@ -238,14 +238,6 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
        }
 }
 
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * Fallback page reading interface.
  */
index 66bdaa2..ca611ac 100644 (file)
 #include "page.h"
 #include "btnode.h"
 
+
+/**
+ * nilfs_init_btnc_inode - initialize B-tree node cache inode
+ * @btnc_inode: inode to be initialized
+ *
+ * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
+ */
+void nilfs_init_btnc_inode(struct inode *btnc_inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
+
+       btnc_inode->i_mode = S_IFREG;
+       ii->i_flags = 0;
+       memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
+       mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+}
+
 void nilfs_btnode_cache_clear(struct address_space *btnc)
 {
        invalidate_mapping_pages(btnc, 0, -1);
@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
 struct buffer_head *
 nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 {
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        struct buffer_head *bh;
 
        bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                              struct buffer_head **pbh, sector_t *submit_ptr)
 {
        struct buffer_head *bh;
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        struct page *page;
        int err;
 
@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
 {
        struct buffer_head *obh, *nbh;
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        int err;
 
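
The conversion above replaces the embedded i_btnode_cache address_space with the page cache of a dedicated inode, so the btnode helpers can recover the owning inode through btnc->host instead of NILFS_BTNC_I().  A sketch of the resulting access path (the example_* name is hypothetical; the calls and fields are from this patch):

/* Hypothetical helper: obtain a new B-tree node block via the associated
 * B-tree node cache inode. */
static struct buffer_head *example_new_btnode(struct nilfs_bmap *btree,
                                              __u64 blocknr)
{
        struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;

        /* btnc_inode->i_mapping->host == btnc_inode, so nilfs_btnode_*()
         * no longer needs the NILFS_BTNC_I() back-pointer. */
        return nilfs_btnode_create_block(btnc_inode->i_mapping, blocknr);
}
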
index 1166365..bd5544e 100644 (file)
@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
+void nilfs_init_btnc_inode(struct inode *btnc_inode);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                                              __u64 blocknr);
index 3594eab..f544c22 100644 (file)
@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
                                     __u64 ptr, struct buffer_head **bhp)
 {
-       struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btnc = btnc_inode->i_mapping;
        struct buffer_head *bh;
 
        bh = nilfs_btnode_create_block(btnc, ptr);
@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
                                   struct buffer_head **bhp,
                                   const struct nilfs_btree_readahead_info *ra)
 {
-       struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btnc = btnc_inode->i_mapping;
        struct buffer_head *bh, *ra_bh;
        sector_t submit_ptr = 0;
        int ret;
@@ -1741,6 +1743,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
                dat = nilfs_bmap_get_dat(btree);
        }
 
+       ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
+       if (ret < 0)
+               return ret;
+
        ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
        if (ret < 0)
                return ret;
@@ -1913,7 +1919,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
                path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
                path[level].bp_ctxt.bh = path[level].bp_bh;
                ret = nilfs_btnode_prepare_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                if (ret < 0) {
                        nilfs_dat_abort_update(dat,
@@ -1939,7 +1945,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
 
        if (buffer_nilfs_node(path[level].bp_bh)) {
                nilfs_btnode_commit_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                path[level].bp_bh = path[level].bp_ctxt.bh;
        }
@@ -1958,7 +1964,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
                               &path[level].bp_newreq.bpr_req);
        if (buffer_nilfs_node(path[level].bp_bh))
                nilfs_btnode_abort_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
 }
 
@@ -2134,7 +2140,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
                                             struct list_head *listp)
 {
-       struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btcache = btnc_inode->i_mapping;
        struct list_head lists[NILFS_BTREE_LEVEL_MAX];
        struct pagevec pvec;
        struct buffer_head *bh, *head;
@@ -2188,12 +2195,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
                path[level].bp_ctxt.newkey = blocknr;
                path[level].bp_ctxt.bh = *bh;
                ret = nilfs_btnode_prepare_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                if (ret < 0)
                        return ret;
                nilfs_btnode_commit_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                *bh = path[level].bp_ctxt.bh;
        }
@@ -2398,6 +2405,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
 
        if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
                ret = -EIO;
+       else
+               ret = nilfs_attach_btree_node_cache(
+                       &NILFS_BMAP_I(bmap)->vfs_inode);
+
        return ret;
 }
 
index dc51d3b..3b55e23 100644 (file)
@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
        di = NILFS_DAT_I(dat);
        lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
        nilfs_palloc_setup_cache(dat, &di->palloc_cache);
-       nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+       err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+       if (err)
+               goto failed;
 
        err = nilfs_read_inode_common(dat, raw_inode);
        if (err)
index a8f5315..04fdd42 100644 (file)
@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
                                   __u64 vbn, struct buffer_head **out_bh)
 {
+       struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
        int ret;
 
-       ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
+       ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
                                        vbn ? : pbn, pbn, REQ_OP_READ, 0,
                                        out_bh, &pbn);
        if (ret == -EEXIST) /* internal code (cache hit) */
@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
        ii->i_flags = 0;
        nilfs_bmap_init_gc(ii->i_bmap);
 
-       return 0;
+       return nilfs_attach_btree_node_cache(inode);
 }
 
 /**
@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+               nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
 }
index 476a4a6..6045cea 100644 (file)
  * @cno: checkpoint number
  * @root: pointer on NILFS root object (mounted checkpoint)
  * @for_gc: inode for GC flag
+ * @for_btnc: inode for B-tree node cache flag
+ * @for_shadow: inode for shadowed page cache flag
  */
 struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
-       int for_gc;
+       bool for_gc;
+       bool for_btnc;
+       bool for_shadow;
 };
 
 static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -312,7 +316,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
                                     unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -525,6 +530,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
                return 0;
 
        ii = NILFS_I(inode);
+       if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
+               if (!args->for_btnc)
+                       return 0;
+       } else if (args->for_btnc) {
+               return 0;
+       }
+       if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
+               if (!args->for_shadow)
+                       return 0;
+       } else if (args->for_shadow) {
+               return 0;
+       }
+
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;
 
@@ -536,15 +554,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
        struct nilfs_iget_args *args = opaque;
 
        inode->i_ino = args->ino;
-       if (args->for_gc) {
+       NILFS_I(inode)->i_cno = args->cno;
+       NILFS_I(inode)->i_root = args->root;
+       if (args->root && args->ino == NILFS_ROOT_INO)
+               nilfs_get_root(args->root);
+
+       if (args->for_gc)
                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
-               NILFS_I(inode)->i_cno = args->cno;
-               NILFS_I(inode)->i_root = NULL;
-       } else {
-               if (args->root && args->ino == NILFS_ROOT_INO)
-                       nilfs_get_root(args->root);
-               NILFS_I(inode)->i_root = args->root;
-       }
+       if (args->for_btnc)
+               NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
+       if (args->for_shadow)
+               NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
        return 0;
 }
 
@@ -552,7 +572,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -562,7 +583,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -593,7 +615,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+               .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
+               .for_btnc = false, .for_shadow = false
        };
        struct inode *inode;
        int err;
@@ -613,6 +636,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
        return inode;
 }
 
+/**
+ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
+ * @inode: inode object
+ *
+ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
+ * or does nothing if the inode already has it.  This function allocates an
+ * additional inode whose page cache holds the B-tree node pages, paired
+ * one-to-one with @inode.
+ *
+ * Return Value: On success, 0 is returned. On errors, one of the following
+ * negative error codes is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+int nilfs_attach_btree_node_cache(struct inode *inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct inode *btnc_inode;
+       struct nilfs_iget_args args;
+
+       if (ii->i_assoc_inode)
+               return 0;
+
+       args.ino = inode->i_ino;
+       args.root = ii->i_root;
+       args.cno = ii->i_cno;
+       args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
+       args.for_btnc = true;
+       args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+
+       btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                                 nilfs_iget_set, &args);
+       if (unlikely(!btnc_inode))
+               return -ENOMEM;
+       if (btnc_inode->i_state & I_NEW) {
+               nilfs_init_btnc_inode(btnc_inode);
+               unlock_new_inode(btnc_inode);
+       }
+       NILFS_I(btnc_inode)->i_assoc_inode = inode;
+       NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
+       ii->i_assoc_inode = btnc_inode;
+
+       return 0;
+}
+
+/**
+ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
+ * @inode: inode object
+ *
+ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
+ * holder inode bound to @inode, or does nothing if @inode doesn't have it.
+ */
+void nilfs_detach_btree_node_cache(struct inode *inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct inode *btnc_inode = ii->i_assoc_inode;
+
+       if (btnc_inode) {
+               NILFS_I(btnc_inode)->i_assoc_inode = NULL;
+               ii->i_assoc_inode = NULL;
+               iput(btnc_inode);
+       }
+}
+
+/**
+ * nilfs_iget_for_shadow - obtain inode for shadow mapping
+ * @inode: inode object that uses shadow mapping
+ *
+ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
+ * caches for shadow mapping.  The page cache for data pages is set up
+ * in one inode and the one for b-tree node pages is set up in the
+ * other inode, which is attached to the former inode.
+ *
+ * Return Value: On success, a pointer to the inode for data pages is
+ * returned. On errors, one of the following negative error codes is
+ * returned as an error pointer.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+struct inode *nilfs_iget_for_shadow(struct inode *inode)
+{
+       struct nilfs_iget_args args = {
+               .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = true
+       };
+       struct inode *s_inode;
+       int err;
+
+       s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                              nilfs_iget_set, &args);
+       if (unlikely(!s_inode))
+               return ERR_PTR(-ENOMEM);
+       if (!(s_inode->i_state & I_NEW))
+               return inode;
+
+       NILFS_I(s_inode)->i_flags = 0;
+       memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+       mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+
+       err = nilfs_attach_btree_node_cache(s_inode);
+       if (unlikely(err)) {
+               iget_failed(s_inode);
+               return ERR_PTR(err);
+       }
+       unlock_new_inode(s_inode);
+       return s_inode;
+}
+
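For context: the iget5_locked() key now carries for_gc, for_btnc and for_shadow, so up to three in-memory inodes can share one inode number. The matching change to nilfs_iget_test() is outside this hunk; the sketch below only illustrates the idea and assumes the flag and state-bit names introduced by this patch.

        /* Sketch only; the real comparator also matches ino, root and cno. */
        static int example_iget_test(struct inode *inode, void *opaque)
        {
                struct nilfs_iget_args *args = opaque;
                struct nilfs_inode_info *ii = NILFS_I(inode);

                if (args->for_btnc != !!test_bit(NILFS_I_BTNC, &ii->i_state))
                        return 0;       /* data inode vs. btnc holder */
                if (args->for_shadow != !!test_bit(NILFS_I_SHADOW, &ii->i_state))
                        return 0;       /* live inode vs. shadow holder */
                return 1;
        }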
 void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
 {
@@ -760,7 +890,8 @@ static void nilfs_clear_inode(struct inode *inode)
        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);
 
-       nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+       if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+               nilfs_detach_btree_node_cache(inode);
 
        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
index 78db33d..d29a0f2 100644 (file)
@@ -471,9 +471,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
 void nilfs_mdt_clear(struct inode *inode)
 {
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+       struct nilfs_shadow_map *shadow = mdi->mi_shadow;
 
        if (mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);
+
+       if (shadow) {
+               struct inode *s_inode = shadow->inode;
+
+               shadow->inode = NULL;
+               iput(s_inode);
+               mdi->mi_shadow = NULL;
+       }
 }
 
 /**
@@ -507,12 +516,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
                               struct nilfs_shadow_map *shadow)
 {
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+       struct inode *s_inode;
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
-       address_space_init_once(&shadow->frozen_data);
-       nilfs_mapping_init(&shadow->frozen_data, inode);
-       address_space_init_once(&shadow->frozen_btnodes);
-       nilfs_mapping_init(&shadow->frozen_btnodes, inode);
+
+       s_inode = nilfs_iget_for_shadow(inode);
+       if (IS_ERR(s_inode))
+               return PTR_ERR(s_inode);
+
+       shadow->inode = s_inode;
        mi->mi_shadow = shadow;
        return 0;
 }
@@ -526,14 +538,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;
+       struct inode *s_inode = shadow->inode;
        int ret;
 
-       ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+       ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
        if (ret)
                goto out;
 
-       ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
-                                    &ii->i_btnode_cache);
+       ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
+                                    ii->i_assoc_inode->i_mapping);
        if (ret)
                goto out;
 
@@ -549,7 +562,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
        struct page *page;
        int blkbits = inode->i_blkbits;
 
-       page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+       page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
        if (!page)
                return -ENOMEM;
 
@@ -581,7 +594,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
        struct page *page;
        int n;
 
-       page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+       page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
        if (page) {
                if (page_has_buffers(page)) {
                        n = bh_offset(bh) >> inode->i_blkbits;
@@ -622,10 +635,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
                nilfs_palloc_clear_cache(inode);
 
        nilfs_clear_dirty_pages(inode->i_mapping, true);
-       nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+       nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
 
-       nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
-       nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+       nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+       nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
+                             NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
 
        nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
 
@@ -640,10 +654,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
 {
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;
+       struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
 
        down_write(&mi->mi_sem);
        nilfs_release_frozen_buffers(shadow);
-       truncate_inode_pages(&shadow->frozen_data, 0);
-       truncate_inode_pages(&shadow->frozen_btnodes, 0);
+       truncate_inode_pages(shadow->inode->i_mapping, 0);
+       truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
        up_write(&mi->mi_sem);
 }
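As an illustrative summary of the shadow-map lifecycle after this change (call ordering assumed from the functions in this hunk, error handling omitted):

        /* 'dat' is a metadata file inode; 'shadow' is embedded in its owner. */
        nilfs_mdt_setup_shadow_map(dat, &shadow);  /* allocates the shadow->inode pair */
        nilfs_mdt_save_to_shadow_map(dat);         /* freeze dirty data/btnode pages   */
        /* ... speculative updates to 'dat' ... */
        nilfs_mdt_restore_from_shadow_map(dat);    /* on failure, copy pages back      */
        nilfs_mdt_clear_shadow_map(dat);           /* drop frozen pages and buffers    */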
index 8f86080..9e23bab 100644 (file)
 /**
  * struct nilfs_shadow_map - shadow mapping of meta data file
  * @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
  * @frozen_buffers: list of frozen buffers
  */
 struct nilfs_shadow_map {
        struct nilfs_bmap_store bmap_store;
-       struct address_space frozen_data;
-       struct address_space frozen_btnodes;
+       struct inode *inode;
        struct list_head frozen_buffers;
 };
 
index a7b8175..1344f7d 100644 (file)
@@ -28,7 +28,7 @@
  * @i_xattr: <TODO>
  * @i_dir_start_lookup: page index of last successful search
  * @i_cno: checkpoint number for GC inode
- * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
  * @i_dirty: list for connecting dirty files
  * @xattr_sem: semaphore for extended attributes processing
  * @i_bh: buffer contains disk inode
@@ -43,7 +43,7 @@ struct nilfs_inode_info {
        __u64 i_xattr;  /* sector_t ??? */
        __u32 i_dir_start_lookup;
        __u64 i_cno;            /* check point number for GC inode */
-       struct address_space i_btnode_cache;
+       struct inode *i_assoc_inode;
        struct list_head i_dirty;       /* List for connecting dirty files */
 
 #ifdef CONFIG_NILFS_XATTR
@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
        return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
 }
 
-static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
-{
-       struct nilfs_inode_info *ii =
-               container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
-       return &ii->vfs_inode;
-}
-
 /*
  * Dynamic state flags of NILFS on-memory inode (i_state)
  */
@@ -98,6 +91,8 @@ enum {
        NILFS_I_INODE_SYNC,             /* dsync is not allowed for inode */
        NILFS_I_BMAP,                   /* has bmap and btnode_cache */
        NILFS_I_GCINODE,                /* inode for GC, on memory only */
+       NILFS_I_BTNC,                   /* inode for btree node cache */
+       NILFS_I_SHADOW,                 /* inode for shadowed page cache */
 };
 
 /*
@@ -267,6 +262,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino);
 extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
                                       unsigned long ino, __u64 cno);
+int nilfs_attach_btree_node_cache(struct inode *inode);
+void nilfs_detach_btree_node_cache(struct inode *inode);
+struct inode *nilfs_iget_for_shadow(struct inode *inode);
 extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
 extern void nilfs_truncate(struct inode *);
 extern void nilfs_evict_inode(struct inode *);
index 063dd16..a8e88cc 100644 (file)
@@ -436,22 +436,12 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }
 
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
-{
-       mapping->host = inode;
-       mapping->flags = 0;
-       mapping_set_gfp_mask(mapping, GFP_NOFS);
-       mapping->private_data = NULL;
-       mapping->a_ops = &empty_aops;
-}
-
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
  *
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- *    page dirty flags when it copies back pages from the shadow cache
- *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- *    (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of the DAT file, NILFS2 clears the
+ *    dirty flag of pages when it copies them back from the shadow cache to
+ *    the original cache.
  *
  * 2) Some B-tree operations like insertion or deletion may dispose buffers
  *    in dirty state, and this needs to cancel the dirty state of their pages.
index 569263b..21ddcdd 100644 (file)
@@ -43,7 +43,6 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_page(struct page *, bool);
 void nilfs_clear_dirty_pages(struct address_space *, bool);
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
 unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
                                            unsigned int);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
index 85a8533..0afe083 100644 (file)
@@ -733,15 +733,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                            struct list_head *listp)
 {
        struct nilfs_inode_info *ii = NILFS_I(inode);
-       struct address_space *mapping = &ii->i_btnode_cache;
+       struct inode *btnc_inode = ii->i_assoc_inode;
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        unsigned int i;
        pgoff_t index = 0;
 
+       if (!btnc_inode)
+               return;
+
        pagevec_init(&pvec);
 
-       while (pagevec_lookup_tag(&pvec, mapping, &index,
+       while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
                                        PAGECACHE_TAG_DIRTY)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
@@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                        continue;
                list_del_init(&ii->i_dirty);
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+               nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
 }
index 3e05c98..ba108f9 100644 (file)
@@ -157,7 +157,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
        ii->i_bh = NULL;
        ii->i_state = 0;
        ii->i_cno = 0;
-       nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
+       ii->i_assoc_inode = NULL;
+       ii->i_bmap = &ii->i_bmap_data;
        return &ii->vfs_inode;
 }
 
@@ -1377,8 +1378,6 @@ static void nilfs_inode_init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
        init_rwsem(&ii->xattr_sem);
 #endif
-       address_space_init_once(&ii->i_btnode_cache);
-       ii->i_bmap = &ii->i_bmap_data;
        inode_init_once(&ii->vfs_inode);
 }
 
index d154dcf..90e3dad 100644 (file)
@@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
                set_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);
        spin_unlock(&mapping->private_lock);
-       block_dirty_folio(mapping, page_folio(page));
+       filemap_dirty_folio(mapping, page_folio(page));
        if (unlikely(buffers_to_free)) {
                do {
                        bh = buffers_to_free->b_this_page;
index 273f65e..0b6f551 100644 (file)
@@ -337,7 +337,6 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
 /* Read information header from global quota file */
 int ocfs2_global_read_info(struct super_block *sb, int type)
 {
-       struct inode *gqinode = NULL;
        unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                              GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
@@ -346,29 +345,31 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
        u64 pcount;
        int status;
 
+       oinfo->dqi_gi.dqi_sb = sb;
+       oinfo->dqi_gi.dqi_type = type;
+       ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
+       oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
+       oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
+       oinfo->dqi_gqi_bh = NULL;
+       oinfo->dqi_gqi_count = 0;
+
        /* Read global header */
-       gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
+       oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                        OCFS2_INVALID_SLOT);
-       if (!gqinode) {
+       if (!oinfo->dqi_gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                        type);
                status = -EINVAL;
                goto out_err;
        }
-       oinfo->dqi_gi.dqi_sb = sb;
-       oinfo->dqi_gi.dqi_type = type;
-       oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
-       oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
-       oinfo->dqi_gqi_bh = NULL;
-       oinfo->dqi_gqi_count = 0;
-       oinfo->dqi_gqinode = gqinode;
+
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
 
-       status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
+       status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
                                             &pcount, NULL);
        if (status < 0)
                goto out_unlock;
index 0e4b16d..b1a8b04 100644 (file)
@@ -702,8 +702,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
        info->dqi_priv = oinfo;
        oinfo->dqi_type = type;
        INIT_LIST_HEAD(&oinfo->dqi_chunk);
-       oinfo->dqi_gqinode = NULL;
-       ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_rec = NULL;
        oinfo->dqi_lqi_bh = NULL;
        oinfo->dqi_libh = NULL;
index 6d8d4bf..2e244ad 100644 (file)
@@ -32,6 +32,8 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
        int ret = 0;
 
        key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
+       if (!key)
+               return -ENOMEM;
 
        xbc_for_each_key_value(leaf, val) {
                ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
index dc50001..e643aec 100644 (file)
@@ -1630,7 +1630,6 @@ int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
        if (!*count)
                return 0;
 
-       /* FIXME: this is for backwards compatibility with 2.4 */
        if (iocb->ki_flags & IOCB_APPEND)
                iocb->ki_pos = i_size_read(inode);
 
index f8e1f4e..7ab8a58 100644 (file)
@@ -554,9 +554,9 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
 }
 EXPORT_SYMBOL(seq_dentry);
 
-static void *single_start(struct seq_file *p, loff_t *pos)
+void *single_start(struct seq_file *p, loff_t *pos)
 {
-       return NULL + (*pos == 0);
+       return *pos ? NULL : SEQ_START_TOKEN;
 }
 
 static void *single_next(struct seq_file *p, void *v, loff_t *pos)
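The old body, "return NULL + (*pos == 0);", produced a non-NULL cookie for the first record through arithmetic on a null pointer; returning SEQ_START_TOKEN expresses the same thing without that trick. A minimal single-record user (hypothetical, unaffected by the change) looks like this:

        /* Needs <linux/seq_file.h> and <linux/fs.h>. */
        static int example_show(struct seq_file *m, void *v)
        {
                seq_puts(m, "hello\n");         /* called exactly once per open */
                return 0;
        }

        static int example_open(struct inode *inode, struct file *file)
        {
                return single_open(file, example_show, NULL);
        }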
index 38b8fc5..0507aec 100644 (file)
 #define NUMBER_OF_SMB2_COMMANDS        0x0013
 
 /*
+ * Size of the session key (crypto key encrypted with the password)
+ */
+#define SMB2_NTLMV2_SESSKEY_SIZE       16
+#define SMB2_SIGNATURE_SIZE            16
+#define SMB2_HMACSHA256_SIZE           32
+#define SMB2_CMACAES_SIZE              16
+#define SMB3_GCM128_CRYPTKEY_SIZE      16
+#define SMB3_GCM256_CRYPTKEY_SIZE      32
+
+/*
+ * Size of the smb3 encryption/decryption keys
+ * This size is big enough to store any cipher key types.
+ */
+#define SMB3_ENC_DEC_KEY_SIZE          32
+
+/*
+ * Size of the smb3 signing key
+ */
+#define SMB3_SIGN_KEY_SIZE             16
+
+#define CIFS_CLIENT_CHALLENGE_SIZE     8
+
+/* Maximum buffer size value we can send with 1 credit */
+#define SMB2_MAX_BUFFER_SIZE 65536
+
+/*
+ * The default wsize is 1M for SMB2 (and for some CIFS cases).
+ * find_get_pages seems to return a maximum of 256
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can
+ * fill a single wsize request with a single call.
+ */
+#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+
+/*
  * SMB2 Header Definition
  *
  * "MBZ" :  Must be Zero
 #define SMB2_FLAGS_DFS_OPERATIONS      cpu_to_le32(0x10000000)
 #define SMB2_FLAGS_REPLAY_OPERATION    cpu_to_le32(0x20000000) /* SMB3 & up */
 
+/*
+ *     Definitions for SMB2 Protocol Data Units (network frames)
+ *
+ *  See MS-SMB2.PDF specification for protocol details.
+ *  The Naming convention is the lower case version of the SMB2
+ *  command code name for the struct. Note that structures must be packed.
+ *
+ */
+
 /* See MS-SMB2 section 2.2.1 */
 struct smb2_hdr {
        __le32 ProtocolId;      /* 0xFE 'S' 'M' 'B' */
@@ -115,6 +158,18 @@ struct smb2_pdu {
        __le16 StructureSize2; /* size of wct area (varies, request specific) */
 } __packed;
 
+#define SMB2_ERROR_STRUCTURE_SIZE2     9
+#define SMB2_ERROR_STRUCTURE_SIZE2_LE  cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
+
+struct smb2_err_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;
+       __u8   ErrorContextCount;
+       __u8   Reserved;
+       __le32 ByteCount;  /* even if zero, at least one byte follows */
+       __u8   ErrorData[1];  /* variable length */
+} __packed;
+
 #define SMB3_AES_CCM_NONCE 11
 #define SMB3_AES_GCM_NONCE 12
 
@@ -608,8 +663,8 @@ struct smb2_close_req {
        __le16 StructureSize;   /* Must be 24 */
        __le16 Flags;
        __le32 Reserved;
-       __le64  PersistentFileId; /* opaque endianness */
-       __le64  VolatileFileId; /* opaque endianness */
+       __u64  PersistentFileId; /* opaque endianness */
+       __u64  VolatileFileId; /* opaque endianness */
 } __packed;
 
 /*
@@ -653,8 +708,8 @@ struct smb2_read_req {
        __u8   Flags; /* MBZ unless SMB3.02 or later */
        __le32 Length;
        __le64 Offset;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
        __le32 MinimumCount;
        __le32 Channel; /* MBZ except for SMB3 or later */
        __le32 RemainingBytes;
@@ -692,8 +747,8 @@ struct smb2_write_req {
        __le16 DataOffset; /* offset from start of SMB2 header to write data */
        __le32 Length;
        __le64 Offset;
-       __le64  PersistentFileId; /* opaque endianness */
-       __le64  VolatileFileId; /* opaque endianness */
+       __u64  PersistentFileId; /* opaque endianness */
+       __u64  VolatileFileId; /* opaque endianness */
        __le32 Channel; /* MBZ unless SMB3.02 or later */
        __le32 RemainingBytes;
        __le16 WriteChannelInfoOffset;
@@ -722,8 +777,8 @@ struct smb2_flush_req {
        __le16 StructureSize;   /* Must be 24 */
        __le16 Reserved1;
        __le32 Reserved2;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
 } __packed;
 
 struct smb2_flush_rsp {
@@ -732,6 +787,123 @@ struct smb2_flush_rsp {
        __le16 Reserved;
 } __packed;
 
+#define SMB2_LOCKFLAG_SHARED           0x0001
+#define SMB2_LOCKFLAG_EXCLUSIVE                0x0002
+#define SMB2_LOCKFLAG_UNLOCK           0x0004
+#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
+#define SMB2_LOCKFLAG_MASK             0x0007
+
+struct smb2_lock_element {
+       __le64 Offset;
+       __le64 Length;
+       __le32 Flags;
+       __le32 Reserved;
+} __packed;
+
+struct smb2_lock_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 48 */
+       __le16 LockCount;
+       /*
+        * The least significant four bits are the index, the other 28 bits are
+        * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
+        */
+       __le32 LockSequenceNumber;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       /* Followed by at least one */
+       struct smb2_lock_element locks[1];
+} __packed;
+
+struct smb2_lock_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 4 */
+       __le16 Reserved;
+} __packed;
+
+struct smb2_echo_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __u16  Reserved;
+} __packed;
+
+struct smb2_echo_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __u16  Reserved;
+} __packed;
+
+/*
+ * Valid FileInformation classes for query directory
+ *
+ * Note that these are a subset of the (file) QUERY_INFO levels defined
+ * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
+ * we do not redefine them here)
+ *
+ * FileDirectoryInformation            0x01
+ * FileFullDirectoryInformation                0x02
+ * FileIdFullDirectoryInformation      0x26
+ * FileBothDirectoryInformation                0x03
+ * FileIdBothDirectoryInformation      0x25
+ * FileNamesInformation                        0x0C
+ * FileIdExtdDirectoryInformation      0x3C
+ */
+
+/* search (query_directory) Flags field */
+#define SMB2_RESTART_SCANS             0x01
+#define SMB2_RETURN_SINGLE_ENTRY       0x02
+#define SMB2_INDEX_SPECIFIED           0x04
+#define SMB2_REOPEN                    0x10
+
+struct smb2_query_directory_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 33 */
+       __u8   FileInformationClass;
+       __u8   Flags;
+       __le32 FileIndex;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __le16 FileNameOffset;
+       __le16 FileNameLength;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_query_directory_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 OutputBufferOffset;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+/*
+ * Maximum number of iovs we need for a set-info request.
+ * The largest one is rename/hardlink
+ * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
+ * [1] : path
+ * [2] : compound padding
+ */
+#define SMB2_SET_INFO_IOV_SIZE 3
+
+struct smb2_set_info_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 33 */
+       __u8   InfoType;
+       __u8   FileInfoClass;
+       __le32 BufferLength;
+       __le16 BufferOffset;
+       __u16  Reserved;
+       __le32 AdditionalInformation;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_set_info_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 2 */
+} __packed;
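For illustration of the three-iov layout documented above for SMB2_SET_INFO_IOV_SIZE (the helper and variable names here are hypothetical, not part of the header):

        static void example_fill_set_info_iov(struct kvec iov[SMB2_SET_INFO_IOV_SIZE],
                                              void *req, size_t req_len,
                                              void *path, size_t path_len,
                                              void *pad, size_t pad_len)
        {
                iov[0].iov_base = req;          /* smb2_set_info_req + rename/link info */
                iov[0].iov_len  = req_len;
                iov[1].iov_base = path;         /* target path */
                iov[1].iov_len  = path_len;
                iov[2].iov_base = pad;          /* compound padding */
                iov[2].iov_len  = pad_len;
        }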
 
 /*
  * SMB2_NOTIFY  See MS-SMB2 section 2.2.35
@@ -769,8 +941,8 @@ struct smb2_change_notify_req {
        __le16  StructureSize;
        __le16  Flags;
        __le32  OutputBufferLength;
-       __le64  PersistentFileId; /* opaque endianness */
-       __le64  VolatileFileId; /* opaque endianness */
+       __u64   PersistentFileId; /* opaque endianness */
+       __u64   VolatileFileId; /* opaque endianness */
        __le32  CompletionFilter;
        __u32   Reserved;
 } __packed;
@@ -978,12 +1150,455 @@ struct smb2_create_rsp {
        __le64 EndofFile;
        __le32 FileAttributes;
        __le32 Reserved2;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
        __le32 CreateContextsOffset;
        __le32 CreateContextsLength;
        __u8   Buffer[1];
 } __packed;
 
+struct create_posix {
+       struct create_context ccontext;
+       __u8    Name[16];
+       __le32  Mode;
+       __u32   Reserved;
+} __packed;
+
+#define SMB2_LEASE_NONE_LE                     cpu_to_le32(0x00)
+#define SMB2_LEASE_READ_CACHING_LE             cpu_to_le32(0x01)
+#define SMB2_LEASE_HANDLE_CACHING_LE           cpu_to_le32(0x02)
+#define SMB2_LEASE_WRITE_CACHING_LE            cpu_to_le32(0x04)
+
+#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE   cpu_to_le32(0x02)
+
+#define SMB2_LEASE_KEY_SIZE                    16
+
+struct lease_context {
+       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+       __le32 LeaseState;
+       __le32 LeaseFlags;
+       __le64 LeaseDuration;
+} __packed;
+
+struct lease_context_v2 {
+       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+       __le32 LeaseState;
+       __le32 LeaseFlags;
+       __le64 LeaseDuration;
+       __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
+       __le16 Epoch;
+       __le16 Reserved;
+} __packed;
+
+struct create_lease {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct lease_context lcontext;
+} __packed;
+
+struct create_lease_v2 {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct lease_context_v2 lcontext;
+       __u8   Pad[4];
+} __packed;
+
+/* See MS-SMB2 2.2.31 and 2.2.32 */
+struct smb2_ioctl_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 57 */
+       __le16 Reserved; /* MBZ */
+       __le32 CtlCode;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __le32 InputOffset; /* Reserved MBZ */
+       __le32 InputCount;
+       __le32 MaxInputResponse;
+       __le32 OutputOffset;
+       __le32 OutputCount;
+       __le32 MaxOutputResponse;
+       __le32 Flags;
+       __le32 Reserved2;
+       __u8   Buffer[];
+} __packed;
+
+struct smb2_ioctl_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 49 */
+       __le16 Reserved;
+       __le32 CtlCode;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __le32 InputOffset; /* Reserved MBZ */
+       __le32 InputCount;
+       __le32 OutputOffset;
+       __le32 OutputCount;
+       __le32 Flags;
+       __le32 Reserved2;
+       __u8   Buffer[];
+} __packed;
+
+/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
+struct file_zero_data_information {
+       __le64  FileOffset;
+       __le64  BeyondFinalZero;
+} __packed;
+
+/* Reparse structures - see MS-FSCC 2.1.2 */
+
+/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
+struct reparse_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __u8    DataBuffer[]; /* Variable Length */
+} __packed;
+
+struct reparse_guid_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __u8    ReparseGuid[16];
+       __u8    DataBuffer[]; /* Variable Length */
+} __packed;
+
+struct reparse_mount_point_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __u8    PathBuffer[]; /* Variable Length */
+} __packed;
+
+#define SYMLINK_FLAG_RELATIVE 0x00000001
+
+struct reparse_symlink_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __le32  Flags;
+       __u8    PathBuffer[]; /* Variable Length */
+} __packed;
+
+/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
+
+struct validate_negotiate_info_req {
+       __le32 Capabilities;
+       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
+       __le16 SecurityMode;
+       __le16 DialectCount;
+       __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
+} __packed;
+
+struct validate_negotiate_info_rsp {
+       __le32 Capabilities;
+       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
+       __le16 SecurityMode;
+       __le16 Dialect; /* Dialect in use for the connection */
+} __packed;
+
+struct duplicate_extents_to_file {
+       __u64 PersistentFileHandle; /* source file handle, opaque endianness */
+       __u64 VolatileFileHandle;
+       __le64 SourceFileOffset;
+       __le64 TargetFileOffset;
+       __le64 ByteCount;  /* Bytes to be copied */
+} __packed;
+
+/* Possible InfoType values */
+#define SMB2_O_INFO_FILE       0x01
+#define SMB2_O_INFO_FILESYSTEM 0x02
+#define SMB2_O_INFO_SECURITY   0x03
+#define SMB2_O_INFO_QUOTA      0x04
+
+/* SMB2 Query Info see MS-SMB2 (2.2.37) or MS-DTYP */
+
+/* List of QUERY INFO levels (those also valid for QUERY_DIR are noted below */
+#define FILE_DIRECTORY_INFORMATION     1       /* also for QUERY_DIR */
+#define FILE_FULL_DIRECTORY_INFORMATION 2      /* also for QUERY_DIR */
+#define FILE_BOTH_DIRECTORY_INFORMATION 3      /* also for QUERY_DIR */
+#define FILE_BASIC_INFORMATION         4
+#define FILE_STANDARD_INFORMATION      5
+#define FILE_INTERNAL_INFORMATION      6
+#define FILE_EA_INFORMATION            7
+#define FILE_ACCESS_INFORMATION                8
+#define FILE_NAME_INFORMATION          9
+#define FILE_RENAME_INFORMATION                10
+#define FILE_LINK_INFORMATION          11
+#define FILE_NAMES_INFORMATION         12      /* also for QUERY_DIR */
+#define FILE_DISPOSITION_INFORMATION   13
+#define FILE_POSITION_INFORMATION      14
+#define FILE_FULL_EA_INFORMATION       15
+#define FILE_MODE_INFORMATION          16
+#define FILE_ALIGNMENT_INFORMATION     17
+#define FILE_ALL_INFORMATION           18
+#define FILE_ALLOCATION_INFORMATION    19
+#define FILE_END_OF_FILE_INFORMATION   20
+#define FILE_ALTERNATE_NAME_INFORMATION 21
+#define FILE_STREAM_INFORMATION                22
+#define FILE_PIPE_INFORMATION          23
+#define FILE_PIPE_LOCAL_INFORMATION    24
+#define FILE_PIPE_REMOTE_INFORMATION   25
+#define FILE_MAILSLOT_QUERY_INFORMATION 26
+#define FILE_MAILSLOT_SET_INFORMATION  27
+#define FILE_COMPRESSION_INFORMATION   28
+#define FILE_OBJECT_ID_INFORMATION     29
+/* Number 30 not defined in documents */
+#define FILE_MOVE_CLUSTER_INFORMATION  31
+#define FILE_QUOTA_INFORMATION         32
+#define FILE_REPARSE_POINT_INFORMATION 33
+#define FILE_NETWORK_OPEN_INFORMATION  34
+#define FILE_ATTRIBUTE_TAG_INFORMATION 35
+#define FILE_TRACKING_INFORMATION      36
+#define FILEID_BOTH_DIRECTORY_INFORMATION 37   /* also for QUERY_DIR */
+#define FILEID_FULL_DIRECTORY_INFORMATION 38   /* also for QUERY_DIR */
+#define FILE_VALID_DATA_LENGTH_INFORMATION 39
+#define FILE_SHORT_NAME_INFORMATION    40
+#define FILE_SFIO_RESERVE_INFORMATION  44
+#define FILE_SFIO_VOLUME_INFORMATION   45
+#define FILE_HARD_LINK_INFORMATION     46
+#define FILE_NORMALIZED_NAME_INFORMATION 48
+#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
+#define FILE_STANDARD_LINK_INFORMATION 54
+#define FILE_ID_INFORMATION            59
+#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60  /* also for QUERY_DIR */
+/* Used for Query Info and Find File POSIX Info for SMB3.1.1 and SMB1 */
+#define SMB_FIND_FILE_POSIX_INFO       0x064
+
+/* Security info type additionalinfo flags. */
+#define OWNER_SECINFO   0x00000001
+#define GROUP_SECINFO   0x00000002
+#define DACL_SECINFO   0x00000004
+#define SACL_SECINFO   0x00000008
+#define LABEL_SECINFO   0x00000010
+#define ATTRIBUTE_SECINFO   0x00000020
+#define SCOPE_SECINFO   0x00000040
+#define BACKUP_SECINFO   0x00010000
+#define UNPROTECTED_SACL_SECINFO   0x10000000
+#define UNPROTECTED_DACL_SECINFO   0x20000000
+#define PROTECTED_SACL_SECINFO   0x40000000
+#define PROTECTED_DACL_SECINFO   0x80000000
+
+/* Flags used for FileFullEAinfo */
+#define SL_RESTART_SCAN                0x00000001
+#define SL_RETURN_SINGLE_ENTRY 0x00000002
+#define SL_INDEX_SPECIFIED     0x00000004
+
+struct smb2_query_info_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 41 */
+       __u8   InfoType;
+       __u8   FileInfoClass;
+       __le32 OutputBufferLength;
+       __le16 InputBufferOffset;
+       __u16  Reserved;
+       __le32 InputBufferLength;
+       __le32 AdditionalInformation;
+       __le32 Flags;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_query_info_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 OutputBufferOffset;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+/*
+ *     PDU query infolevel structure definitions
+ */
+
+struct file_allocated_range_buffer {
+       __le64  file_offset;
+       __le64  length;
+} __packed;
+
+struct smb2_file_internal_info {
+       __le64 IndexNumber;
+} __packed; /* level 6 Query */
+
+struct smb2_file_rename_info { /* encoding of request for level 10 */
+       __u8   ReplaceIfExists; /* 1 = replace existing target with new */
+                               /* 0 = fail if target already exists */
+       __u8   Reserved[7];
+       __u64  RootDirectory;  /* MBZ for network operations (so the spec says, reason unclear) */
+       __le32 FileNameLength;
+       char   FileName[];     /* New name to be assigned */
+       /* padding - overall struct size must be >= 24 so filename + pad >= 6 */
+} __packed; /* level 10 Set */
+
+struct smb2_file_link_info { /* encoding of request for level 11 */
+       __u8   ReplaceIfExists; /* 1 = replace existing link with new */
+                               /* 0 = fail if link already exists */
+       __u8   Reserved[7];
+       __u64  RootDirectory;  /* MBZ for network operations (so the spec says, reason unclear) */
+       __le32 FileNameLength;
+       char   FileName[];     /* Name to be assigned to new link */
+} __packed; /* level 11 Set */
+
+/*
+ * Although the struct shares its name, this level 18 structure differs from
+ * the cifs level 0x107 one.  Level 0x107 has an extra u64 between AccessFlags
+ * and CurrentByteOffset.
+ */
+struct smb2_file_all_info { /* data block encoding of response to level 18 */
+       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le32 Attributes;
+       __u32  Pad1;            /* End of FILE_BASIC_INFO equivalent */
+       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
+       __le64 EndOfFile;       /* size, i.e. offset to first free byte in file */
+       __le32 NumberOfLinks;   /* hard links */
+       __u8   DeletePending;
+       __u8   Directory;
+       __u16  Pad2;            /* End of FILE_STANDARD_INFO equivalent */
+       __le64 IndexNumber;
+       __le32 EASize;
+       __le32 AccessFlags;
+       __le64 CurrentByteOffset;
+       __le32 Mode;
+       __le32 AlignmentRequirement;
+       __le32 FileNameLength;
+       char   FileName[1];
+} __packed; /* level 18 Query */
+
+struct smb2_file_eof_info { /* encoding of request for level 20 */
+       __le64 EndOfFile; /* new end of file value */
+} __packed; /* level 20 Set */
+
+/* Level 100 query info */
+struct smb311_posix_qinfo {
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 DosAttributes;
+       __le64 Inode;
+       __le32 DeviceId;
+       __le32 Zero;
+       /* beginning of POSIX Create Context Response */
+       __le32 HardLinks;
+       __le32 ReparseTag;
+       __le32 Mode;
+       u8     Sids[];
+       /*
+        * var sized owner SID
+        * var sized group SID
+        * le32 filenamelength
+        * u8  filename[]
+        */
+} __packed;
+
+/* File System Information Classes */
+#define FS_VOLUME_INFORMATION          1 /* Query */
+#define FS_LABEL_INFORMATION           2 /* Set */
+#define FS_SIZE_INFORMATION            3 /* Query */
+#define FS_DEVICE_INFORMATION          4 /* Query */
+#define FS_ATTRIBUTE_INFORMATION       5 /* Query */
+#define FS_CONTROL_INFORMATION         6 /* Query, Set */
+#define FS_FULL_SIZE_INFORMATION       7 /* Query */
+#define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
+#define FS_DRIVER_PATH_INFORMATION     9 /* Query */
+#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
+#define FS_POSIX_INFORMATION           100 /* SMB3.1.1 POSIX. Query */
+
+struct smb2_fs_full_size_info {
+       __le64 TotalAllocationUnits;
+       __le64 CallerAvailableAllocationUnits;
+       __le64 ActualAvailableAllocationUnits;
+       __le32 SectorsPerAllocationUnit;
+       __le32 BytesPerSector;
+} __packed;
+
+#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
+#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
+#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
+
+/* sector size info struct */
+struct smb3_fs_ss_info {
+       __le32 LogicalBytesPerSector;
+       __le32 PhysicalBytesPerSectorForAtomicity;
+       __le32 PhysicalBytesPerSectorForPerf;
+       __le32 FSEffPhysicalBytesPerSectorForAtomicity;
+       __le32 Flags;
+       __le32 ByteOffsetForSectorAlignment;
+       __le32 ByteOffsetForPartitionAlignment;
+} __packed;
+
+/* File System Control Information */
+struct smb2_fs_control_info {
+       __le64 FreeSpaceStartFiltering;
+       __le64 FreeSpaceThreshold;
+       __le64 FreeSpaceStopFiltering;
+       __le64 DefaultQuotaThreshold;
+       __le64 DefaultQuotaLimit;
+       __le32 FileSystemControlFlags;
+       __le32 Padding;
+} __packed;
+
+/* volume info struct - see MS-FSCC 2.5.9 */
+#define MAX_VOL_LABEL_LEN      32
+struct smb3_fs_vol_info {
+       __le64  VolumeCreationTime;
+       __u32   VolumeSerialNumber;
+       __le32  VolumeLabelLength; /* includes trailing null */
+       __u8    SupportsObjects; /* True if the fs, like NTFS, supports objects */
+       __u8    Reserved;
+       __u8    VolumeLabel[]; /* variable len */
+} __packed;
+
+/* See MS-SMB2 2.2.23 through 2.2.25 */
+struct smb2_oplock_break {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 24 */
+       __u8   OplockLevel;
+       __u8   Reserved;
+       __le32 Reserved2;
+       __u64  PersistentFid;
+       __u64  VolatileFid;
+} __packed;
+
+#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
+
+struct smb2_lease_break {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 44 */
+       __le16 Epoch;
+       __le32 Flags;
+       __u8   LeaseKey[16];
+       __le32 CurrentLeaseState;
+       __le32 NewLeaseState;
+       __le32 BreakReason;
+       __le32 AccessMaskHint;
+       __le32 ShareMaskHint;
+} __packed;
+
+struct smb2_lease_ack {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 36 */
+       __le16 Reserved;
+       __le32 Flags;
+       __u8   LeaseKey[16];
+       __le32 LeaseState;
+       __le64 LeaseDuration;
+} __packed;
 
+#define OP_BREAK_STRUCT_SIZE_20                24
+#define OP_BREAK_STRUCT_SIZE_21                36
 #endif                         /* _COMMON_SMB2PDU_H */
index dbe72f6..8615188 100644 (file)
@@ -349,20 +349,97 @@ out_budg:
        return err;
 }
 
-static int do_tmpfile(struct inode *dir, struct dentry *dentry,
-                     umode_t mode, struct inode **whiteout)
+static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
+{
+       int err;
+       umode_t mode = S_IFCHR | WHITEOUT_MODE;
+       struct inode *inode;
+       struct ubifs_info *c = dir->i_sb->s_fs_info;
+       struct fscrypt_name nm;
+
+       /*
+        * Create an inode (nlink = 1) for the whiteout without updating the
+        * journal; ubifs_jnl_rename() will store it on flash so that the
+        * rename-whiteout operation completes atomically.
+        */
+
+       dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+               dentry, mode, dir->i_ino);
+
+       err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+       if (err)
+               return ERR_PTR(err);
+
+       inode = ubifs_new_inode(c, dir, mode);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+               goto out_free;
+       }
+
+       init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
+       ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
+
+       err = ubifs_init_security(dir, inode, &dentry->d_name);
+       if (err)
+               goto out_inode;
+
+       /* The dir size is updated by do_rename. */
+       insert_inode_hash(inode);
+
+       return inode;
+
+out_inode:
+       make_bad_inode(inode);
+       iput(inode);
+out_free:
+       fscrypt_free_filename(&nm);
+       ubifs_err(c, "cannot create whiteout file, error %d", err);
+       return ERR_PTR(err);
+}
+
+/**
+ * lock_2_inodes - a wrapper for locking two UBIFS inodes.
+ * @inode1: first inode
+ * @inode2: second inode
+ *
+ * We do not implement any tricks to guarantee strict lock ordering, because
+ * VFS has already done it for us on the @i_mutex. So this is just a simple
+ * wrapper function.
+ */
+static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
+{
+       mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
+       mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
+}
+
+/**
+ * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
+ * @inode1: first inode
+ * @inode2: second inode
+ */
+static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
+{
+       mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
+       mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
+}
+
+static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+                        struct dentry *dentry, umode_t mode)
 {
        struct inode *inode;
        struct ubifs_info *c = dir->i_sb->s_fs_info;
-       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1};
+       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+                                       .dirtied_ino = 1};
        struct ubifs_budget_req ino_req = { .dirtied_ino = 1 };
-       struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir);
+       struct ubifs_inode *ui;
        int err, instantiated = 0;
        struct fscrypt_name nm;
 
        /*
-        * Budget request settings: new dirty inode, new direntry,
-        * budget for dirtied inode will be released via writeback.
+        * Budget request settings: new inode, new direntry, changing the
+        * parent directory inode.
+        * Allocate the budget for the new dirtied inode separately; it will
+        * be released via writeback.
         */
 
        dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
@@ -392,42 +469,30 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
        }
        ui = ubifs_inode(inode);
 
-       if (whiteout) {
-               init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
-               ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
-       }
-
        err = ubifs_init_security(dir, inode, &dentry->d_name);
        if (err)
                goto out_inode;
 
        mutex_lock(&ui->ui_mutex);
        insert_inode_hash(inode);
-
-       if (whiteout) {
-               mark_inode_dirty(inode);
-               drop_nlink(inode);
-               *whiteout = inode;
-       } else {
-               d_tmpfile(dentry, inode);
-       }
+       d_tmpfile(dentry, inode);
        ubifs_assert(c, ui->dirty);
 
        instantiated = 1;
        mutex_unlock(&ui->ui_mutex);
 
-       mutex_lock(&dir_ui->ui_mutex);
+       lock_2_inodes(dir, inode);
        err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
        if (err)
                goto out_cancel;
-       mutex_unlock(&dir_ui->ui_mutex);
+       unlock_2_inodes(dir, inode);
 
        ubifs_release_budget(c, &req);
 
        return 0;
 
 out_cancel:
-       mutex_unlock(&dir_ui->ui_mutex);
+       unlock_2_inodes(dir, inode);
 out_inode:
        make_bad_inode(inode);
        if (!instantiated)
@@ -441,12 +506,6 @@ out_budg:
        return err;
 }
 
-static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
-                        struct dentry *dentry, umode_t mode)
-{
-       return do_tmpfile(dir, dentry, mode, NULL);
-}
-
 /**
  * vfs_dent_type - get VFS directory entry type.
  * @type: UBIFS directory entry type
@@ -660,32 +719,6 @@ static int ubifs_dir_release(struct inode *dir, struct file *file)
        return 0;
 }
 
-/**
- * lock_2_inodes - a wrapper for locking two UBIFS inodes.
- * @inode1: first inode
- * @inode2: second inode
- *
- * We do not implement any tricks to guarantee strict lock ordering, because
- * VFS has already done it for us on the @i_mutex. So this is just a simple
- * wrapper function.
- */
-static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
-{
-       mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
-       mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
-}
-
-/**
- * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
- * @inode1: first inode
- * @inode2: second inode
- */
-static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
-{
-       mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
-       mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
-}
-
 static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
                      struct dentry *dentry)
 {
@@ -949,7 +982,8 @@ static int ubifs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
        struct ubifs_inode *dir_ui = ubifs_inode(dir);
        struct ubifs_info *c = dir->i_sb->s_fs_info;
        int err, sz_change;
-       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 };
+       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+                                       .dirtied_ino = 1};
        struct fscrypt_name nm;
 
        /*
@@ -1264,17 +1298,19 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                                        .dirtied_ino = 3 };
        struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
+       struct ubifs_budget_req wht_req;
        struct timespec64 time;
        unsigned int saved_nlink;
        struct fscrypt_name old_nm, new_nm;
 
        /*
-        * Budget request settings: deletion direntry, new direntry, removing
-        * the old inode, and changing old and new parent directory inodes.
+        * Budget request settings:
+        *   req: deletion direntry, new direntry, removing the old inode,
+        *   and changing old and new parent directory inodes.
+        *
+        *   wht_req: new whiteout inode for RENAME_WHITEOUT.
         *
-        * However, this operation also marks the target inode as dirty and
-        * does not write it, so we allocate budget for the target inode
-        * separately.
+        *   ino_req: marks the target inode as dirty and does not write it.
         */
 
        dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x",
@@ -1331,20 +1367,44 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto out_release;
                }
 
-               err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout);
-               if (err) {
+               /*
+                * The whiteout inode has no dentry and is pinned in memory.
+                * Unmount cannot happen during the rename process because we
+                * hold the parent dentry.
+                */
+               whiteout = create_whiteout(old_dir, old_dentry);
+               if (IS_ERR(whiteout)) {
+                       err = PTR_ERR(whiteout);
                        kfree(dev);
                        goto out_release;
                }
 
-               spin_lock(&whiteout->i_lock);
-               whiteout->i_state |= I_LINKABLE;
-               spin_unlock(&whiteout->i_lock);
-
                whiteout_ui = ubifs_inode(whiteout);
                whiteout_ui->data = dev;
                whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
                ubifs_assert(c, !whiteout_ui->dirty);
+
+               memset(&wht_req, 0, sizeof(struct ubifs_budget_req));
+               wht_req.new_ino = 1;
+               wht_req.new_ino_d = ALIGN(whiteout_ui->data_len, 8);
+               /*
+                * To avoid a deadlock between space budgeting (done while
+                * holding ui_mutex and waiting for writeback) and the
+                * writeback work (which waits for ui_mutex), reserve the
+                * space budget before the ubifs inodes are locked.
+                */
+               err = ubifs_budget_space(c, &wht_req);
+               if (err) {
+                       /*
+                        * The whiteout inode cannot be written to flash by
+                        * ubifs_jnl_write_inode(), because it is neither
+                        * dirty nor zero-nlink.
+                        */
+                       iput(whiteout);
+                       goto out_release;
+               }
+
+               /* Add the old_dentry size to the old_dir size. */
+               old_sz -= CALC_DENT_SIZE(fname_len(&old_nm));
        }
 
        lock_4_inodes(old_dir, new_dir, new_inode, whiteout);
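Restating the ordering the comments above rely on (a sketch of what this hunk already does, not additional code):

        /* 1. reserve budgets (may wait for writeback) while no ui_mutex is held */
        /* 2. lock_4_inodes()       - take the ui_mutexes                        */
        /* 3. ubifs_jnl_rename()    - journal rename, whiteout and dir updates   */
        /* 4. unlock_4_inodes()                                                  */
        /* 5. ubifs_release_budget() for req and, when a whiteout is used, wht_req */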
@@ -1416,29 +1476,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
                if (unlink && IS_SYNC(new_inode))
                        sync = 1;
-       }
-
-       if (whiteout) {
-               struct ubifs_budget_req wht_req = { .dirtied_ino = 1,
-                               .dirtied_ino_d = \
-                               ALIGN(ubifs_inode(whiteout)->data_len, 8) };
-
-               err = ubifs_budget_space(c, &wht_req);
-               if (err) {
-                       kfree(whiteout_ui->data);
-                       whiteout_ui->data_len = 0;
-                       iput(whiteout);
-                       goto out_release;
-               }
-
-               inc_nlink(whiteout);
-               mark_inode_dirty(whiteout);
-
-               spin_lock(&whiteout->i_lock);
-               whiteout->i_state &= ~I_LINKABLE;
-               spin_unlock(&whiteout->i_lock);
-
-               iput(whiteout);
+               /*
+                * The whiteout inherits its S_SYNC flag from old_dir, and we
+                * have already checked the old dir inode, so there is no need
+                * to check the whiteout.
+                */
        }
 
        err = ubifs_jnl_rename(c, old_dir, old_inode, &old_nm, new_dir,
@@ -1449,6 +1491,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
        ubifs_release_budget(c, &req);
 
+       if (whiteout) {
+               ubifs_release_budget(c, &wht_req);
+               iput(whiteout);
+       }
+
        mutex_lock(&old_inode_ui->ui_mutex);
        release = old_inode_ui->dirty;
        mark_inode_dirty_sync(old_inode);
@@ -1457,11 +1504,16 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (release)
                ubifs_release_budget(c, &ino_req);
        if (IS_SYNC(old_inode))
-               err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
+               /*
+                * The rename has finished at this point. Even if the old inode
+                * cannot be updated on flash, a stale ctime is not a big
+                * problem, so don't return an error code to userspace.
+                */
+               old_inode->i_sb->s_op->write_inode(old_inode, NULL);
 
        fscrypt_free_filename(&old_nm);
        fscrypt_free_filename(&new_nm);
-       return err;
+       return 0;
 
 out_cancel:
        if (unlink) {
@@ -1482,11 +1534,11 @@ out_cancel:
                                inc_nlink(old_dir);
                }
        }
+       unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
        if (whiteout) {
-               drop_nlink(whiteout);
+               ubifs_release_budget(c, &wht_req);
                iput(whiteout);
        }
-       unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
 out_release:
        ubifs_release_budget(c, &ino_req);
        ubifs_release_budget(c, &req);
index 8a9ffc2..0383fbd 100644 (file)
@@ -570,7 +570,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
        }
 
        if (!PagePrivate(page)) {
-               SetPagePrivate(page);
+               attach_page_private(page, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }
@@ -947,7 +947,7 @@ static int do_writepage(struct page *page, int len)
                release_existing_page_budget(c);
 
        atomic_long_dec(&c->dirty_pg_cnt);
-       ClearPagePrivate(page);
+       detach_page_private(page);
        ClearPageChecked(page);
 
        kunmap(page);
@@ -1304,7 +1304,7 @@ static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
                release_existing_page_budget(c);
 
        atomic_long_dec(&c->dirty_pg_cnt);
-       folio_clear_private(folio);
+       folio_detach_private(folio);
        folio_clear_checked(folio);
 }
 
@@ -1471,8 +1471,8 @@ static int ubifs_migrate_page(struct address_space *mapping,
                return rc;
 
        if (PagePrivate(page)) {
-               ClearPagePrivate(page);
-               SetPagePrivate(newpage);
+               detach_page_private(page);
+               attach_page_private(newpage, (void *)1);
        }
 
        if (mode != MIGRATE_SYNC_NO_COPY)
@@ -1496,7 +1496,7 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
                return 0;
        ubifs_assert(c, PagePrivate(page));
        ubifs_assert(c, 0);
-       ClearPagePrivate(page);
+       detach_page_private(page);
        ClearPageChecked(page);
        return 1;
 }
@@ -1567,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
        else {
                if (!PageChecked(page))
                        ubifs_convert_page_budget(c);
-               SetPagePrivate(page);
+               attach_page_private(page, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }
index 789a781..1607a3c 100644 (file)
@@ -854,16 +854,42 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
         */
        n = aligned_len >> c->max_write_shift;
        if (n) {
-               n <<= c->max_write_shift;
+               int m = n - 1;
+
                dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
                       wbuf->offs);
-               err = ubifs_leb_write(c, wbuf->lnum, buf + written,
-                                     wbuf->offs, n);
+
+               if (m) {
+                       /* '(n-1)<<c->max_write_shift < len' is always true. */
+                       m <<= c->max_write_shift;
+                       err = ubifs_leb_write(c, wbuf->lnum, buf + written,
+                                             wbuf->offs, m);
+                       if (err)
+                               goto out;
+                       wbuf->offs += m;
+                       aligned_len -= m;
+                       len -= m;
+                       written += m;
+               }
+
+               /*
+                * The unwritten length of buf may be less than 'n' because
+                * parameter 'len' is not 8-byte aligned, so only copy
+                * min(len, n) bytes from buf here.
+                */
+               n = 1 << c->max_write_shift;
+               memcpy(wbuf->buf, buf + written, min(len, n));
+               if (n > len) {
+                       ubifs_assert(c, n - len < 8);
+                       ubifs_pad(c, wbuf->buf + len, n - len);
+               }
+
+               err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
                if (err)
                        goto out;
                wbuf->offs += n;
                aligned_len -= n;
-               len -= n;
+               len -= min(len, n);
                written += n;
        }
 
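
The rewritten branch above writes the first (n - 1) max-write-size chunks straight from the caller's buffer and stages only the final chunk through wbuf->buf, padding it when the payload is not 8-byte aligned. A small user-space model of that arithmetic (the shift, the length, and the helper program are assumptions for illustration; only the splitting/padding rule is taken from the hunk):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const int shift = 11;			/* assumed max_write_shift: 2048-byte write unit */
	int len = 4091;				/* assumed unaligned payload length */
	int aligned_len = (len + 7) & ~7;	/* UBIFS rounds the on-flash length up to 8 bytes */
	int n = aligned_len >> shift;
	int direct = 0, staged = 0, pad = 0;

	if (n) {
		int m = n - 1;
		if (m) {
			m <<= shift;		/* (n - 1) full units go straight from the caller's buffer */
			direct = m;
			len -= m;
			aligned_len -= m;
		}
		n = 1 << shift;			/* the last unit is staged through wbuf->buf */
		staged = len < n ? len : n;
		if (n > len) {
			assert(n - len < 8);	/* shortfall can only come from the 8-byte alignment */
			pad = n - len;
		}
	}
	printf("direct=%d staged=%d pad=%d\n", direct, staged, pad);
	return 0;
}

With these numbers the first 2048 bytes are written directly, 2043 bytes are copied into wbuf->buf, and 5 bytes of padding fill out the final write unit.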
index c6a8634..71bcebe 100644 (file)
@@ -108,7 +108,7 @@ static int setflags(struct inode *inode, int flags)
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_budget_req req = { .dirtied_ino = 1,
-                                       .dirtied_ino_d = ui->data_len };
+                       .dirtied_ino_d = ALIGN(ui->data_len, 8) };
 
        err = ubifs_budget_space(c, &req);
        if (err)
index 8ea680d..75dab0a 100644 (file)
@@ -1207,9 +1207,9 @@ out_free:
  * @sync: non-zero if the write-buffer has to be synchronized
  *
  * This function implements the re-name operation which may involve writing up
- * to 4 inodes and 2 directory entries. It marks the written inodes as clean
- * and returns zero on success. In case of failure, a negative error code is
- * returned.
+ * to 4 inodes (new inode, whiteout inode, old and new parent directory inodes)
+ * and 2 directory entries. It marks the written inodes as clean and returns
+ * zero on success. In case of failure, a negative error code is returned.
  */
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                     const struct inode *old_inode,
@@ -1222,14 +1222,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        void *p;
        union ubifs_key key;
        struct ubifs_dent_node *dent, *dent2;
-       int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
+       int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0;
        int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
        int last_reference = !!(new_inode && new_inode->i_nlink == 0);
        int move = (old_dir != new_dir);
-       struct ubifs_inode *new_ui;
+       struct ubifs_inode *new_ui, *whiteout_ui;
        u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
+       u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ];
        u8 hash_dent1[UBIFS_HASH_ARR_SZ];
        u8 hash_dent2[UBIFS_HASH_ARR_SZ];
 
@@ -1249,9 +1250,20 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        } else
                ilen = 0;
 
+       if (whiteout) {
+               whiteout_ui = ubifs_inode(whiteout);
+               ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex));
+               ubifs_assert(c, whiteout->i_nlink == 1);
+               ubifs_assert(c, !whiteout_ui->dirty);
+               wlen = UBIFS_INO_NODE_SZ;
+               wlen += whiteout_ui->data_len;
+       } else
+               wlen = 0;
+
        aligned_dlen1 = ALIGN(dlen1, 8);
        aligned_dlen2 = ALIGN(dlen2, 8);
-       len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
+       len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) +
+             ALIGN(wlen, 8) + ALIGN(plen, 8);
        if (move)
                len += plen;
 
@@ -1313,6 +1325,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                p += ALIGN(ilen, 8);
        }
 
+       if (whiteout) {
+               pack_inode(c, p, whiteout, 0);
+               err = ubifs_node_calc_hash(c, p, hash_whiteout_inode);
+               if (err)
+                       goto out_release;
+
+               p += ALIGN(wlen, 8);
+       }
+
        if (!move) {
                pack_inode(c, p, old_dir, 1);
                err = ubifs_node_calc_hash(c, p, hash_old_dir);
@@ -1352,6 +1373,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                if (new_inode)
                        ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
                                                  new_inode->i_ino);
+               if (whiteout)
+                       ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
+                                                 whiteout->i_ino);
        }
        release_head(c, BASEHD);
 
@@ -1368,8 +1392,6 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
                if (err)
                        goto out_ro;
-
-               ubifs_delete_orphan(c, whiteout->i_ino);
        } else {
                err = ubifs_add_dirt(c, lnum, dlen2);
                if (err)
@@ -1390,6 +1412,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                offs += ALIGN(ilen, 8);
        }
 
+       if (whiteout) {
+               ino_key_init(c, &key, whiteout->i_ino);
+               err = ubifs_tnc_add(c, &key, lnum, offs, wlen,
+                                   hash_whiteout_inode);
+               if (err)
+                       goto out_ro;
+               offs += ALIGN(wlen, 8);
+       }
+
        ino_key_init(c, &key, old_dir->i_ino);
        err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
        if (err)
@@ -1410,6 +1441,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                new_ui->synced_i_size = new_ui->ui_size;
                spin_unlock(&new_ui->ui_lock);
        }
+       /*
+        * No need to mark the whiteout inode clean.
+        * The whiteout has zero size, so there is no need to update
+        * synced_i_size for whiteout_ui.
+        */
        mark_inode_clean(c, ubifs_inode(old_dir));
        if (move)
                mark_inode_clean(c, ubifs_inode(new_dir));
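
Taken together, these hunks write the whiteout inode as part of the same journal entry as the rename: wlen bytes are added to the entry's length budget, the whiteout is packed and hashed alongside the other inodes, and it gets its own TNC entry. That is also why the earlier hunk removes the ubifs_delete_orphan(c, whiteout->i_ino) call: the whiteout's persistence is now carried by the rename's journal entry rather than by orphan-list handling.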
index f55828c..008fa46 100644 (file)
@@ -381,7 +381,7 @@ struct ubifs_gced_idx_leb {
  * @ui_mutex exists for two main reasons. At first it prevents inodes from
  * being written back while UBIFS is changing them, being in the middle of a VFS
  * operation. This way UBIFS makes sure the inode fields are consistent. For
- * example, in 'ubifs_rename()' we change 3 inodes simultaneously, and
+ * example, in 'ubifs_rename()' we change 4 inodes simultaneously, and
  * write-back must not write any of them before we have finished.
  *
  * The second reason is budgeting - UBIFS has to budget all operations. If an
index 0cc8742..0e51c00 100644 (file)
@@ -33,7 +33,7 @@ $(obj)/utf8data.c: $(obj)/mkutf8data $(filter %.txt, $(cmd_utf8data)) FORCE
 else
 
 $(obj)/utf8data.c: $(src)/utf8data.c_shipped FORCE
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 
 endif
 
index 0adb970..14e2fb4 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Data verification functions, i.e. hooks for ->readpages()
+ * Data verification functions, i.e. hooks for ->readahead()
  *
  * Copyright 2019 Google LLC
  */
@@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page);
  * that fail verification are set to the Error state.  Verification is skipped
  * for pages already in the Error state, e.g. due to fscrypt decryption failure.
  *
- * This is a helper function for use by the ->readpages() method of filesystems
+ * This is a helper function for use by the ->readahead() method of filesystems
  * that issue bios to read data directly into the page cache.  Filesystems that
  * populate the page cache without issuing bios (e.g. non block-based
  * filesystems) must instead call fsverity_verify_page() directly on each page.
index 353e53b..b52ed33 100644 (file)
@@ -82,6 +82,24 @@ xfs_prealloc_blocks(
 }
 
 /*
+ * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
+ * guarantee that we can refill the AGFL prior to allocating space in a nearly
+ * full AG.  Although the space described by the free space btrees, the
+ * blocks used by the freesp btrees themselves, and the blocks owned by the
+ * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
+ * free space in the AG drop so low that the free space btrees cannot refill an
+ * empty AGFL up to the minimum level.  Rather than grind through empty AGs
+ * until the fs goes down, we subtract this many AG blocks from the incore
+ * fdblocks to ensure user allocation does not overcommit the space the
+ * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
+ * withhold space from xfs_mod_fdblocks, so we do not account for that here.
+ */
+#define XFS_ALLOCBT_AGFL_RESERVE       4
+
+/*
+ * Compute the number of blocks that we set aside to guarantee the ability to
+ * refill the AGFL and handle a full bmap btree split.
+ *
  * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
  * AGF buffer (PV 947395), we place constraints on the relationship among
  * actual allocations for data blocks, freelist blocks, and potential file data
@@ -93,14 +111,14 @@ xfs_prealloc_blocks(
  * extents need to be actually allocated. To get around this, we explicitly set
  * aside a few blocks which will not be reserved in delayed allocation.
  *
- * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
- * potential split of the file's bmap btree.
+ * For each AG, we need to reserve enough blocks to replenish a totally empty
+ * AGFL and 4 more to handle a potential split of the file's bmap btree.
  */
 unsigned int
 xfs_alloc_set_aside(
        struct xfs_mount        *mp)
 {
-       return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
+       return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
 }
 
 /*
@@ -124,12 +142,12 @@ xfs_alloc_ag_max_usable(
        unsigned int            blocks;
 
        blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
-       blocks += XFS_ALLOC_AGFL_RESERVE;
+       blocks += XFS_ALLOCBT_AGFL_RESERVE;
        blocks += 3;                    /* AGF, AGI btree root blocks */
        if (xfs_has_finobt(mp))
                blocks++;               /* finobt root block */
        if (xfs_has_rmapbt(mp))
-               blocks++;               /* rmap root block */
+               blocks++;               /* rmap root block */
        if (xfs_has_reflink(mp))
                blocks++;               /* refcount root block */
 
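
As a quick worked example of the formula above: on a 16-AG filesystem, xfs_alloc_set_aside() withholds 16 * (XFS_ALLOCBT_AGFL_RESERVE + 4) = 16 * 8 = 128 blocks from the incore fdblocks count, numerically the same as before the rename, since the removed XFS_ALLOC_AGFL_RESERVE was also 4.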
index 1c14a0b..d4c057b 100644 (file)
@@ -88,7 +88,6 @@ typedef struct xfs_alloc_arg {
 #define XFS_ALLOC_NOBUSY               (1 << 2)/* Busy extents not allowed */
 
 /* freespace limit calculations */
-#define XFS_ALLOC_AGFL_RESERVE 4
 unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
 unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
 
index 32fa029..ae4345b 100644 (file)
@@ -9,39 +9,6 @@ static inline unsigned int bio_max_vecs(unsigned int count)
        return bio_max_segs(howmany(count, PAGE_SIZE));
 }
 
-static void
-xfs_flush_bdev_async_endio(
-       struct bio      *bio)
-{
-       complete(bio->bi_private);
-}
-
-/*
- * Submit a request for an async cache flush to run. If the request queue does
- * not require flush operations, just skip it altogether. If the caller needs
- * to wait for the flush completion at a later point in time, they must supply a
- * valid completion. This will be signalled when the flush completes.  The
- * caller never sees the bio that is issued here.
- */
-void
-xfs_flush_bdev_async(
-       struct bio              *bio,
-       struct block_device     *bdev,
-       struct completion       *done)
-{
-       struct request_queue    *q = bdev->bd_disk->queue;
-
-       if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-               complete(done);
-               return;
-       }
-
-       bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
-       bio->bi_private = done;
-       bio->bi_end_io = xfs_flush_bdev_async_endio;
-
-       submit_bio(bio);
-}
 int
 xfs_rw_bdev(
        struct block_device     *bdev,
index 33e2669..68f7454 100644 (file)
@@ -17,6 +17,7 @@
 #include "xfs_fsops.h"
 #include "xfs_trans_space.h"
 #include "xfs_log.h"
+#include "xfs_log_priv.h"
 #include "xfs_ag.h"
 #include "xfs_ag_resv.h"
 #include "xfs_trace.h"
@@ -347,7 +348,7 @@ xfs_fs_counts(
        cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
        cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
        cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
-                                               mp->m_alloc_set_aside;
+                                               xfs_fdblocks_unavailable(mp);
 
        spin_lock(&mp->m_sb_lock);
        cnt->freertx = mp->m_sb.sb_frextents;
@@ -430,46 +431,36 @@ xfs_reserve_blocks(
         * If the request is larger than the current reservation, reserve the
         * blocks before we update the reserve counters. Sample m_fdblocks and
         * perform a partial reservation if the request exceeds free space.
+        *
+        * The code below estimates how many blocks it can request from
+        * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
+        * race since fdblocks updates are not always coordinated via
+        * m_sb_lock.  Set the reserve size even if there's not enough free
+        * space to fill it because mod_fdblocks will refill an undersized
+        * reserve when it can.
         */
-       error = -ENOSPC;
-       do {
-               free = percpu_counter_sum(&mp->m_fdblocks) -
-                                               mp->m_alloc_set_aside;
-               if (free <= 0)
-                       break;
-
-               delta = request - mp->m_resblks;
-               lcounter = free - delta;
-               if (lcounter < 0)
-                       /* We can't satisfy the request, just get what we can */
-                       fdblks_delta = free;
-               else
-                       fdblks_delta = delta;
-
+       free = percpu_counter_sum(&mp->m_fdblocks) -
+                                               xfs_fdblocks_unavailable(mp);
+       delta = request - mp->m_resblks;
+       mp->m_resblks = request;
+       if (delta > 0 && free > 0) {
                /*
                 * We'll either succeed in getting space from the free block
-                * count or we'll get an ENOSPC. If we get a ENOSPC, it means
-                * things changed while we were calculating fdblks_delta and so
-                * we should try again to see if there is anything left to
-                * reserve.
+                * count or we'll get an ENOSPC.  Don't set the reserved flag
+                * here - we don't want to reserve the extra reserve blocks
+                * from the reserve.
                 *
-                * Don't set the reserved flag here - we don't want to reserve
-                * the extra reserve blocks from the reserve.....
+                * The desired reserve size can change after we drop the lock.
+                * Use mod_fdblocks to put the space into the reserve or into
+                * fdblocks as appropriate.
                 */
+               fdblks_delta = min(free, delta);
                spin_unlock(&mp->m_sb_lock);
                error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+               if (!error)
+                       xfs_mod_fdblocks(mp, fdblks_delta, 0);
                spin_lock(&mp->m_sb_lock);
-       } while (error == -ENOSPC);
-
-       /*
-        * Update the reserve counters if blocks have been successfully
-        * allocated.
-        */
-       if (!error && fdblks_delta) {
-               mp->m_resblks += fdblks_delta;
-               mp->m_resblks_avail += fdblks_delta;
        }
-
 out:
        if (outval) {
                outval->resblks = mp->m_resblks;
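
The loop-until-ENOSPC retry is gone: the new code records the requested pool size immediately and moves at most min(free, delta) blocks out of fdblocks, leaving xfs_mod_fdblocks() to top the pool up later. A standalone model of that clamping (all values are assumptions for illustration):

#include <stdio.h>

static long long min_ll(long long a, long long b)
{
	return a < b ? a : b;
}

int main(void)
{
	long long request = 8192;	/* assumed desired reserve pool size, in blocks */
	long long resblks = 1024;	/* assumed current pool size */
	long long free = 3000;		/* assumed free blocks after xfs_fdblocks_unavailable() */
	long long delta, fdblks_delta = 0;

	delta = request - resblks;
	resblks = request;		/* the target is recorded even if it cannot be met yet */

	if (delta > 0 && free > 0)
		fdblks_delta = min_ll(free, delta);

	printf("pool target=%lld moved now=%lld still short=%lld\n",
	       resblks, fdblks_delta, delta - fdblks_delta);
	return 0;
}

Here the pool target becomes 8192 immediately, 3000 blocks move into the reserve now, and the remaining 4168 are picked up opportunistically by later xfs_mod_fdblocks() calls, as the new comment describes.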
@@ -528,8 +519,11 @@ xfs_do_force_shutdown(
        int             tag;
        const char      *why;
 
-       if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate))
+
+       if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
+               xlog_shutdown_wait(mp->m_log);
                return;
+       }
        if (mp->m_sb_bp)
                mp->m_sb_bp->b_flags |= XBF_DONE;
 
index 20186c5..bffd6eb 100644 (file)
@@ -883,7 +883,7 @@ xfs_reclaim_inode(
         */
        if (xlog_is_shutdown(ip->i_mount->m_log)) {
                xfs_iunpin_wait(ip);
-               xfs_iflush_abort(ip);
+               xfs_iflush_shutdown_abort(ip);
                goto reclaim;
        }
        if (xfs_ipincount(ip))
index 26227d2..9de6205 100644 (file)
@@ -3631,7 +3631,7 @@ xfs_iflush_cluster(
 
        /*
         * We must use the safe variant here as on shutdown xfs_iflush_abort()
-        * can remove itself from the list.
+        * will remove itself from the list.
         */
        list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
                iip = (struct xfs_inode_log_item *)lip;
index 11158fa..9e6ef55 100644 (file)
@@ -544,10 +544,17 @@ xfs_inode_item_push(
        uint                    rval = XFS_ITEM_SUCCESS;
        int                     error;
 
-       ASSERT(iip->ili_item.li_buf);
+       if (!bp || (ip->i_flags & XFS_ISTALE)) {
+               /*
+                * Inode item/buffer is being aborted due to cluster
+                * buffer deletion. Trigger a log force to have that operation
+                * completed and items removed from the AIL before the next push
+                * attempt.
+                */
+               return XFS_ITEM_PINNED;
+       }
 
-       if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
-           (ip->i_flags & XFS_ISTALE))
+       if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
 
        if (xfs_iflags_test(ip, XFS_IFLUSHING))
@@ -834,46 +841,143 @@ xfs_buf_inode_io_fail(
 }
 
 /*
- * This is the inode flushing abort routine.  It is called when
- * the filesystem is shutting down to clean up the inode state.  It is
- * responsible for removing the inode item from the AIL if it has not been
- * re-logged and clearing the inode's flush state.
+ * Clear the inode logging fields so no more flushes are attempted.  If we are
+ * on a buffer list, it is now safe to remove it because the buffer is
+ * guaranteed to be locked. The caller will drop the reference to the buffer
+ * the log item held.
+ */
+static void
+xfs_iflush_abort_clean(
+       struct xfs_inode_log_item *iip)
+{
+       iip->ili_last_fields = 0;
+       iip->ili_fields = 0;
+       iip->ili_fsync_fields = 0;
+       iip->ili_flush_lsn = 0;
+       iip->ili_item.li_buf = NULL;
+       list_del_init(&iip->ili_item.li_bio_list);
+}
+
+/*
+ * Abort flushing the inode from a context holding the cluster buffer locked.
+ *
+ * This is the normal runtime method of aborting writeback of an inode that is
+ * attached to a cluster buffer. It occurs when the inode and the backing
+ * cluster buffer have been freed (i.e. inode is XFS_ISTALE), or when cluster
+ * flushing or buffer IO completion encounters a log shutdown situation.
+ *
+ * If we need to abort inode writeback and we don't already hold the buffer
+ * locked, call xfs_iflush_shutdown_abort() instead as this should only ever be
+ * necessary in a shutdown situation.
  */
 void
 xfs_iflush_abort(
        struct xfs_inode        *ip)
 {
        struct xfs_inode_log_item *iip = ip->i_itemp;
-       struct xfs_buf          *bp = NULL;
+       struct xfs_buf          *bp;
 
-       if (iip) {
-               /*
-                * Clear the failed bit before removing the item from the AIL so
-                * xfs_trans_ail_delete() doesn't try to clear and release the
-                * buffer attached to the log item before we are done with it.
-                */
-               clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
-               xfs_trans_ail_delete(&iip->ili_item, 0);
+       if (!iip) {
+               /* clean inode, nothing to do */
+               xfs_iflags_clear(ip, XFS_IFLUSHING);
+               return;
+       }
+
+       /*
+        * Remove the inode item from the AIL before we clear its internal
+        * state. Whilst the inode is in the AIL, it should have a valid buffer
+        * pointer for push operations to access - it is only safe to remove the
+        * inode from the buffer once it has been removed from the AIL.
+        *
+        * We also clear the failed bit before removing the item from the AIL
+        * as xfs_trans_ail_delete()->xfs_clear_li_failed() will release buffer
+        * references the inode item owns and needs to hold until we've fully
+        * aborted the inode log item and detached it from the buffer.
+        */
+       clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
+       xfs_trans_ail_delete(&iip->ili_item, 0);
+
+       /*
+        * Grab the inode buffer so can we release the reference the inode log
+        * item holds on it.
+        */
+       spin_lock(&iip->ili_lock);
+       bp = iip->ili_item.li_buf;
+       xfs_iflush_abort_clean(iip);
+       spin_unlock(&iip->ili_lock);
 
+       xfs_iflags_clear(ip, XFS_IFLUSHING);
+       if (bp)
+               xfs_buf_rele(bp);
+}
+
+/*
+ * Abort an inode flush in the case of a shutdown filesystem. This can be called
+ * from anywhere with just an inode reference and does not require holding the
+ * inode cluster buffer locked. If the inode is attached to a cluster buffer,
+ * it will grab and lock it safely, then abort the inode flush.
+ */
+void
+xfs_iflush_shutdown_abort(
+       struct xfs_inode        *ip)
+{
+       struct xfs_inode_log_item *iip = ip->i_itemp;
+       struct xfs_buf          *bp;
+
+       if (!iip) {
+               /* clean inode, nothing to do */
+               xfs_iflags_clear(ip, XFS_IFLUSHING);
+               return;
+       }
+
+       spin_lock(&iip->ili_lock);
+       bp = iip->ili_item.li_buf;
+       if (!bp) {
+               spin_unlock(&iip->ili_lock);
+               xfs_iflush_abort(ip);
+               return;
+       }
+
+       /*
+        * We have to take a reference to the buffer so that it doesn't get
+        * freed when we drop the ili_lock and then wait to lock the buffer.
+        * We'll clean up the extra reference after we pick up the ili_lock
+        * again.
+        */
+       xfs_buf_hold(bp);
+       spin_unlock(&iip->ili_lock);
+       xfs_buf_lock(bp);
+
+       spin_lock(&iip->ili_lock);
+       if (!iip->ili_item.li_buf) {
                /*
-                * Clear the inode logging fields so no more flushes are
-                * attempted.
+                * Raced with another removal; we now hold the only reference
+                * to bp. The inode should not be in the AIL, so just clean
+                * up and return.
                 */
-               spin_lock(&iip->ili_lock);
-               iip->ili_last_fields = 0;
-               iip->ili_fields = 0;
-               iip->ili_fsync_fields = 0;
-               iip->ili_flush_lsn = 0;
-               bp = iip->ili_item.li_buf;
-               iip->ili_item.li_buf = NULL;
-               list_del_init(&iip->ili_item.li_bio_list);
+               ASSERT(list_empty(&iip->ili_item.li_bio_list));
+               ASSERT(!test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags));
+               xfs_iflush_abort_clean(iip);
                spin_unlock(&iip->ili_lock);
+               xfs_iflags_clear(ip, XFS_IFLUSHING);
+               xfs_buf_relse(bp);
+               return;
        }
-       xfs_iflags_clear(ip, XFS_IFLUSHING);
-       if (bp)
-               xfs_buf_rele(bp);
+
+       /*
+        * Got two references to bp. The first will get dropped by
+        * xfs_iflush_abort() when the item is removed from the buffer list, but
+        * we can't drop our reference until _abort() returns because we have to
+        * unlock the buffer as well. Hence we abort and then unlock and release
+        * our reference to the buffer.
+        */
+       ASSERT(iip->ili_item.li_buf == bp);
+       spin_unlock(&iip->ili_lock);
+       xfs_iflush_abort(ip);
+       xfs_buf_relse(bp);
 }
 
+
 /*
  * convert an xfs_inode_log_format struct from the old 32 bit version
  * (which can have different field alignments) to the native 64 bit version
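
xfs_iflush_shutdown_abort() follows the classic pattern for taking a sleeping lock that ranks above a spinlock: pin the object with a reference, drop the spinlock, take the sleeping lock, then re-take the spinlock and re-check that nothing changed. A user-space sketch of that shape (names and types are made up for illustration; pthread mutexes stand in for both the spinlock and the buffer lock):

#include <pthread.h>
#include <stdio.h>

struct buf {
	pthread_mutex_t lock;		/* stands in for the cluster buffer lock (sleeping) */
	int ref;
};

struct item {
	pthread_mutex_t ili_lock;	/* stands in for the ili_lock spinlock */
	struct buf *buf;		/* stands in for ili_item.li_buf */
};

static void shutdown_abort(struct item *it)
{
	struct buf *bp;

	pthread_mutex_lock(&it->ili_lock);
	bp = it->buf;
	if (!bp) {
		pthread_mutex_unlock(&it->ili_lock);
		return;			/* nothing attached: plain abort path */
	}
	bp->ref++;			/* keep bp alive across the unlock */
	pthread_mutex_unlock(&it->ili_lock);

	pthread_mutex_lock(&bp->lock);	/* safe to block here, no spinlock held */

	pthread_mutex_lock(&it->ili_lock);
	if (it->buf != bp) {
		/* raced with another abort: undo and bail */
		pthread_mutex_unlock(&it->ili_lock);
		pthread_mutex_unlock(&bp->lock);
		bp->ref--;
		return;
	}
	pthread_mutex_unlock(&it->ili_lock);
	/* ... abort the flush with the buffer locked ... */
	pthread_mutex_unlock(&bp->lock);
	bp->ref--;
}

int main(void)
{
	struct buf b = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct item it = { PTHREAD_MUTEX_INITIALIZER, &b };

	shutdown_abort(&it);
	printf("buffer refcount back to %d\n", b.ref);
	return 0;
}

The kernel version additionally asserts that the raced case left the item off the AIL and clears the flushing flag, but the reference/lock choreography is the same.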
index 1a30200..bbd836a 100644 (file)
@@ -44,6 +44,7 @@ static inline int xfs_inode_clean(struct xfs_inode *ip)
 extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
 extern void xfs_inode_item_destroy(struct xfs_inode *);
 extern void xfs_iflush_abort(struct xfs_inode *);
+extern void xfs_iflush_shutdown_abort(struct xfs_inode *);
 extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
                                         struct xfs_inode_log_format *);
 
index 09a8fba..cb9105d 100644 (file)
@@ -197,8 +197,6 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
 
 int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
                char *data, unsigned int op);
-void xfs_flush_bdev_async(struct bio *bio, struct block_device *bdev,
-               struct completion *done);
 
 #define ASSERT_ALWAYS(expr)    \
        (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
index a8034c0..499e15b 100644 (file)
@@ -487,7 +487,10 @@ out_error:
  * Run all the pending iclog callbacks and wake log force waiters and iclog
  * space waiters so they can process the newly set shutdown state. We really
  * don't care what order we process callbacks here because the log is shut down
- * and so state cannot change on disk anymore.
+ * and so state cannot change on disk anymore. However, we cannot wake waiters
+ * until the callbacks have been processed because we may be in unmount and
+ * we must ensure that all AIL operations the callbacks perform have completed
+ * before we tear down the AIL.
  *
  * We avoid processing actively referenced iclogs so that we don't run callbacks
  * while the iclog owner might still be preparing the iclog for IO submssion.
@@ -501,7 +504,6 @@ xlog_state_shutdown_callbacks(
        struct xlog_in_core     *iclog;
        LIST_HEAD(cb_list);
 
-       spin_lock(&log->l_icloglock);
        iclog = log->l_iclog;
        do {
                if (atomic_read(&iclog->ic_refcnt)) {
@@ -509,26 +511,22 @@ xlog_state_shutdown_callbacks(
                        continue;
                }
                list_splice_init(&iclog->ic_callbacks, &cb_list);
+               spin_unlock(&log->l_icloglock);
+
+               xlog_cil_process_committed(&cb_list);
+
+               spin_lock(&log->l_icloglock);
                wake_up_all(&iclog->ic_write_wait);
                wake_up_all(&iclog->ic_force_wait);
        } while ((iclog = iclog->ic_next) != log->l_iclog);
 
        wake_up_all(&log->l_flush_wait);
-       spin_unlock(&log->l_icloglock);
-
-       xlog_cil_process_committed(&cb_list);
 }
 
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and the
  * it is in the WANT_SYNC state.
  *
- * If the caller passes in a non-zero @old_tail_lsn and the current log tail
- * does not match, there may be metadata on disk that must be persisted before
- * this iclog is written.  To satisfy that requirement, set the
- * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
- * log tail value.
- *
  * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
  * log tail is updated correctly. NEED_FUA indicates that the iclog will be
  * written to stable storage, and implies that a commit record is contained
@@ -545,12 +543,10 @@ xlog_state_shutdown_callbacks(
  * always capture the tail lsn on the iclog on the first NEED_FUA release
  * regardless of the number of active reference counts on this iclog.
  */
-
 int
 xlog_state_release_iclog(
        struct xlog             *log,
-       struct xlog_in_core     *iclog,
-       xfs_lsn_t               old_tail_lsn)
+       struct xlog_in_core     *iclog)
 {
        xfs_lsn_t               tail_lsn;
        bool                    last_ref;
@@ -561,18 +557,14 @@ xlog_state_release_iclog(
        /*
         * Grabbing the current log tail needs to be atomic w.r.t. the writing
         * of the tail LSN into the iclog so we guarantee that the log tail does
-        * not move between deciding if a cache flush is required and writing
-        * the LSN into the iclog below.
+        * not move between the first time we know that the iclog needs to be
+        * made stable and when we eventually submit it.
         */
-       if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+       if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+            (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
+           !iclog->ic_header.h_tail_lsn) {
                tail_lsn = xlog_assign_tail_lsn(log->l_mp);
-
-               if (old_tail_lsn && tail_lsn != old_tail_lsn)
-                       iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
-
-               if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
-                   !iclog->ic_header.h_tail_lsn)
-                       iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
        }
 
        last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
@@ -583,11 +575,8 @@ xlog_state_release_iclog(
                 * pending iclog callbacks that were waiting on the release of
                 * this iclog.
                 */
-               if (last_ref) {
-                       spin_unlock(&log->l_icloglock);
+               if (last_ref)
                        xlog_state_shutdown_callbacks(log);
-                       spin_lock(&log->l_icloglock);
-               }
                return -EIO;
        }
 
@@ -600,8 +589,6 @@ xlog_state_release_iclog(
        }
 
        iclog->ic_state = XLOG_STATE_SYNCING;
-       if (!iclog->ic_header.h_tail_lsn)
-               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
        xlog_verify_tail_lsn(log, iclog);
        trace_xlog_iclog_syncing(iclog, _RET_IP_);
 
@@ -873,7 +860,7 @@ xlog_force_iclog(
        iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
        if (iclog->ic_state == XLOG_STATE_ACTIVE)
                xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
-       return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+       return xlog_state_release_iclog(iclog->ic_log, iclog);
 }
 
 /*
@@ -1373,7 +1360,7 @@ xlog_ioend_work(
         */
        if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
                xfs_alert(log->l_mp, "log I/O error %d", error);
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
 
        xlog_state_done_syncing(iclog);
@@ -1912,7 +1899,7 @@ xlog_write_iclog(
        iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
 
        if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
                return;
        }
        if (is_vmalloc_addr(iclog->ic_data))
@@ -2411,7 +2398,7 @@ xlog_write_copy_finish(
                ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
                        xlog_is_shutdown(log));
 release_iclog:
-       error = xlog_state_release_iclog(log, iclog, 0);
+       error = xlog_state_release_iclog(log, iclog);
        spin_unlock(&log->l_icloglock);
        return error;
 }
@@ -2487,7 +2474,7 @@ xlog_write(
                xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
                     "ctx ticket reservation ran out. Need to up reservation");
                xlog_print_tic_res(log->l_mp, ticket);
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
 
        len = xlog_write_calc_vec_length(ticket, log_vector, optype);
@@ -2628,7 +2615,7 @@ next_lv:
 
        spin_lock(&log->l_icloglock);
        xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
-       error = xlog_state_release_iclog(log, iclog, 0);
+       error = xlog_state_release_iclog(log, iclog);
        spin_unlock(&log->l_icloglock);
 
        return error;
@@ -3052,7 +3039,7 @@ restart:
                 * reference to the iclog.
                 */
                if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
-                       error = xlog_state_release_iclog(log, iclog, 0);
+                       error = xlog_state_release_iclog(log, iclog);
                spin_unlock(&log->l_icloglock);
                if (error)
                        return error;
@@ -3821,9 +3808,10 @@ xlog_verify_iclog(
 #endif
 
 /*
- * Perform a forced shutdown on the log. This should be called once and once
- * only by the high level filesystem shutdown code to shut the log subsystem
- * down cleanly.
+ * Perform a forced shutdown on the log.
+ *
+ * This can be called from low level log code to trigger a shutdown, or from the
+ * high level mount shutdown code when the mount shuts down.
  *
  * Our main objectives here are to make sure that:
  *     a. if the shutdown was not due to a log IO error, flush the logs to
@@ -3832,6 +3820,8 @@ xlog_verify_iclog(
  *        parties to find out. Nothing new gets queued after this is done.
  *     c. Tasks sleeping on log reservations, pinned objects and
  *        other resources get woken up.
+ *     d. The mount is also marked as shut down so that log triggered shutdowns
+ *        still behave the same as if they called xfs_forced_shutdown().
  *
  * Return true if the shutdown cause was a log IO error and we actually shut the
  * log down.
@@ -3843,25 +3833,25 @@ xlog_force_shutdown(
 {
        bool            log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
 
-       /*
-        * If this happens during log recovery then we aren't using the runtime
-        * log mechanisms yet so there's nothing to shut down.
-        */
-       if (!log || xlog_in_recovery(log))
+       if (!log)
                return false;
 
-       ASSERT(!xlog_is_shutdown(log));
-
        /*
         * Flush all the completed transactions to disk before marking the log
         * being shut down. We need to do this first as shutting down the log
         * before the force will prevent the log force from flushing the iclogs
         * to disk.
         *
-        * Re-entry due to a log IO error shutdown during the log force is
-        * prevented by the atomicity of higher level shutdown code.
+        * When we are in recovery, there are no transactions to flush, and
+        * we don't want to touch the log because we don't want to perturb the
+        * current head/tail for future recovery attempts. Hence we need to
+        * avoid a log force in this case.
+        *
+        * If we are shutting down due to a log IO error, then we must avoid
+        * trying to write the log as that may just result in more IO errors and
+        * an endless shutdown/force loop.
         */
-       if (!log_error)
+       if (!log_error && !xlog_in_recovery(log))
                xfs_log_force(log->l_mp, XFS_LOG_SYNC);
 
        /*
@@ -3878,12 +3868,25 @@ xlog_force_shutdown(
        spin_lock(&log->l_icloglock);
        if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
                spin_unlock(&log->l_icloglock);
-               ASSERT(0);
                return false;
        }
        spin_unlock(&log->l_icloglock);
 
        /*
+        * If this log shutdown also sets the mount shutdown state, issue a
+        * shutdown warning message.
+        */
+       if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
+               xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
+"Filesystem has been shut down due to log error (0x%x).",
+                               shutdown_flags);
+               xfs_alert(log->l_mp,
+"Please unmount the filesystem and rectify the problem(s).");
+               if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+                       xfs_stack_trace();
+       }
+
+       /*
         * We don't want anybody waiting for log reservations after this. That
         * means we have to wake up everybody queued up on reserveq as well as
         * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
@@ -3903,8 +3906,12 @@ xlog_force_shutdown(
        wake_up_all(&log->l_cilp->xc_start_wait);
        wake_up_all(&log->l_cilp->xc_commit_wait);
        spin_unlock(&log->l_cilp->xc_push_lock);
+
+       spin_lock(&log->l_icloglock);
        xlog_state_shutdown_callbacks(log);
+       spin_unlock(&log->l_icloglock);
 
+       wake_up_var(&log->l_opstate);
        return log_error;
 }
 
index 796e446..ba57323 100644 (file)
@@ -540,7 +540,7 @@ xlog_cil_insert_items(
        spin_unlock(&cil->xc_cil_lock);
 
        if (tp->t_ticket->t_curr_res < 0)
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
 }
 
 static void
@@ -705,11 +705,21 @@ xlog_cil_set_ctx_write_state(
                 * The LSN we need to pass to the log items on transaction
                 * commit is the LSN reported by the first log vector write, not
                 * the commit lsn. If we use the commit record lsn then we can
-                * move the tail beyond the grant write head.
+                * move the grant write head beyond the tail LSN and overwrite
+                * it.
                 */
                ctx->start_lsn = lsn;
                wake_up_all(&cil->xc_start_wait);
                spin_unlock(&cil->xc_push_lock);
+
+               /*
+                * Make sure the metadata we are about to overwrite in the log
+                * has been flushed to stable storage before this iclog is
+                * issued.
+                */
+               spin_lock(&cil->xc_log->l_icloglock);
+               iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+               spin_unlock(&cil->xc_log->l_icloglock);
                return;
        }
 
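
Setting XLOG_ICL_NEED_FLUSH on the iclog that carries the start of the checkpoint is what replaces the asynchronous block-device pre-flush removed later in this file (and the now-deleted xfs_flush_bdev_async() helper in xfs_bio_io.c): the cache flush that orders already-written metadata ahead of the checkpoint now rides on the first iclog itself instead of being issued and waited on separately.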
@@ -854,7 +864,7 @@ xlog_cil_write_commit_record(
 
        error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS);
        if (error)
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        return error;
 }
 
@@ -888,10 +898,7 @@ xlog_cil_push_work(
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
-       xfs_lsn_t               preflush_tail_lsn;
        xfs_csn_t               push_seq;
-       struct bio              bio;
-       DECLARE_COMPLETION_ONSTACK(bdev_flush);
        bool                    push_commit_stable;
 
        new_ctx = xlog_cil_ctx_alloc();
@@ -962,23 +969,6 @@ xlog_cil_push_work(
        spin_unlock(&cil->xc_push_lock);
 
        /*
-        * The CIL is stable at this point - nothing new will be added to it
-        * because we hold the flush lock exclusively. Hence we can now issue
-        * a cache flush to ensure all the completed metadata in the journal we
-        * are about to overwrite is on stable storage.
-        *
-        * Because we are issuing this cache flush before we've written the
-        * tail lsn to the iclog, we can have metadata IO completions move the
-        * tail forwards between the completion of this flush and the iclog
-        * being written. In this case, we need to re-issue the cache flush
-        * before the iclog write. To detect whether the log tail moves, sample
-        * the tail LSN *before* we issue the flush.
-        */
-       preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
-       xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
-                               &bdev_flush);
-
-       /*
         * Pull all the log vectors off the items in the CIL, and remove the
         * items from the CIL. We don't need the CIL lock here because it's only
         * needed on the transaction commit side which is currently locked out
@@ -1054,12 +1044,6 @@ xlog_cil_push_work(
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;
 
-       /*
-        * Before we format and submit the first iclog, we have to ensure that
-        * the metadata writeback ordering cache flush is complete.
-        */
-       wait_for_completion(&bdev_flush);
-
        error = xlog_cil_write_chain(ctx, &lvhdr);
        if (error)
                goto out_abort_free_ticket;
@@ -1118,7 +1102,7 @@ xlog_cil_push_work(
        if (push_commit_stable &&
            ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
                xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
-       xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn);
+       xlog_state_release_iclog(log, ctx->commit_iclog);
 
        /* Not safe to reference ctx now! */
 
@@ -1139,7 +1123,7 @@ out_abort_free_ticket:
                return;
        }
        spin_lock(&log->l_icloglock);
-       xlog_state_release_iclog(log, ctx->commit_iclog, 0);
+       xlog_state_release_iclog(log, ctx->commit_iclog);
        /* Not safe to reference ctx now! */
        spin_unlock(&log->l_icloglock);
 }
index 23103d6..401cdc4 100644 (file)
@@ -484,6 +484,17 @@ xlog_is_shutdown(struct xlog *log)
        return test_bit(XLOG_IO_ERROR, &log->l_opstate);
 }
 
+/*
+ * Wait until the xlog_force_shutdown() has marked the log as shut down
+ * so xlog_is_shutdown() will always return true.
+ */
+static inline void
+xlog_shutdown_wait(
+       struct xlog     *log)
+{
+       wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
+}
+
 /* common routines */
 extern int
 xlog_recover(
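
The new xlog_shutdown_wait() helper pairs with the wake_up_var(&log->l_opstate) added at the end of xlog_force_shutdown() and with the call added in xfs_do_force_shutdown(): a second shutdown caller now blocks until the log has actually been marked shut down, so xlog_is_shutdown() is guaranteed to return true once xfs_do_force_shutdown() returns.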
@@ -524,8 +535,7 @@ void        xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
 
 void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
                int eventual_size);
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
-               xfs_lsn_t log_tail_lsn);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
index 96c997e..c4ad429 100644 (file)
@@ -2485,7 +2485,7 @@ xlog_finish_defer_ops(
                error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
                                dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
                if (error) {
-                       xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
+                       xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
                        return error;
                }
 
@@ -2519,21 +2519,22 @@ xlog_abort_defer_ops(
                xfs_defer_ops_capture_free(mp, dfc);
        }
 }
+
 /*
  * When this is called, all of the log intent items which did not have
- * corresponding log done items should be in the AIL.  What we do now
- * is update the data structures associated with each one.
+ * corresponding log done items should be in the AIL.  What we do now is update
+ * the data structures associated with each one.
  *
- * Since we process the log intent items in normal transactions, they
- * will be removed at some point after the commit.  This prevents us
- * from just walking down the list processing each one.  We'll use a
- * flag in the intent item to skip those that we've already processed
- * and use the AIL iteration mechanism's generation count to try to
- * speed this up at least a bit.
+ * Since we process the log intent items in normal transactions, they will be
+ * removed at some point after the commit.  This prevents us from just walking
+ * down the list processing each one.  We'll use a flag in the intent item to
+ * skip those that we've already processed and use the AIL iteration mechanism's
+ * generation count to try to speed this up at least a bit.
  *
- * When we start, we know that the intents are the only things in the
- * AIL.  As we process them, however, other items are added to the
- * AIL.
+ * When we start, we know that the intents are the only things in the AIL. As we
+ * process them, however, other items are added to the AIL. Hence we know we
+ * have started recovery on all the pending intents when we find a non-intent
+ * item in the AIL.
  */
 STATIC int
 xlog_recover_process_intents(
@@ -2556,17 +2557,8 @@ xlog_recover_process_intents(
        for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
             lip != NULL;
             lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
-               /*
-                * We're done when we see something other than an intent.
-                * There should be no intents left in the AIL now.
-                */
-               if (!xlog_item_is_intent(lip)) {
-#ifdef DEBUG
-                       for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
-                               ASSERT(!xlog_item_is_intent(lip));
-#endif
+               if (!xlog_item_is_intent(lip))
                        break;
-               }
 
                /*
                 * We should never see a redo item with a LSN higher than
@@ -2607,8 +2599,9 @@ err:
 }
 
 /*
- * A cancel occurs when the mount has failed and we're bailing out.
- * Release all pending log intent items so they don't pin the AIL.
+ * A cancel occurs when the mount has failed and we're bailing out.  Release all
+ * pending log intent items that we haven't started recovery on so they don't
+ * pin the AIL.
  */
 STATIC void
 xlog_recover_cancel_intents(
@@ -2622,17 +2615,8 @@ xlog_recover_cancel_intents(
        spin_lock(&ailp->ail_lock);
        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
        while (lip != NULL) {
-               /*
-                * We're done when we see something other than an intent.
-                * There should be no intents left in the AIL now.
-                */
-               if (!xlog_item_is_intent(lip)) {
-#ifdef DEBUG
-                       for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
-                               ASSERT(!xlog_item_is_intent(lip));
-#endif
+               if (!xlog_item_is_intent(lip))
                        break;
-               }
 
                spin_unlock(&ailp->ail_lock);
                lip->li_ops->iop_release(lip);
@@ -3470,7 +3454,7 @@ xlog_recover_finish(
                 */
                xlog_recover_cancel_intents(log);
                xfs_alert(log->l_mp, "Failed to recover intents");
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
                return error;
        }
 
@@ -3517,7 +3501,7 @@ xlog_recover_finish(
                 * end of intents processing can be pushed through the CIL
                 * and AIL.
                 */
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
 
        return 0;
index bed73e8..c5f153c 100644 (file)
@@ -21,6 +21,7 @@
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_log.h"
+#include "xfs_log_priv.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
@@ -1146,7 +1147,7 @@ xfs_mod_fdblocks(
         * problems (i.e. transaction abort, pagecache discards, etc.) than
         * slightly premature -ENOSPC.
         */
-       set_aside = mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+       set_aside = xfs_fdblocks_unavailable(mp);
        percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
        if (__percpu_counter_compare(&mp->m_fdblocks, set_aside,
                                     XFS_FDBLOCKS_BATCH) >= 0) {
index 00720a0..f6dc19d 100644 (file)
@@ -479,6 +479,21 @@ extern void        xfs_unmountfs(xfs_mount_t *);
  */
 #define XFS_FDBLOCKS_BATCH     1024
 
+/*
+ * Estimate the amount of free space that is not available to userspace and is
+ * not explicitly reserved from the incore fdblocks.  This includes:
+ *
+ * - The minimum number of blocks needed to support splitting a bmap btree
+ * - The blocks currently in use by the freespace btrees because they record
+ *   the actual blocks that will fill per-AG metadata space reservations
+ */
+static inline uint64_t
+xfs_fdblocks_unavailable(
+       struct xfs_mount        *mp)
+{
+       return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+}
+
 extern int     xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
                                 bool reserved);
 extern int     xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
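
xfs_fdblocks_unavailable() gives a single name to the m_alloc_set_aside + m_allocbt_blks sum; the other hunks in this series switch xfs_fs_counts(), xfs_reserve_blocks(), xfs_mod_fdblocks() and xfs_fs_statfs() over to it, so the allocbt blocks are now also withheld from paths that previously subtracted only m_alloc_set_aside.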
index d84714e..54be9d6 100644 (file)
@@ -815,7 +815,8 @@ xfs_fs_statfs(
        spin_unlock(&mp->m_sb_lock);
 
        /* make sure statp->f_bfree does not underflow */
-       statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
+       statp->f_bfree = max_t(int64_t, 0,
+                               fdblocks - xfs_fdblocks_unavailable(mp));
        statp->f_bavail = statp->f_bfree;
 
        fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
index 917a69f..0ac717a 100644 (file)
@@ -836,6 +836,7 @@ __xfs_trans_commit(
        bool                    regrant)
 {
        struct xfs_mount        *mp = tp->t_mountp;
+       struct xlog             *log = mp->m_log;
        xfs_csn_t               commit_seq = 0;
        int                     error = 0;
        int                     sync = tp->t_flags & XFS_TRANS_SYNC;
@@ -864,7 +865,13 @@ __xfs_trans_commit(
        if (!(tp->t_flags & XFS_TRANS_DIRTY))
                goto out_unreserve;
 
-       if (xfs_is_shutdown(mp)) {
+       /*
+        * We must check against log shutdown here because we cannot abort log
+        * items and leave them dirty, inconsistent and unpinned in memory while
+        * the log is active. This leaves them open to being written back to
+        * disk, and that will lead to on-disk corruption.
+        */
+       if (xlog_is_shutdown(log)) {
                error = -EIO;
                goto out_unreserve;
        }
@@ -878,7 +885,7 @@ __xfs_trans_commit(
                xfs_trans_apply_sb_deltas(tp);
        xfs_trans_apply_dquot_deltas(tp);
 
-       xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);
+       xlog_cil_commit(log, tp, &commit_seq, regrant);
 
        xfs_trans_free(tp);
 
@@ -905,10 +912,10 @@ out_unreserve:
         */
        xfs_trans_unreserve_and_mod_dquots(tp);
        if (tp->t_ticket) {
-               if (regrant && !xlog_is_shutdown(mp->m_log))
-                       xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
+               if (regrant && !xlog_is_shutdown(log))
+                       xfs_log_ticket_regrant(log, tp->t_ticket);
                else
-                       xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+                       xfs_log_ticket_ungrant(log, tp->t_ticket);
                tp->t_ticket = NULL;
        }
        xfs_trans_free_items(tp, !!error);
@@ -926,18 +933,27 @@ xfs_trans_commit(
 }
 
 /*
- * Unlock all of the transaction's items and free the transaction.
- * The transaction must not have modified any of its items, because
- * there is no way to restore them to their previous state.
+ * Unlock all of the transaction's items and free the transaction.  If the
+ * transaction is dirty, we must shut down the filesystem because there is no
+ * way to restore them to their previous state.
  *
- * If the transaction has made a log reservation, make sure to release
- * it as well.
+ * If the transaction has made a log reservation, make sure to release it as
+ * well.
+ *
+ * This is a high level function (equivalent to xfs_trans_commit()) and so can
+ * be called after the transaction has effectively been aborted due to the mount
+ * being shut down. However, if the mount has not been shut down and the
+ * transaction is dirty we will shut the mount down and, in doing so, that
+ * guarantees that the log is shut down, too. Hence we don't need to be as
+ * careful with shutdown state and dirty items here as we need to be in
+ * xfs_trans_commit().
  */
 void
 xfs_trans_cancel(
        struct xfs_trans        *tp)
 {
        struct xfs_mount        *mp = tp->t_mountp;
+       struct xlog             *log = mp->m_log;
        bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);
 
        trace_xfs_trans_cancel(tp, _RET_IP_);
@@ -955,16 +971,18 @@ xfs_trans_cancel(
        }
 
        /*
-        * See if the caller is relying on us to shut down the
-        * filesystem.  This happens in paths where we detect
-        * corruption and decide to give up.
+        * See if the caller is relying on us to shut down the filesystem. We
+        * only want an error report if there isn't already a shutdown in
+        * progress, so we only need to check against the mount shutdown state
+        * here.
         */
        if (dirty && !xfs_is_shutdown(mp)) {
                XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        }
 #ifdef DEBUG
-       if (!dirty && !xfs_is_shutdown(mp)) {
+       /* Log items need to be consistent until the log is shut down. */
+       if (!dirty && !xlog_is_shutdown(log)) {
                struct xfs_log_item *lip;
 
                list_for_each_entry(lip, &tp->t_items, li_trans)
@@ -975,7 +993,7 @@ xfs_trans_cancel(
        xfs_trans_unreserve_and_mod_dquots(tp);
 
        if (tp->t_ticket) {
-               xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+               xfs_log_ticket_ungrant(log, tp->t_ticket);
                tp->t_ticket = NULL;
        }
 
index c2ccb98..d3a97a0 100644 (file)
@@ -873,17 +873,17 @@ xfs_trans_ail_delete(
        int                     shutdown_type)
 {
        struct xfs_ail          *ailp = lip->li_ailp;
-       struct xfs_mount        *mp = ailp->ail_log->l_mp;
+       struct xlog             *log = ailp->ail_log;
        xfs_lsn_t               tail_lsn;
 
        spin_lock(&ailp->ail_lock);
        if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
                spin_unlock(&ailp->ail_lock);
-               if (shutdown_type && !xlog_is_shutdown(ailp->ail_log)) {
-                       xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
+               if (shutdown_type && !xlog_is_shutdown(log)) {
+                       xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE,
        "%s: attempting to delete a log item that is not in the AIL",
                                        __func__);
-                       xfs_force_shutdown(mp, shutdown_type);
+                       xlog_force_shutdown(log, shutdown_type);
                }
                return;
        }
index c08758b..c05d2ce 100644 (file)
@@ -269,6 +269,7 @@ bool hv_isolation_type_snp(void);
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
 void *hv_map_memory(void *addr, unsigned long size);
 void hv_unmap_memory(void *addr);
 #else /* CONFIG_HYPERV */
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
new file mode 100644 (file)
index 0000000..c845493
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
+#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
+
+#define CLK_OSC32K             0
+#define CLK_OSC32K_FANOUT      1
+#define CLK_IOSC               2
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */
index 338aa27..edb7f6d 100644 (file)
@@ -80,12 +80,6 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
 
 #ifdef CONFIG_BALLOON_COMPACTION
 extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
-                               isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
-                               struct page *newpage,
-                               struct page *page, enum migrate_mode mode);
 
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
@@ -155,22 +149,6 @@ static inline void balloon_page_delete(struct page *page)
        list_del(&page->lru);
 }
 
-static inline bool balloon_page_isolate(struct page *page)
-{
-       return false;
-}
-
-static inline void balloon_page_putback(struct page *page)
-{
-       return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
-                               struct page *page, enum migrate_mode mode)
-{
-       return 0;
-}
-
 static inline gfp_t balloon_mapping_gfp_mask(void)
 {
        return GFP_HIGHUSER;
index f2ad8ed..652cd05 100644 (file)
@@ -95,7 +95,10 @@ struct blkcg_gq {
 
        spinlock_t                      async_bio_lock;
        struct bio_list                 async_bios;
-       struct work_struct              async_bio_work;
+       union {
+               struct work_struct      async_bio_work;
+               struct work_struct      free_work;
+       };
 
        atomic_t                        use_delay;
        atomic64_t                      delay_nsec;
index dd0763a..1973ef9 100644 (file)
@@ -85,8 +85,10 @@ struct block_device {
  */
 #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
 typedef u32 __bitwise blk_status_t;
+typedef u32 blk_short_t;
 #else
 typedef u8 __bitwise blk_status_t;
+typedef u16 blk_short_t;
 #endif
 #define        BLK_STS_OK 0
 #define BLK_STS_NOTSUPP                ((__force blk_status_t)1)
index cf32123..57c8ec4 100644 (file)
@@ -9,4 +9,6 @@
 int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
 int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
 
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
+
 #endif
index 90fd742..a6f6373 100644 (file)
  */
 #ifdef CONFIG_CMA_AREAS
 #define MAX_CMA_AREAS  (1 + CONFIG_CMA_AREAS)
-
-#else
-#define MAX_CMA_AREAS  (0)
-
 #endif
 
 #define CMA_MAX_NAME 64
index 1831608..bbde953 100644 (file)
@@ -275,7 +275,6 @@ enum positive_aop_returns {
        AOP_TRUNCATED_PAGE      = 0x80001,
 };
 
-#define AOP_FLAG_CONT_EXPAND           0x0001 /* called from cont_expand */
 #define AOP_FLAG_NOFS                  0x0002 /* used by filesystem to direct
                                                * helper code (eg buffer layer)
                                                * to clear GFP_FS from alloc */
@@ -338,28 +337,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
        return kiocb->ki_complete == NULL;
 }
 
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
-       size_t written;
-       size_t count;
-       union {
-               char __user *buf;
-               void *data;
-       } arg;
-       int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
-               unsigned long, unsigned long);
-
 struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
@@ -370,12 +347,6 @@ struct address_space_operations {
        /* Mark a folio dirty.  Return true if this dirtied it */
        bool (*dirty_folio)(struct address_space *, struct folio *);
 
-       /*
-        * Reads in the requested pages. Unlike ->readpage(), this is
-        * PURELY used for read-ahead!.
-        */
-       int (*readpages)(struct file *filp, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages);
        void (*readahead)(struct readahead_control *);
 
        int (*write_begin)(struct file *, struct address_space *mapping,
@@ -3027,7 +2998,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
 
 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
                rwf_t flags);
index d44ff74..6727fb0 100644 (file)
@@ -457,6 +457,20 @@ int fscache_begin_read_operation(struct netfs_cache_resources *cres,
 }
 
 /**
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
+ *
+ * Clean up the resources at the end of the read request.
+ */
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
+{
+       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+       if (ops)
+               ops->end_operation(cres);
+}
+
+/**
  * fscache_read - Start a read from the cache.
  * @cres: The cache resources to use
  * @start_pos: The beginning file offset in the cache file
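
The fscache_end_operation() helper added above pairs with fscache_begin_read_operation(); a minimal sketch of that pairing, assuming the caller already holds a cookie and a netfs_cache_resources struct (the example_cache_read name is hypothetical, not code from this series):

static int example_cache_read(struct netfs_cache_resources *cres,
                              struct fscache_cookie *cookie)
{
        int ret;

        /* Attach cache resources for the read; fails if there is no cache. */
        ret = fscache_begin_read_operation(cres, cookie);
        if (ret < 0)
                return ret;

        /* ... issue reads through cres->ops ... */

        /* Release the cache resources once the request is complete. */
        fscache_end_operation(cres);
        return 0;
}
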
index b568b3c..a7afc80 100644 (file)
@@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
  *
  * This checks whether ->i_verity_info has been set.
  *
- * Filesystems call this from ->readpages() to check whether the pages need to
+ * Filesystems call this from ->readahead() to check whether the pages need to
  * be verified or not.  Don't use IS_VERITY() for this purpose; it's subject to
  * a race condition where the file is being read concurrently with
  * FS_IOC_ENABLE_VERITY completing.  (S_VERITY is set before ->i_verity_info.)
index ed8cf43..4816b7e 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/trace_recursion.h>
 #include <linux/trace_clock.h>
+#include <linux/jump_label.h>
 #include <linux/kallsyms.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -1018,7 +1019,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 extern int register_ftrace_graph(struct fgraph_ops *ops);
 extern void unregister_ftrace_graph(struct fgraph_ops *ops);
 
-extern bool ftrace_graph_is_dead(void);
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * the function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
+
+static inline bool ftrace_graph_is_dead(void)
+{
+       return static_branch_unlikely(&kill_ftrace_graph);
+}
+
 extern void ftrace_graph_stop(void);
 
 /* The current handlers in use */
index 0fa17fb..761f8f1 100644 (file)
@@ -264,9 +264,7 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (24 +                                         \
-                         3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) +        \
-                         IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
index b0728c8..98c9351 100644 (file)
@@ -168,13 +168,16 @@ struct gpio_irq_chip {
 
        /**
         * @parent_handler_data:
+        *
+        * If @per_parent_data is false, @parent_handler_data is a single
+        * pointer used as the data associated with every parent interrupt.
+        *
         * @parent_handler_data_array:
         *
-        * Data associated, and passed to, the handler for the parent
-        * interrupt. Can either be a single pointer if @per_parent_data
-        * is false, or an array of @num_parents pointers otherwise.  If
-        * @per_parent_data is true, @parent_handler_data_array cannot be
-        * NULL.
+        * If @per_parent_data is true, @parent_handler_data_array is
+        * an array of @num_parents pointers, and is used to associate
+        * different data with each parent. This cannot be NULL if
+        * @per_parent_data is true.
         */
        union {
                void *parent_handler_data;
index 0354b29..49790c1 100644 (file)
@@ -475,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
 void input_alloc_absinfo(struct input_dev *dev);
 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
                          int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+                   const struct input_dev *src, unsigned int src_axis);
 
 #define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item)                   \
 static inline int input_abs_get_##_suffix(struct input_dev *dev,       \
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644 (file)
index 0000000..7e4b702
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS  24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ *                        codes, encoded rows/columns, etc) for the top
+ *                        row function keys, in an order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is supposed to be used by ChromeOS keyboards using
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+       u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+       unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+                                         char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
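
A minimal sketch of how a keyboard driver might expose this data through sysfs with the helper declared above; only the vivaldi_* symbols come from this header, the device attribute wrapper and the example scancodes are assumptions:

static const struct vivaldi_data example_vdata = {
        .function_row_physmap = { 0x3b, 0x3c, 0x3d, 0x3e },
        .num_function_row_keys = 4,
};

static ssize_t function_row_physmap_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        return vivaldi_function_row_physmap_show(&example_vdata, buf);
}
static DEVICE_ATTR_RO(function_row_physmap);
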
index 9536ffa..3f9b22c 100644 (file)
@@ -148,6 +148,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_MASK           GENMASK(7,0)
 #define KVM_REQUEST_NO_WAKEUP      BIT(8)
 #define KVM_REQUEST_WAIT           BIT(9)
+#define KVM_REQUEST_NO_ACTION      BIT(10)
 /*
  * Architecture-independent vcpu->requests bit members
  * Bits 4-7 are reserved for more arch-independent bits.
@@ -156,9 +157,18 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_VM_DEAD           (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK           2
 #define KVM_REQ_UNHALT            3
-#define KVM_REQ_GPC_INVALIDATE    (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQUEST_ARCH_BASE     8
 
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE.  KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on.  A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
+ */
+#define KVM_REQ_OUTSIDE_GUEST_MODE     (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
@@ -1221,27 +1231,27 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * @gpc:          struct gfn_to_pfn_cache object.
  * @vcpu:         vCPU to be used for marking pages dirty and to be woken on
  *                invalidation.
- * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- *                @vcpu is IN_GUEST_MODE so invalidations should wake it.
- * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
+ * @usage:        indicates if the resulting host physical PFN is used while
+ *                the @vcpu is IN_GUEST_MODE (in which case invalidation of
+ *                the cache from MMU notifiers, but not from KVM memslot
+ *                changes, will also force @vcpu to exit the guest and
+ *                refresh the cache); and/or if the PFN is used directly
+ *                by KVM (and thus needs a kernel virtual mapping).
  * @gpa:          guest physical address to map.
 * @len:          sanity check; the range being accessed must fit a single page.
- * @dirty:         mark the cache dirty immediately.
  *
  * @return:       0 for success.
  *                -EINVAL for a mapping which would cross a page boundary.
  *                 -EFAULT for an untranslatable guest physical address.
  *
  * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
- * invalidations to be processed. Invalidation callbacks to @vcpu using
- * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM
- * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check()
- * to ensure that the cache is valid before accessing the target page.
+ * invalidations to be processed.  Callers are required to use
+ * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
+ * accessing the target page.
  */
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, bool guest_uses_pa,
-                             bool kernel_map, gpa_t gpa, unsigned long len,
-                             bool dirty);
+                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                             gpa_t gpa, unsigned long len);
 
 /**
  * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
@@ -1250,7 +1260,6 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * @gpc:          struct gfn_to_pfn_cache object.
  * @gpa:          current guest physical address to map.
 * @len:          sanity check; the range being accessed must fit a single page.
- * @dirty:         mark the cache dirty immediately.
  *
  * @return:       %true if the cache is still valid and the address matches.
  *                %false if the cache is not valid.
@@ -1272,7 +1281,6 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * @gpc:          struct gfn_to_pfn_cache object.
  * @gpa:          updated guest physical address to map.
 * @len:          sanity check; the range being accessed must fit a single page.
- * @dirty:         mark the cache dirty immediately.
  *
  * @return:       0 for success.
  *                -EINVAL for a mapping which would cross a page boundary.
@@ -1285,7 +1293,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * with the lock still held to permit access.
  */
 int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                                gpa_t gpa, unsigned long len, bool dirty);
+                                gpa_t gpa, unsigned long len);
 
 /**
  * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
@@ -1293,10 +1301,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * @kvm:          pointer to kvm instance.
  * @gpc:          struct gfn_to_pfn_cache object.
  *
- * This unmaps the referenced page and marks it dirty, if appropriate. The
- * cache is left in the invalid state but at least the mapping from GPA to
- * userspace HVA will remain cached and can be reused on a subsequent
- * refresh.
+ * This unmaps the referenced page. The cache is left in the invalid state
+ * but at least the mapping from GPA to userspace HVA will remain cached
+ * and can be reused on a subsequent refresh.
  */
 void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
 
@@ -1984,7 +1991,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 void kvm_arch_irq_routing_update(struct kvm *kvm);
 
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
        /*
         * Ensure the rest of the request is published to kvm_check_request's
@@ -1994,6 +2001,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
 }
 
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+       /*
+        * Requests that don't require vCPU action should never be logged in
+        * vcpu->requests.  The vCPU won't clear the request, so it will stay
+        * logged indefinitely and prevent the vCPU from entering the guest.
+        */
+       BUILD_BUG_ON(!__builtin_constant_p(req) ||
+                    (req & KVM_REQUEST_NO_ACTION));
+
+       __kvm_make_request(req, vcpu);
+}
+
 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
 {
        return READ_ONCE(vcpu->requests);
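
A minimal sketch of the calling pattern implied by the reworked gfn_to_pfn_cache kernel-doc above (check under the read lock, refresh after an invalidation); the function name and error handling are assumptions, not code from this series:

static int example_use_gpc(struct kvm *kvm, struct kvm_vcpu *vcpu,
                           struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
        int ret;

        /* Prime the cache; KVM_HOST_USES_PFN requests a kernel mapping. */
        ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, KVM_HOST_USES_PFN,
                                        gpa, PAGE_SIZE);
        if (ret)
                return ret;

        read_lock(&gpc->lock);
        while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, PAGE_SIZE)) {
                read_unlock(&gpc->lock);

                /* Re-map after an MMU notifier invalidated the cache. */
                ret = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, PAGE_SIZE);
                if (ret)
                        return ret;

                read_lock(&gpc->lock);
        }

        /* gpc->khva is safe to dereference until read_unlock(). */
        read_unlock(&gpc->lock);
        return 0;
}
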
index dceac12..ac1ebb3 100644 (file)
@@ -18,6 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
+#include <linux/bits.h>
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
 
@@ -46,6 +47,12 @@ typedef u64            hfn_t;
 
 typedef hfn_t kvm_pfn_t;
 
+enum pfn_cache_usage {
+       KVM_GUEST_USES_PFN = BIT(0),
+       KVM_HOST_USES_PFN  = BIT(1),
+       KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
 struct gfn_to_hva_cache {
        u64 generation;
        gpa_t gpa;
@@ -64,11 +71,9 @@ struct gfn_to_pfn_cache {
        rwlock_t lock;
        void *khva;
        kvm_pfn_t pfn;
+       enum pfn_cache_usage usage;
        bool active;
        bool valid;
-       bool dirty;
-       bool kernel_map;
-       bool guest_uses_pa;
 };
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
index 808bb4c..b0da04f 100644 (file)
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
    /* 2 values for divider stage reset, others for "testing purposes only" */
 #  define RTC_DIV_RESET1       0x60
 #  define RTC_DIV_RESET2       0x70
+   /* In the AMD BKDG, bits 5 and 6 are reserved and bit 4 selects the dv0 bank */
+#  define RTC_AMD_BANK_SELECT  0x10
   /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
 # define RTC_RATE_SELECT       0x0F
 
index ba736b4..12093f4 100644 (file)
@@ -125,6 +125,25 @@ struct socket {
        struct socket_wq        wq;
 };
 
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+       size_t written;
+       size_t count;
+       union {
+               char __user *buf;
+               void *data;
+       } arg;
+       int error;
+} read_descriptor_t;
+
 struct vm_area_struct;
 struct page;
 struct sockaddr;
index 614f222..c7bf1ea 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 
+enum netfs_sreq_ref_trace;
+
 /*
  * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
  * a page is currently backed by a local disk cache
@@ -106,7 +108,7 @@ static inline int wait_on_page_fscache_killable(struct page *page)
        return folio_wait_private_2_killable(page_folio(page));
 }
 
-enum netfs_read_source {
+enum netfs_io_source {
        NETFS_FILL_WITH_ZEROES,
        NETFS_DOWNLOAD_FROM_SERVER,
        NETFS_READ_FROM_CACHE,
@@ -117,6 +119,17 @@ typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
                                      bool was_async);
 
 /*
+ * Per-inode description.  This must be directly after the inode struct.
+ */
+struct netfs_i_context {
+       const struct netfs_request_ops *ops;
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct fscache_cookie   *cache;
+#endif
+       loff_t                  remote_i_size;  /* Size of the remote file */
+};
+
+/*
  * Resources required to do operations on a cache.
  */
 struct netfs_cache_resources {
@@ -130,69 +143,75 @@ struct netfs_cache_resources {
 /*
  * Descriptor for a single component subrequest.
  */
-struct netfs_read_subrequest {
-       struct netfs_read_request *rreq;        /* Supervising read request */
+struct netfs_io_subrequest {
+       struct netfs_io_request *rreq;          /* Supervising I/O request */
        struct list_head        rreq_link;      /* Link in rreq->subrequests */
        loff_t                  start;          /* Where to start the I/O */
        size_t                  len;            /* Size of the I/O */
        size_t                  transferred;    /* Amount of data transferred */
-       refcount_t              usage;
+       refcount_t              ref;
        short                   error;          /* 0 or error that occurred */
        unsigned short          debug_index;    /* Index in list (for debugging output) */
-       enum netfs_read_source  source;         /* Where to read from */
+       enum netfs_io_source    source;         /* Where to read from/write to */
        unsigned long           flags;
-#define NETFS_SREQ_WRITE_TO_CACHE      0       /* Set if should write to cache */
+#define NETFS_SREQ_COPY_TO_CACHE       0       /* Set if should copy the data to the cache */
 #define NETFS_SREQ_CLEAR_TAIL          1       /* Set if the rest of the read should be cleared */
-#define NETFS_SREQ_SHORT_READ          2       /* Set if there was a short read from the cache */
+#define NETFS_SREQ_SHORT_IO            2       /* Set if the I/O was short */
 #define NETFS_SREQ_SEEK_DATA_READ      3       /* Set if ->read() should SEEK_DATA first */
 #define NETFS_SREQ_NO_PROGRESS         4       /* Set if we didn't manage to read any data */
 };
 
+enum netfs_io_origin {
+       NETFS_READAHEAD,                /* This read was triggered by readahead */
+       NETFS_READPAGE,                 /* This read is a synchronous read */
+       NETFS_READ_FOR_WRITE,           /* This read is to prepare a write */
+} __mode(byte);
+
 /*
- * Descriptor for a read helper request.  This is used to make multiple I/O
- * requests on a variety of sources and then stitch the result together.
+ * Descriptor for an I/O helper request.  This is used to make multiple I/O
+ * operations to a variety of data stores and then stitch the result together.
  */
-struct netfs_read_request {
+struct netfs_io_request {
        struct work_struct      work;
        struct inode            *inode;         /* The file being accessed */
        struct address_space    *mapping;       /* The mapping being accessed */
        struct netfs_cache_resources cache_resources;
-       struct list_head        subrequests;    /* Requests to fetch I/O from disk or net */
+       struct list_head        subrequests;    /* Contributory I/O operations */
        void                    *netfs_priv;    /* Private data for the netfs */
        unsigned int            debug_id;
-       atomic_t                nr_rd_ops;      /* Number of read ops in progress */
-       atomic_t                nr_wr_ops;      /* Number of write ops in progress */
+       atomic_t                nr_outstanding; /* Number of ops in progress */
+       atomic_t                nr_copy_ops;    /* Number of copy-to-cache ops in progress */
        size_t                  submitted;      /* Amount submitted for I/O so far */
        size_t                  len;            /* Length of the request */
        short                   error;          /* 0 or error that occurred */
+       enum netfs_io_origin    origin;         /* Origin of the request */
        loff_t                  i_size;         /* Size of the file */
        loff_t                  start;          /* Start position */
        pgoff_t                 no_unlock_folio; /* Don't unlock this folio after read */
-       refcount_t              usage;
+       refcount_t              ref;
        unsigned long           flags;
 #define NETFS_RREQ_INCOMPLETE_IO       0       /* Some ioreqs terminated short or with error */
-#define NETFS_RREQ_WRITE_TO_CACHE      1       /* Need to write to the cache */
+#define NETFS_RREQ_COPY_TO_CACHE       1       /* Need to write to the cache */
 #define NETFS_RREQ_NO_UNLOCK_FOLIO     2       /* Don't unlock no_unlock_folio on completion */
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS  3       /* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED              4       /* The request failed */
 #define NETFS_RREQ_IN_PROGRESS         5       /* Unlocked when the request completes */
-       const struct netfs_read_request_ops *netfs_ops;
+       const struct netfs_request_ops *netfs_ops;
 };
 
 /*
  * Operations the network filesystem can/must provide to the helpers.
  */
-struct netfs_read_request_ops {
-       bool (*is_cache_enabled)(struct inode *inode);
-       void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
-       int (*begin_cache_operation)(struct netfs_read_request *rreq);
-       void (*expand_readahead)(struct netfs_read_request *rreq);
-       bool (*clamp_length)(struct netfs_read_subrequest *subreq);
-       void (*issue_op)(struct netfs_read_subrequest *subreq);
-       bool (*is_still_valid)(struct netfs_read_request *rreq);
+struct netfs_request_ops {
+       int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+       int (*begin_cache_operation)(struct netfs_io_request *rreq);
+       void (*expand_readahead)(struct netfs_io_request *rreq);
+       bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+       void (*issue_read)(struct netfs_io_subrequest *subreq);
+       bool (*is_still_valid)(struct netfs_io_request *rreq);
        int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                 struct folio *folio, void **_fsdata);
-       void (*done)(struct netfs_read_request *rreq);
+       void (*done)(struct netfs_io_request *rreq);
        void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
 
@@ -235,7 +254,7 @@ struct netfs_cache_ops {
        /* Prepare a read operation, shortening it to a cached/uncached
         * boundary as appropriate.
         */
-       enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
+       enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
                                               loff_t i_size);
 
        /* Prepare a write operation, working out what part of the write we can
@@ -254,20 +273,89 @@ struct netfs_cache_ops {
 };
 
 struct readahead_control;
-extern void netfs_readahead(struct readahead_control *,
-                           const struct netfs_read_request_ops *,
-                           void *);
-extern int netfs_readpage(struct file *,
-                         struct folio *,
-                         const struct netfs_read_request_ops *,
-                         void *);
+extern void netfs_readahead(struct readahead_control *);
+extern int netfs_readpage(struct file *, struct page *);
 extern int netfs_write_begin(struct file *, struct address_space *,
                             loff_t, unsigned int, unsigned int, struct folio **,
-                            void **,
-                            const struct netfs_read_request_ops *,
-                            void *);
+                            void **);
 
-extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool);
+extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+                                enum netfs_sreq_ref_trace what);
+extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+                                bool was_async, enum netfs_sreq_ref_trace what);
 extern void netfs_stats_show(struct seq_file *);
 
+/**
+ * netfs_i_context - Get the netfs inode context from the inode
+ * @inode: The inode to query
+ *
+ * Get the netfs lib inode context from the network filesystem's inode.  The
+ * context struct is expected to directly follow on from the VFS inode struct.
+ */
+static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
+{
+       return (struct netfs_i_context *)(inode + 1);
+}
+
+/**
+ * netfs_inode - Get the netfs inode from the inode context
+ * @ctx: The context to query
+ *
+ * Get the netfs inode from the netfs library's inode context.  The VFS inode
+ * is expected to directly precede the context struct.
+ */
+static inline struct inode *netfs_inode(struct netfs_i_context *ctx)
+{
+       return ((struct inode *)ctx) - 1;
+}
+
+/**
+ * netfs_i_context_init - Initialise a netfs lib context
+ * @inode: The inode with which the context is associated
+ * @ops: The netfs's operations list
+ *
+ * Initialise the netfs library context struct.  This is expected to follow on
+ * directly from the VFS inode struct.
+ */
+static inline void netfs_i_context_init(struct inode *inode,
+                                       const struct netfs_request_ops *ops)
+{
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->ops = ops;
+       ctx->remote_i_size = i_size_read(inode);
+}
+
+/**
+ * netfs_resize_file - Note that a file got resized
+ * @inode: The inode being resized
+ * @new_i_size: The new file size
+ *
+ * Inform the netfs lib that a file got resized so that it can adjust its state.
+ */
+static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
+{
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+
+       ctx->remote_i_size = new_i_size;
+}
+
+/**
+ * netfs_i_cookie - Get the cache cookie from the inode
+ * @inode: The inode to query
+ *
+ * Get the caching cookie (if enabled) from the network filesystem's inode.
+ */
+static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+       return ctx->cache;
+#else
+       return NULL;
+#endif
+}
+
 #endif /* _LINUX_NETFS_H */
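
Since netfs_i_context() simply computes inode + 1, a network filesystem has to lay out its inode so that the context directly follows the VFS inode; a minimal sketch of that layout and its initialisation (the examplefs_* names and the empty ops table are placeholders, not from this series):

struct examplefs_inode {
        struct inode            vfs_inode;      /* VFS inode must come first */
        struct netfs_i_context  netfs_ctx;      /* must directly follow it */
        /* filesystem-private fields follow */
};

static const struct netfs_request_ops examplefs_req_ops;

static void examplefs_set_up_netfs(struct inode *inode)
{
        /* Zeroes the context, records the ops and the current i_size. */
        netfs_i_context_init(inode, &examplefs_req_ops);
}
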
index 4f44f83..f626a44 100644 (file)
@@ -346,6 +346,7 @@ enum {
        NVME_CTRL_ONCS_TIMESTAMP                = 1 << 6,
        NVME_CTRL_VWC_PRESENT                   = 1 << 0,
        NVME_CTRL_OACS_SEC_SUPP                 = 1 << 0,
+       NVME_CTRL_OACS_NS_MNGT_SUPP             = 1 << 3,
        NVME_CTRL_OACS_DIRECTIVES               = 1 << 5,
        NVME_CTRL_OACS_DBBUF_SUPP               = 1 << 8,
        NVME_CTRL_LPA_CMD_EFFECTS_LOG           = 1 << 1,
index a8d0b32..993994c 100644 (file)
@@ -752,8 +752,6 @@ struct page *read_cache_page(struct address_space *, pgoff_t index,
                filler_t *filler, void *data);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
-extern int read_cache_pages(struct address_space *mapping,
-               struct list_head *pages, filler_t *filler, void *data);
 
 static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, struct file *file)
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
deleted file mode 100644 (file)
index 249d4d7..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* include this file if the platform implements the dma_ DMA Mapping API
- * and wants to provide the pci_ DMA Mapping API in terms of it */
-
-#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
-#define _ASM_GENERIC_PCI_DMA_COMPAT_H
-
-#include <linux/dma-mapping.h>
-
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL  DMA_BIDIRECTIONAL
-#define PCI_DMA_TODEVICE       DMA_TO_DEVICE
-#define PCI_DMA_FROMDEVICE     DMA_FROM_DEVICE
-#define PCI_DMA_NONE           DMA_NONE
-
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-                    dma_addr_t *dma_handle)
-{
-       return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void *
-pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
-                     dma_addr_t *dma_handle)
-{
-       return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void
-pci_free_consistent(struct pci_dev *hwdev, size_t size,
-                   void *vaddr, dma_addr_t dma_handle)
-{
-       dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
-       return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-                size_t size, int direction)
-{
-       dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-
-static inline dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
-            unsigned long offset, size_t size, int direction)
-{
-       return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
-              size_t size, int direction)
-{
-       dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-          int nents, int direction)
-{
-       return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-            int nents, int direction)
-{
-       dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
-                   size_t size, int direction)
-{
-       dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
-                   size_t size, int direction)
-{
-       dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
-               int nelems, int direction)
-{
-       dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
-               int nelems, int direction)
-{
-       dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
-{
-       return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
-#ifdef CONFIG_PCI
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-       return dma_set_mask(&dev->dev, mask);
-}
-
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
-       return dma_set_coherent_mask(&dev->dev, mask);
-}
-#else
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-#endif
-
-#endif
index b957eeb..60adf42 100644 (file)
@@ -2473,8 +2473,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
 void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
 #endif
 
-/* Provide the legacy pci_dma_* API */
-#include <linux/pci-dma-compat.h>
+#include <linux/dma-mapping.h>
 
 #define pci_printk(level, pdev, fmt, arg...) \
        dev_printk(level, &(pdev)->dev, fmt, ##arg)
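
With pci-dma-compat.h removed, call sites use the generic DMA API on &pdev->dev directly; a sketch of the conversion pattern for a single streaming mapping (the example_dma_map wrapper is an assumption for illustration):

static int example_dma_map(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t addr;

        /* Was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
        addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, addr))
                return -ENOMEM;

        /* ... device performs DMA ... */

        dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
        return 0;
}
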
index 47fd1c2..1fd9c6a 100644 (file)
@@ -110,8 +110,6 @@ struct rtc_device {
        struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
        int pie_enabled;
        struct work_struct irqwork;
-       /* Some hardware can't support UIE mode */
-       int uie_unsupported;
 
        /*
         * This offset specifies the update timing of the RTC.
index 67ee9d2..5a41c3b 100644 (file)
@@ -46,7 +46,6 @@ struct ds1685_priv {
        u32 regstep;
        int irq_num;
        bool bcd_mode;
-       bool no_irq;
        u8 (*read)(struct ds1685_priv *, int);
        void (*write)(struct ds1685_priv *, int, u8);
        void (*prepare_poweroff)(void);
index dffeb82..8f5a86e 100644 (file)
@@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
 static inline void sbitmap_free(struct sbitmap *sb)
 {
        free_percpu(sb->alloc_hint);
-       kfree(sb->map);
+       kvfree(sb->map);
        sb->map = NULL;
 }
 
index 4a6fdd2..d5e3c00 100644 (file)
@@ -1090,9 +1090,6 @@ struct task_struct {
        /* Restored if set_restore_sigmask() was used: */
        sigset_t                        saved_sigmask;
        struct sigpending               pending;
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-       struct kernel_siginfo           forced_info;
-#endif
        unsigned long                   sas_ss_sp;
        size_t                          sas_ss_size;
        unsigned int                    sas_ss_flags;
index 88cc164..60820ab 100644 (file)
@@ -162,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *);
 int seq_path_root(struct seq_file *m, const struct path *path,
                  const struct path *root, const char *esc);
 
+void *single_start(struct seq_file *, loff_t *);
 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
 int single_release(struct inode *, struct file *);
similarity index 58%
rename from include/uapi/linux/user_events.h
rename to include/linux/user_events.h
index e570840..736e056 100644 (file)
@@ -32,9 +32,6 @@
 /* Create dynamic location entry within a 32-bit value */
 #define DYN_LOC(offset, size) ((size) << 16 | (offset))
 
-/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */
-#define FLAG_BPF_ITER (1 << 0)
-
 /*
  * Describes an event registration and stores the results of the registration.
  * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
@@ -63,54 +60,4 @@ struct user_reg {
 /* Requests to delete a user_event */
 #define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*)
 
-/* Data type that was passed to the BPF program */
-enum {
-       /* Data resides in kernel space */
-       USER_BPF_DATA_KERNEL,
-
-       /* Data resides in user space */
-       USER_BPF_DATA_USER,
-
-       /* Data is a pointer to a user_bpf_iter structure */
-       USER_BPF_DATA_ITER,
-};
-
-/*
- * Describes an iovec iterator that BPF programs can use to access data for
- * a given user_event write() / writev() call.
- */
-struct user_bpf_iter {
-
-       /* Offset of the data within the first iovec */
-       __u32 iov_offset;
-
-       /* Number of iovec structures */
-       __u32 nr_segs;
-
-       /* Pointer to iovec structures */
-       const struct iovec *iov;
-};
-
-/* Context that BPF programs receive when attached to a user_event */
-struct user_bpf_context {
-
-       /* Data type being passed (see union below) */
-       __u32 data_type;
-
-       /* Length of the data */
-       __u32 data_len;
-
-       /* Pointer to data, varies by data type */
-       union {
-               /* Kernel data (data_type == USER_BPF_DATA_KERNEL) */
-               void *kdata;
-
-               /* User data (data_type == USER_BPF_DATA_USER) */
-               void *udata;
-
-               /* Direct iovec (data_type == USER_BPF_DATA_ITER) */
-               struct user_bpf_iter *iter;
-       };
-};
-
 #endif /* _UAPI_LINUX_USER_EVENTS_H */
index 721089b..8943a20 100644 (file)
@@ -83,7 +83,7 @@ struct vdpa_device {
        unsigned int index;
        bool features_valid;
        bool use_va;
-       int nvqs;
+       u32 nvqs;
        struct vdpa_mgmt_dev *mdev;
 };
 
@@ -207,7 +207,8 @@ struct vdpa_map_file {
  * @reset:                     Reset device
  *                             @vdev: vdpa device
  *                             Returns integer: success (0) or error (< 0)
- * @get_config_size:           Get the size of the configuration space
+ * @get_config_size:           Get the size of the configuration space, including
+ *                             fields that are conditional on feature bits.
  *                             @vdev: vdpa device
  *                             Returns size_t: configuration size
  * @get_config:                        Read from device specific configuration space
@@ -337,10 +338,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                       dev_struct, member)), name, use_va), \
                                       dev_struct, member)
 
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void vdpa_unregister_device(struct vdpa_device *vdev);
 
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void _vdpa_unregister_device(struct vdpa_device *vdev);
 
 /**
index dafdc7f..b341dd6 100644 (file)
@@ -23,8 +23,6 @@ struct virtio_shm_region {
  *       any of @get/@set, @get_status/@set_status, or @get_features/
  *       @finalize_features are NOT safe to be called from an atomic
  *       context.
- * @enable_cbs: enable the callbacks
- *      vdev: the virtio_device
  * @get: read the value of a configuration field
  *     vdev: the virtio_device
  *     offset: the offset of the configuration field
@@ -78,7 +76,6 @@ struct virtio_shm_region {
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
-       void (*enable_cbs)(struct virtio_device *vdev);
        void (*get)(struct virtio_device *vdev, unsigned offset,
                    void *buf, unsigned len);
        void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev)
 {
        unsigned status = dev->config->get_status(dev);
 
-       if (dev->config->enable_cbs)
-                  dev->config->enable_cbs(dev);
-
        BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }
index bb52b78..72feab5 100644 (file)
@@ -9,6 +9,7 @@
  * See Documentation/core-api/xarray.rst for how to use the XArray.
  */
 
+#include <linux/bitmap.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/gfp.h>
index 314f277..6b99310 100644 (file)
@@ -402,6 +402,7 @@ struct snd_pcm_runtime {
        struct fasync_struct *fasync;
        bool stop_operating;            /* sync_stop will be called */
        struct mutex buffer_mutex;      /* protect for buffer changes */
+       atomic_t buffer_accessing;      /* >0: in r/w operation, <0: blocked */
 
        /* -- private section -- */
        void *private_data;
index 2c53063..311c14a 100644 (file)
@@ -426,8 +426,8 @@ TRACE_EVENT(cachefiles_vol_coherency,
            );
 
 TRACE_EVENT(cachefiles_prep_read,
-           TP_PROTO(struct netfs_read_subrequest *sreq,
-                    enum netfs_read_source source,
+           TP_PROTO(struct netfs_io_subrequest *sreq,
+                    enum netfs_io_source source,
                     enum cachefiles_prepare_read_trace why,
                     ino_t cache_inode),
 
@@ -437,7 +437,7 @@ TRACE_EVENT(cachefiles_prep_read,
                    __field(unsigned int,               rreq            )
                    __field(unsigned short,             index           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum cachefiles_prepare_read_trace, why     )
                    __field(size_t,                     len             )
                    __field(loff_t,                     start           )
index e6f4ebb..beec534 100644 (file)
 /*
  * Define enums for tracing information.
  */
-#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-
-enum netfs_read_trace {
-       netfs_read_trace_expanded,
-       netfs_read_trace_readahead,
-       netfs_read_trace_readpage,
-       netfs_read_trace_write_begin,
-};
-
-enum netfs_rreq_trace {
-       netfs_rreq_trace_assess,
-       netfs_rreq_trace_done,
-       netfs_rreq_trace_free,
-       netfs_rreq_trace_resubmit,
-       netfs_rreq_trace_unlock,
-       netfs_rreq_trace_unmark,
-       netfs_rreq_trace_write,
-};
-
-enum netfs_sreq_trace {
-       netfs_sreq_trace_download_instead,
-       netfs_sreq_trace_free,
-       netfs_sreq_trace_prepare,
-       netfs_sreq_trace_resubmit_short,
-       netfs_sreq_trace_submit,
-       netfs_sreq_trace_terminated,
-       netfs_sreq_trace_write,
-       netfs_sreq_trace_write_skip,
-       netfs_sreq_trace_write_term,
-};
-
-enum netfs_failure {
-       netfs_fail_check_write_begin,
-       netfs_fail_copy_to_cache,
-       netfs_fail_read,
-       netfs_fail_short_readpage,
-       netfs_fail_short_write_begin,
-       netfs_fail_prepare_write,
-};
-
-#endif
-
 #define netfs_read_traces                                      \
        EM(netfs_read_trace_expanded,           "EXPANDED ")    \
        EM(netfs_read_trace_readahead,          "READAHEAD")    \
        EM(netfs_read_trace_readpage,           "READPAGE ")    \
        E_(netfs_read_trace_write_begin,        "WRITEBEGN")
 
+#define netfs_rreq_origins                                     \
+       EM(NETFS_READAHEAD,                     "RA")           \
+       EM(NETFS_READPAGE,                      "RP")           \
+       E_(NETFS_READ_FOR_WRITE,                "RW")
+
 #define netfs_rreq_traces                                      \
-       EM(netfs_rreq_trace_assess,             "ASSESS")       \
-       EM(netfs_rreq_trace_done,               "DONE  ")       \
-       EM(netfs_rreq_trace_free,               "FREE  ")       \
-       EM(netfs_rreq_trace_resubmit,           "RESUBM")       \
-       EM(netfs_rreq_trace_unlock,             "UNLOCK")       \
-       EM(netfs_rreq_trace_unmark,             "UNMARK")       \
-       E_(netfs_rreq_trace_write,              "WRITE ")
+       EM(netfs_rreq_trace_assess,             "ASSESS ")      \
+       EM(netfs_rreq_trace_copy,               "COPY   ")      \
+       EM(netfs_rreq_trace_done,               "DONE   ")      \
+       EM(netfs_rreq_trace_free,               "FREE   ")      \
+       EM(netfs_rreq_trace_resubmit,           "RESUBMT")      \
+       EM(netfs_rreq_trace_unlock,             "UNLOCK ")      \
+       E_(netfs_rreq_trace_unmark,             "UNMARK ")
 
 #define netfs_sreq_sources                                     \
        EM(NETFS_FILL_WITH_ZEROES,              "ZERO")         \
@@ -94,10 +56,47 @@ enum netfs_failure {
        EM(netfs_fail_check_write_begin,        "check-write-begin")    \
        EM(netfs_fail_copy_to_cache,            "copy-to-cache")        \
        EM(netfs_fail_read,                     "read")                 \
-       EM(netfs_fail_short_readpage,           "short-readpage")       \
-       EM(netfs_fail_short_write_begin,        "short-write-begin")    \
+       EM(netfs_fail_short_read,               "short-read")           \
        E_(netfs_fail_prepare_write,            "prep-write")
 
+#define netfs_rreq_ref_traces                                  \
+       EM(netfs_rreq_trace_get_hold,           "GET HOLD   ")  \
+       EM(netfs_rreq_trace_get_subreq,         "GET SUBREQ ")  \
+       EM(netfs_rreq_trace_put_complete,       "PUT COMPLT ")  \
+       EM(netfs_rreq_trace_put_discard,        "PUT DISCARD")  \
+       EM(netfs_rreq_trace_put_failed,         "PUT FAILED ")  \
+       EM(netfs_rreq_trace_put_hold,           "PUT HOLD   ")  \
+       EM(netfs_rreq_trace_put_subreq,         "PUT SUBREQ ")  \
+       EM(netfs_rreq_trace_put_zero_len,       "PUT ZEROLEN")  \
+       E_(netfs_rreq_trace_new,                "NEW        ")
+
+#define netfs_sreq_ref_traces                                  \
+       EM(netfs_sreq_trace_get_copy_to_cache,  "GET COPY2C ")  \
+       EM(netfs_sreq_trace_get_resubmit,       "GET RESUBMIT") \
+       EM(netfs_sreq_trace_get_short_read,     "GET SHORTRD")  \
+       EM(netfs_sreq_trace_new,                "NEW        ")  \
+       EM(netfs_sreq_trace_put_clear,          "PUT CLEAR  ")  \
+       EM(netfs_sreq_trace_put_failed,         "PUT FAILED ")  \
+       EM(netfs_sreq_trace_put_merged,         "PUT MERGED ")  \
+       EM(netfs_sreq_trace_put_no_copy,        "PUT NO COPY")  \
+       E_(netfs_sreq_trace_put_terminated,     "PUT TERM   ")
+
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
+enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
+enum netfs_failure { netfs_failures } __mode(byte);
+enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
+enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+
+#endif
 
 /*
  * Export enum symbols via userspace.
@@ -108,10 +107,13 @@ enum netfs_failure {
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
 netfs_read_traces;
+netfs_rreq_origins;
 netfs_rreq_traces;
 netfs_sreq_sources;
 netfs_sreq_traces;
 netfs_failures;
+netfs_rreq_ref_traces;
+netfs_sreq_ref_traces;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -123,7 +125,7 @@ netfs_failures;
 #define E_(a, b)       { a, b }
 
 TRACE_EVENT(netfs_read,
-           TP_PROTO(struct netfs_read_request *rreq,
+           TP_PROTO(struct netfs_io_request *rreq,
                     loff_t start, size_t len,
                     enum netfs_read_trace what),
 
@@ -156,31 +158,34 @@ TRACE_EVENT(netfs_read,
            );
 
 TRACE_EVENT(netfs_rreq,
-           TP_PROTO(struct netfs_read_request *rreq,
+           TP_PROTO(struct netfs_io_request *rreq,
                     enum netfs_rreq_trace what),
 
            TP_ARGS(rreq, what),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               rreq            )
-                   __field(unsigned short,             flags           )
+                   __field(unsigned int,               flags           )
+                   __field(enum netfs_io_origin,       origin          )
                    __field(enum netfs_rreq_trace,      what            )
                             ),
 
            TP_fast_assign(
                    __entry->rreq       = rreq->debug_id;
                    __entry->flags      = rreq->flags;
+                   __entry->origin     = rreq->origin;
                    __entry->what       = what;
                           ),
 
-           TP_printk("R=%08x %s f=%02x",
+           TP_printk("R=%08x %s %s f=%02x",
                      __entry->rreq,
+                     __print_symbolic(__entry->origin, netfs_rreq_origins),
                      __print_symbolic(__entry->what, netfs_rreq_traces),
                      __entry->flags)
            );
 
 TRACE_EVENT(netfs_sreq,
-           TP_PROTO(struct netfs_read_subrequest *sreq,
+           TP_PROTO(struct netfs_io_subrequest *sreq,
                     enum netfs_sreq_trace what),
 
            TP_ARGS(sreq, what),
@@ -190,7 +195,7 @@ TRACE_EVENT(netfs_sreq,
                    __field(unsigned short,             index           )
                    __field(short,                      error           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum netfs_sreq_trace,      what            )
                    __field(size_t,                     len             )
                    __field(size_t,                     transferred     )
@@ -211,26 +216,26 @@ TRACE_EVENT(netfs_sreq,
 
            TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
                      __entry->rreq, __entry->index,
-                     __print_symbolic(__entry->what, netfs_sreq_traces),
                      __print_symbolic(__entry->source, netfs_sreq_sources),
+                     __print_symbolic(__entry->what, netfs_sreq_traces),
                      __entry->flags,
                      __entry->start, __entry->transferred, __entry->len,
                      __entry->error)
            );
 
 TRACE_EVENT(netfs_failure,
-           TP_PROTO(struct netfs_read_request *rreq,
-                    struct netfs_read_subrequest *sreq,
+           TP_PROTO(struct netfs_io_request *rreq,
+                    struct netfs_io_subrequest *sreq,
                     int error, enum netfs_failure what),
 
            TP_ARGS(rreq, sreq, error, what),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               rreq            )
-                   __field(unsigned short,             index           )
+                   __field(short,                      index           )
                    __field(short,                      error           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum netfs_failure,         what            )
                    __field(size_t,                     len             )
                    __field(size_t,                     transferred     )
@@ -239,17 +244,17 @@ TRACE_EVENT(netfs_failure,
 
            TP_fast_assign(
                    __entry->rreq       = rreq->debug_id;
-                   __entry->index      = sreq ? sreq->debug_index : 0;
+                   __entry->index      = sreq ? sreq->debug_index : -1;
                    __entry->error      = error;
                    __entry->flags      = sreq ? sreq->flags : 0;
                    __entry->source     = sreq ? sreq->source : NETFS_INVALID_READ;
                    __entry->what       = what;
-                   __entry->len        = sreq ? sreq->len : 0;
+                   __entry->len        = sreq ? sreq->len : rreq->len;
                    __entry->transferred = sreq ? sreq->transferred : 0;
                    __entry->start      = sreq ? sreq->start : 0;
                           ),
 
-           TP_printk("R=%08x[%u] %s f=%02x s=%llx %zx/%zx %s e=%d",
+           TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
                      __entry->rreq, __entry->index,
                      __print_symbolic(__entry->source, netfs_sreq_sources),
                      __entry->flags,
@@ -258,6 +263,59 @@ TRACE_EVENT(netfs_failure,
                      __entry->error)
            );
 
+TRACE_EVENT(netfs_rreq_ref,
+           TP_PROTO(unsigned int rreq_debug_id, int ref,
+                    enum netfs_rreq_ref_trace what),
+
+           TP_ARGS(rreq_debug_id, ref, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               rreq            )
+                   __field(int,                        ref             )
+                   __field(enum netfs_rreq_ref_trace,  what            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rreq       = rreq_debug_id;
+                   __entry->ref        = ref;
+                   __entry->what       = what;
+                          ),
+
+           TP_printk("R=%08x %s r=%u",
+                     __entry->rreq,
+                     __print_symbolic(__entry->what, netfs_rreq_ref_traces),
+                     __entry->ref)
+           );
+
+TRACE_EVENT(netfs_sreq_ref,
+           TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index,
+                    int ref, enum netfs_sreq_ref_trace what),
+
+           TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               rreq            )
+                   __field(unsigned int,               subreq          )
+                   __field(int,                        ref             )
+                   __field(enum netfs_sreq_ref_trace,  what            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rreq       = rreq_debug_id;
+                   __entry->subreq     = subreq_debug_index;
+                   __entry->ref        = ref;
+                   __entry->what       = what;
+                          ),
+
+           TP_printk("R=%08x[%x] %s r=%u",
+                     __entry->rreq,
+                     __entry->subreq,
+                     __print_symbolic(__entry->what, netfs_sreq_ref_traces),
+                     __entry->ref)
+           );
+
+#undef EM
+#undef E_
 #endif /* _TRACE_NETFS_H */
 
 /* This part must be outside protection */
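The two reference-count tracepoints added above take only a debug ID and the post-operation count, so callers pass plain integers rather than the objects themselves. As a rough, non-authoritative sketch (not part of this diff), a netfs get-ref helper would be expected to emit one of them roughly like this; the refcount_t field name "ref" is an assumption here:

	static void netfs_get_request(struct netfs_io_request *rreq,
				      enum netfs_rreq_ref_trace what)
	{
		int r;

		/* Bump the request ref and report the new count to the tracepoint. */
		__refcount_inc(&rreq->ref, &r);
		trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
	}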
index b567c72..6e492db 100644 (file)
@@ -35,7 +35,7 @@
 
 /* Stage 1 creates the structure of the recorded event layout */
 
-#include "stages/stage1_defines.h"
+#include "stages/stage1_struct_define.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
@@ -56,7 +56,7 @@
 
 /* Stage 2 creates the custom class */
 
-#include "stages/stage2_defines.h"
+#include "stages/stage2_data_offsets.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
@@ -71,7 +71,7 @@
 
 /* Stage 3 creates the way to print the custom event */
 
-#include "stages/stage3_defines.h"
+#include "stages/stage3_trace_output.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -102,7 +102,7 @@ static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
 
 /* Stage 4 creates the offset layout for the fields */
 
-#include "stages/stage4_defines.h"
+#include "stages/stage4_event_fields.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print)    \
@@ -114,7 +114,7 @@ static struct trace_event_fields trace_custom_event_fields_##call[] = {     \
 
 /* Stage 5 creates the helper function for dynamic fields */
 
-#include "stages/stage5_defines.h"
+#include "stages/stage5_get_offsets.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -134,7 +134,7 @@ static inline notrace int trace_custom_event_get_offsets_##call(    \
 
 /* Stage 6 creates the probe function that records the event */
 
-#include "stages/stage6_defines.h"
+#include "stages/stage6_event_callback.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -182,7 +182,7 @@ static inline void ftrace_test_custom_probe_##call(void)            \
 
 /* Stage 7 creates the actual class and event structure for the custom event */
 
-#include "stages/stage7_defines.h"
+#include "stages/stage7_class_define.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
index 8a8cd66..c2f9cab 100644 (file)
@@ -45,7 +45,7 @@
                             PARAMS(print));                   \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
 
-#include "stages/stage1_defines.h"
+#include "stages/stage1_struct_define.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
  * The size of an array is also encoded, in the higher 16 bits of <item>.
  */
 
-#include "stages/stage2_defines.h"
+#include "stages/stage2_data_offsets.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
  * in binary.
  */
 
-#include "stages/stage3_defines.h"
+#include "stages/stage3_trace_output.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -236,7 +236,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = {       \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#include "stages/stage4_defines.h"
+#include "stages/stage4_event_fields.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)   \
@@ -249,7 +249,7 @@ static struct trace_event_fields trace_event_fields_##call[] = {    \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#include "stages/stage5_defines.h"
+#include "stages/stage5_get_offsets.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -372,7 +372,7 @@ static inline notrace int trace_event_get_offsets_##call(           \
 #define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
-#include "stages/stage6_defines.h"
+#include "stages/stage6_event_callback.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -418,7 +418,7 @@ static inline void ftrace_test_probe_##call(void)                   \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#include "stages/stage7_defines.h"
+#include "stages/stage7_class_define.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
index d2be4eb..784adc6 100644 (file)
@@ -201,11 +201,9 @@ struct io_uring_cqe {
  *
  * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
  * IORING_CQE_F_MORE   If set, parent SQE will generate more CQE entries
- * IORING_CQE_F_MSG    If set, CQE was generated with IORING_OP_MSG_RING
  */
 #define IORING_CQE_F_BUFFER            (1U << 0)
 #define IORING_CQE_F_MORE              (1U << 1)
-#define IORING_CQE_F_MSG               (1U << 2)
 
 enum {
        IORING_CQE_BUFFER_SHIFT         = 16,
index 24a1c45..98e6080 100644 (file)
@@ -45,7 +45,7 @@ struct loop_info {
        unsigned long      lo_inode;            /* ioctl r/o */
        __kernel_old_dev_t lo_rdevice;          /* ioctl r/o */
        int                lo_offset;
-       int                lo_encrypt_type;
+       int                lo_encrypt_type;             /* obsolete, ignored */
        int                lo_encrypt_key_size;         /* ioctl w/o */
        int                lo_flags;
        char               lo_name[LO_NAME_SIZE];
@@ -61,7 +61,7 @@ struct loop_info64 {
        __u64              lo_offset;
        __u64              lo_sizelimit;/* bytes, 0 == max available */
        __u32              lo_number;                   /* ioctl r/o */
-       __u32              lo_encrypt_type;
+       __u32              lo_encrypt_type;             /* obsolete, ignored */
        __u32              lo_encrypt_key_size;         /* ioctl w/o */
        __u32              lo_flags;
        __u8               lo_file_name[LO_NAME_SIZE];
index 03e5b77..97aca45 100644 (file)
@@ -133,7 +133,8 @@ struct rtc_param {
 #define RTC_FEATURE_UPDATE_INTERRUPT   4
 #define RTC_FEATURE_CORRECTION         5
 #define RTC_FEATURE_BACKUP_SWITCH_MODE 6
-#define RTC_FEATURE_CNT                        7
+#define RTC_FEATURE_ALARM_WAKEUP_ONLY  7
+#define RTC_FEATURE_CNT                        8
 
 /* parameter list */
 #define RTC_PARAM_FEATURES             0
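For context, userspace discovers the new capability bit through the RTC parameter interface. A loose sketch, assuming the RTC_PARAM_GET ioctl and the struct rtc_param layout defined elsewhere in this header, with rtc_fd an open /dev/rtcN descriptor and handle_wakeup_only_alarm() a placeholder for the application's reaction:

	struct rtc_param p = { .param = RTC_PARAM_FEATURES };

	if (ioctl(rtc_fd, RTC_PARAM_GET, &p) == 0 &&
	    (p.uvalue & (1ULL << RTC_FEATURE_ALARM_WAKEUP_ONLY)))
		/* The alarm can wake the system but does not fire while it is running. */
		handle_wakeup_only_alarm();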
index c998860..5d99e7c 100644 (file)
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE      _IOR(VHOST_VIRTIO, 0x78, \
                                             struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
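A hypothetical userspace sketch of the two new queries (not taken from the patch); fd is assumed to be an open vhost-vdpa character device:

	static int query_vdpa_device(int fd, __u32 *cfg_size, __u32 *vq_count)
	{
		/* Both ioctls simply fill in a __u32, as declared above. */
		if (ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, cfg_size) < 0)
			return -1;
		return ioctl(fd, VHOST_VDPA_GET_VQS_COUNT, vq_count);
	}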
index b5eda06..f0fb0ae 100644 (file)
 #define VIRTIO_F_RING_PACKED           34
 
 /*
+ * The in-order feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER              35
+
+/*
  * This feature indicates that memory accesses by the driver and the
  * device are ordered in a way described by the platform.
  */
index a03932f..71a54a6 100644 (file)
@@ -37,6 +37,7 @@
 #define VIRTIO_CRYPTO_SERVICE_HASH   1
 #define VIRTIO_CRYPTO_SERVICE_MAC    2
 #define VIRTIO_CRYPTO_SERVICE_AEAD   3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
 
 #define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))
 
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
 #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
        __le32 opcode;
        __le32 algo;
        __le32 flag;
@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
        __u8 padding[32];
 };
 
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING   0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+       __le32 padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH   0
+#define VIRTIO_CRYPTO_RSA_MD2       1
+#define VIRTIO_CRYPTO_RSA_MD3       2
+#define VIRTIO_CRYPTO_RSA_MD4       3
+#define VIRTIO_CRYPTO_RSA_MD5       4
+#define VIRTIO_CRYPTO_RSA_SHA1      5
+#define VIRTIO_CRYPTO_RSA_SHA256    6
+#define VIRTIO_CRYPTO_RSA_SHA384    7
+#define VIRTIO_CRYPTO_RSA_SHA512    8
+#define VIRTIO_CRYPTO_RSA_SHA224    9
+       __le32 hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN   0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+       __le32 curve_id;
+       __le32 padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER    0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA   1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA   2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+       __le32 algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC  1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+       __le32 keytype;
+       __le32 keylen;
+
+       union {
+               struct virtio_crypto_rsa_session_para rsa;
+               struct virtio_crypto_ecdsa_session_para ecdsa;
+       } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+       struct virtio_crypto_akcipher_session_para para;
+       __u8 padding[36];
+};
+
 struct virtio_crypto_alg_chain_session_para {
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
                        mac_create_session;
                struct virtio_crypto_aead_create_session_req
                        aead_create_session;
+               struct virtio_crypto_akcipher_create_session_req
+                       akcipher_create_session;
                struct virtio_crypto_destroy_session_req
                        destroy_session;
                __u8 padding[56];
@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
 #define VIRTIO_CRYPTO_AEAD_DECRYPT \
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
        __le32 opcode;
        /* algo should be service-specific algorithms */
        __le32 algo;
@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
        __u8 padding[32];
 };
 
+struct virtio_crypto_akcipher_para {
+       __le32 src_data_len;
+       __le32 dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+       struct virtio_crypto_akcipher_para para;
+       __u8 padding[40];
+};
+
 /* The request of the data virtqueue's packet */
 struct virtio_crypto_op_data_req {
        struct virtio_crypto_op_header header;
@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
                struct virtio_crypto_hash_data_req hash_req;
                struct virtio_crypto_mac_data_req mac_req;
                struct virtio_crypto_aead_data_req aead_req;
+               struct virtio_crypto_akcipher_data_req akcipher_req;
                __u8 padding[48];
        } u;
 };
@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req {
 #define VIRTIO_CRYPTO_BADMSG    2
 #define VIRTIO_CRYPTO_NOTSUPP   3
 #define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC     5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
 
 /* The accelerator hardware is ready */
 #define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)
@@ -438,7 +518,7 @@ struct virtio_crypto_config {
        __le32 max_cipher_key_len;
        /* Maximum length of authenticated key */
        __le32 max_auth_key_len;
-       __le32 reserve;
+       __le32 akcipher_algo;
        /* Maximum size of each crypto request's content */
        __le64 max_size;
 };
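To make the new asymmetric-crypto plumbing concrete, here is an illustrative sketch (not from the patch) of how a driver might fill the control-queue request for an RSA session using the definitions added above; req and key_len are hypothetical locals, and the key bytes themselves would travel in a separate descriptor:

	struct virtio_crypto_op_ctrl_req req = {};
	struct virtio_crypto_akcipher_session_para *para =
		&req.u.akcipher_create_session.para;

	req.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
	req.header.algo   = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	para->algo    = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	para->keytype = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC);
	para->keylen  = cpu_to_le32(key_len);
	para->u.rsa.padding_algo = cpu_to_le32(VIRTIO_CRYPTO_RSA_PKCS1_PADDING);
	para->u.rsa.hash_algo    = cpu_to_le32(VIRTIO_CRYPTO_RSA_SHA256);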
index 97463a3..ddcbefe 100644 (file)
@@ -62,13 +62,13 @@ config LLD_VERSION
 
 config CC_CAN_LINK
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag)) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag))
 
 config CC_CAN_LINK_STATIC
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag) -static) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag) -static)
 
 config CC_HAS_ASM_GOTO
        def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
index 8c6de5a..c2f1fd9 100644 (file)
@@ -133,14 +133,4 @@ config SCHED_CORE
          which is the likely usage by Linux distributions, there should
          be no measurable impact on performance.
 
-config ARCH_WANTS_RT_DELAYED_SIGNALS
-       bool
-       help
-         This option is selected by architectures where raising signals
-         can happen in atomic contexts on PREEMPT_RT enabled kernels. This
-         option delays raising the signal until the return to user space
-         loop where it is also delivered. X86 requires this to deliver
-         signals from trap handlers which run on IST stacks.
-
-config RT_DELAYED_SIGNALS
-       def_bool PREEMPT_RT && ARCH_WANTS_RT_DELAYED_SIGNALS
+
index 35a1d29..9743c6c 100644 (file)
@@ -277,12 +277,16 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        }
 
        if (remap) {
+               pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
+
+               if (force_dma_unencrypted(dev))
+                       prot = pgprot_decrypted(prot);
+
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);
 
                /* create a coherent mapping */
-               ret = dma_common_contiguous_remap(page, size,
-                               dma_pgprot(dev, PAGE_KERNEL, attrs),
+               ret = dma_common_contiguous_remap(page, size, prot,
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
@@ -535,6 +539,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
        int ret = -ENXIO;
 
        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+       if (force_dma_unencrypted(dev))
+               vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
index 559461a..db72442 100644 (file)
@@ -407,8 +407,6 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs);
  */
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
-       if (force_dma_unencrypted(dev))
-               prot = pgprot_decrypted(prot);
        if (dev_is_dma_coherent(dev))
                return prot;
 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
index ef8d94a..e57a224 100644 (file)
@@ -142,18 +142,6 @@ void noinstr exit_to_user_mode(void)
 /* Workaround to allow gradual conversion of architecture code */
 void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
 
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-static inline void raise_delayed_signal(void)
-{
-       if (unlikely(current->forced_info.si_signo)) {
-               force_sig_info(&current->forced_info);
-               current->forced_info.si_signo = 0;
-       }
-}
-#else
-static inline void raise_delayed_signal(void) { }
-#endif
-
 static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                                            unsigned long ti_work)
 {
@@ -168,8 +156,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                if (ti_work & _TIF_NEED_RESCHED)
                        schedule();
 
-               raise_delayed_signal();
-
                if (ti_work & _TIF_UPROBE)
                        uprobe_notify_resume(regs);
 
index 368a34c..30cd1ca 100644 (file)
@@ -1308,43 +1308,6 @@ enum sig_handler {
 };
 
 /*
- * On some archictectures, PREEMPT_RT has to delay sending a signal from a
- * trap since it cannot enable preemption, and the signal code's
- * spin_locks turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME
- * which will send the signal on exit of the trap.
- */
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-static inline bool force_sig_delayed(struct kernel_siginfo *info,
-                                    struct task_struct *t)
-{
-       if (!in_atomic())
-               return false;
-
-       if (WARN_ON_ONCE(t->forced_info.si_signo))
-               return true;
-
-       if (is_si_special(info)) {
-               WARN_ON_ONCE(info != SEND_SIG_PRIV);
-               t->forced_info.si_signo = info->si_signo;
-               t->forced_info.si_errno = 0;
-               t->forced_info.si_code = SI_KERNEL;
-               t->forced_info.si_pid = 0;
-               t->forced_info.si_uid = 0;
-       } else {
-               t->forced_info = *info;
-       }
-       set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-       return true;
-}
-#else
-static inline bool force_sig_delayed(struct kernel_siginfo *info,
-                                    struct task_struct *t)
-{
-       return false;
-}
-#endif
-
-/*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
  *
@@ -1364,9 +1327,6 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
        struct k_sigaction *action;
        int sig = info->si_signo;
 
-       if (force_sig_delayed(info, t))
-               return 0;
-
        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
index 9bb54c0..2c43e32 100644 (file)
@@ -767,6 +767,7 @@ config USER_EVENTS
        bool "User trace events"
        select TRACING
        select DYNAMIC_EVENTS
+       depends on BROKEN || COMPILE_TEST # API needs to be straightened out
        help
          User trace events are user-defined trace events that
          can be used like an existing kernel trace event.  User trace
index 19028e0..8f4fb32 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Highly modified by Steven Rostedt (VMware).
  */
+#include <linux/jump_label.h>
 #include <linux/suspend.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
-static bool kill_ftrace_graph;
+DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
 int ftrace_graph_active;
 
 /* Both enabled by default (can be cleared by function_graph tracer flags) */
 static bool fgraph_sleep_time = true;
 
 /**
- * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
- *
- * ftrace_graph_stop() is called when a severe error is detected in
- * the function graph tracing. This function is called by the critical
- * paths of function graph to keep those paths from doing any more harm.
- */
-bool ftrace_graph_is_dead(void)
-{
-       return kill_ftrace_graph;
-}
-
-/**
  * ftrace_graph_stop - set to permanently disable function graph tracing
  *
  * In case of an error in function graph tracing, this is called
@@ -51,7 +40,7 @@ bool ftrace_graph_is_dead(void)
  */
 void ftrace_graph_stop(void)
 {
-       kill_ftrace_graph = true;
+       static_branch_enable(&kill_ftrace_graph);
 }
 
 /* Add a function return address to the trace stack on thread info.*/
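Since ftrace_graph_is_dead() is removed here as an out-of-line function, the static-key conversion implies an inline fast-path accessor in the header instead. A sketch of what that counterpart looks like (the actual declaration lives outside this hunk):

	DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

	/* Compiles down to a patched no-op branch until ftrace_graph_stop() flips the key. */
	static inline bool ftrace_graph_is_dead(void)
	{
		return static_branch_unlikely(&kill_ftrace_graph);
	}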
index 8b3d241..706e168 100644 (file)
 #include <linux/tracefs.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+/* Reminder to move to uapi when everything works */
+#ifdef CONFIG_COMPILE_TEST
+#include <linux/user_events.h>
+#else
 #include <uapi/linux/user_events.h>
+#endif
 #include "trace.h"
 #include "trace_dynevent.h"
 
@@ -42,9 +47,6 @@
 #define MAX_FIELD_ARRAY_SIZE 1024
 #define MAX_FIELD_ARG_NAME 256
 
-#define MAX_BPF_COPY_SIZE PAGE_SIZE
-#define MAX_STACK_BPF_DATA 512
-
 static char *register_page_data;
 
 static DEFINE_MUTEX(reg_mutex);
@@ -405,19 +407,6 @@ parse:
                                    type[0] != 'u', FILTER_OTHER);
 }
 
-static void user_event_parse_flags(struct user_event *user, char *flags)
-{
-       char *flag;
-
-       if (flags == NULL)
-               return;
-
-       while ((flag = strsep(&flags, ",")) != NULL) {
-               if (strcmp(flag, "BPF_ITER") == 0)
-                       user->flags |= FLAG_BPF_ITER;
-       }
-}
-
 static int user_event_parse_fields(struct user_event *user, char *args)
 {
        char *field;
@@ -713,64 +702,14 @@ discard:
 }
 
 #ifdef CONFIG_PERF_EVENTS
-static void user_event_bpf(struct user_event *user, struct iov_iter *i)
-{
-       struct user_bpf_context context;
-       struct user_bpf_iter bpf_i;
-       char fast_data[MAX_STACK_BPF_DATA];
-       void *temp = NULL;
-
-       if ((user->flags & FLAG_BPF_ITER) && iter_is_iovec(i)) {
-               /* Raw iterator */
-               context.data_type = USER_BPF_DATA_ITER;
-               context.data_len = i->count;
-               context.iter = &bpf_i;
-
-               bpf_i.iov_offset = i->iov_offset;
-               bpf_i.iov = i->iov;
-               bpf_i.nr_segs = i->nr_segs;
-       } else if (i->nr_segs == 1 && iter_is_iovec(i)) {
-               /* Single buffer from user */
-               context.data_type = USER_BPF_DATA_USER;
-               context.data_len = i->count;
-               context.udata = i->iov->iov_base + i->iov_offset;
-       } else {
-               /* Multi buffer from user */
-               struct iov_iter copy = *i;
-               size_t copy_size = min_t(size_t, i->count, MAX_BPF_COPY_SIZE);
-
-               context.data_type = USER_BPF_DATA_KERNEL;
-               context.kdata = fast_data;
-
-               if (unlikely(copy_size > sizeof(fast_data))) {
-                       temp = kmalloc(copy_size, GFP_NOWAIT);
-
-                       if (temp)
-                               context.kdata = temp;
-                       else
-                               copy_size = sizeof(fast_data);
-               }
-
-               context.data_len = copy_nofault(context.kdata,
-                                               copy_size, &copy);
-       }
-
-       trace_call_bpf(&user->call, &context);
-
-       kfree(temp);
-}
-
 /*
- * Writes the user supplied payload out to perf ring buffer or eBPF program.
+ * Writes the user supplied payload out to perf ring buffer.
  */
 static void user_event_perf(struct user_event *user, struct iov_iter *i,
                            void *tpdata, bool *faulted)
 {
        struct hlist_head *perf_head;
 
-       if (bpf_prog_array_valid(&user->call))
-               user_event_bpf(user, i);
-
        perf_head = this_cpu_ptr(user->call.perf_events);
 
        if (perf_head && !hlist_empty(perf_head)) {
@@ -1136,8 +1075,6 @@ static int user_event_parse(char *name, char *args, char *flags,
 
        user->tracepoint.name = name;
 
-       user_event_parse_flags(user, flags);
-
        ret = user_event_parse_fields(user, args);
 
        if (ret)
@@ -1165,11 +1102,11 @@ static int user_event_parse(char *name, char *args, char *flags,
 #endif
 
        mutex_lock(&event_mutex);
+
        ret = user_event_trace_register(user);
-       mutex_unlock(&event_mutex);
 
        if (ret)
-               goto put_user;
+               goto put_user_lock;
 
        user->index = index;
 
@@ -1181,8 +1118,12 @@ static int user_event_parse(char *name, char *args, char *flags,
        set_bit(user->index, page_bitmap);
        hash_add(register_table, &user->node, key);
 
+       mutex_unlock(&event_mutex);
+
        *newuser = user;
        return 0;
+put_user_lock:
+       mutex_unlock(&event_mutex);
 put_user:
        user_event_destroy_fields(user);
        user_event_destroy_validators(user);
@@ -1575,9 +1516,6 @@ static int user_seq_show(struct seq_file *m, void *p)
                        busy++;
                }
 
-               if (flags & FLAG_BPF_ITER)
-                       seq_puts(m, " FLAG:BPF_ITER");
-
                seq_puts(m, "\n");
                active++;
        }
index 3990e4d..230038d 100644 (file)
@@ -370,6 +370,7 @@ static void __put_watch_queue(struct kref *kref)
 
        for (i = 0; i < wqueue->nr_pages; i++)
                __free_page(wqueue->notes[i]);
+       kfree(wqueue->notes);
        bitmap_free(wqueue->notes_bitmap);
 
        wfilter = rcu_access_pointer(wqueue->filter);
index 8c3365f..b247d41 100644 (file)
@@ -68,7 +68,7 @@ int logic_iomem_add_region(struct resource *resource,
 }
 EXPORT_SYMBOL(logic_iomem_add_region);
 
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
 static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
 {
        WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
@@ -81,7 +81,7 @@ static void real_iounmap(volatile void __iomem *addr)
        WARN(1, "invalid iounmap for addr 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
 }
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
 
 void __iomem *ioremap(phys_addr_t offset, size_t size)
 {
@@ -168,7 +168,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
 #define MAKE_FALLBACK(op, sz)                                          \
 static u##sz real_raw_read ## op(const volatile void __iomem *addr)    \
 {                                                                      \
@@ -213,7 +213,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
        WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
 }
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
 
 #define MAKE_OP(op, sz)                                                \
 u##sz __raw_read ## op(const volatile void __iomem *addr)              \
index 2eb3de1..ae4fd4d 100644 (file)
@@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
                sb->alloc_hint = NULL;
        }
 
-       sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+       sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
        if (!sb->map) {
                free_percpu(sb->alloc_hint);
                return -ENOMEM;
index 8b1c318..e77d485 100644 (file)
@@ -1463,6 +1463,25 @@ unlock:
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_create_range_5(struct xarray *xa,
+               unsigned long index, unsigned int order)
+{
+       XA_STATE_ORDER(xas, xa, index, order);
+       unsigned int i;
+
+       xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+       for (i = 0; i < order + 10; i++) {
+               do {
+                       xas_lock(&xas);
+                       xas_create_range(&xas);
+                       xas_unlock(&xas);
+               } while (xas_nomem(&xas, GFP_KERNEL));
+       }
+
+       xa_destroy(xa);
+}
+
 static noinline void check_create_range(struct xarray *xa)
 {
        unsigned int order;
@@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa)
                check_create_range_4(xa, (3U << order) + 1, order);
                check_create_range_4(xa, (3U << order) - 1, order);
                check_create_range_4(xa, (1U << 24) + 1, order);
+
+               check_create_range_5(xa, 0, order);
+               check_create_range_5(xa, (1U << order), order);
        }
 
        check_create_range_3();
index b95e925..4acc88e 100644 (file)
@@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas)
 
                for (;;) {
                        struct xa_node *node = xas->xa_node;
+                       if (node->shift >= shift)
+                               break;
                        xas->xa_node = xa_parent_locked(xas->xa, node);
                        xas->xa_offset = node->offset - 1;
                        if (node->offset != 0)
@@ -1079,6 +1081,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
                                        xa_mk_node(child));
                        if (xa_is_value(curr))
                                values--;
+                       xas_update(xas, child);
                } else {
                        unsigned int canon = offset - xas->xa_sibs;
 
@@ -1093,6 +1096,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
        } while (offset-- > xas->xa_offset);
 
        node->nr_values += values;
+       xas_update(xas, node);
 }
 EXPORT_SYMBOL_GPL(xas_split);
 #endif
index 907fefd..4b8eab4 100644 (file)
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);
 
 #ifdef CONFIG_BALLOON_COMPACTION
 
-bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
+static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
 
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
@@ -217,7 +217,7 @@ bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
        return true;
 }
 
-void balloon_page_putback(struct page *page)
+static void balloon_page_putback(struct page *page)
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;
@@ -230,7 +230,7 @@ void balloon_page_putback(struct page *page)
 
 
 /* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct address_space *mapping,
+static int balloon_page_migrate(struct address_space *mapping,
                struct page *newpage, struct page *page,
                enum migrate_mode mode)
 {
index c1e0fed..5ce8d7c 100644 (file)
@@ -1019,12 +1019,15 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
        struct damos *s;
        unsigned long wait_time;
        unsigned long min_wait_time = 0;
+       bool init_wait_time = false;
 
        while (!kdamond_need_stop(ctx)) {
                damon_for_each_scheme(s, ctx) {
                        wait_time = damos_wmark_wait_us(s);
-                       if (!min_wait_time || wait_time < min_wait_time)
+                       if (!init_wait_time || wait_time < min_wait_time) {
+                               init_wait_time = true;
                                min_wait_time = wait_time;
+                       }
                }
                if (!min_wait_time)
                        return 0;
index 647d72b..3a5ffb5 100644 (file)
@@ -2538,7 +2538,7 @@ static int filemap_create_folio(struct file *file,
         * the page cache as the locked folio would then be enough to
         * synchronize with hole punching. But there are code paths
         * such as filemap_update_page() filling in partially uptodate
-        * pages or ->readpages() that need to hold invalidate_lock
+        * pages or ->readahead() that need to hold invalidate_lock
         * while mapping blocks for IO so let's hold the lock here as
         * well to keep locking rules simple.
         */
@@ -3752,9 +3752,10 @@ out:
 }
 EXPORT_SYMBOL(generic_file_direct_write);
 
-ssize_t generic_perform_write(struct file *file,
-                               struct iov_iter *i, loff_t pos)
+ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 {
+       struct file *file = iocb->ki_filp;
+       loff_t pos = iocb->ki_pos;
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        long status = 0;
@@ -3884,7 +3885,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
                        goto out;
 
-               status = generic_perform_write(file, from, pos = iocb->ki_pos);
+               pos = iocb->ki_pos;
+               status = generic_perform_write(iocb, from);
                /*
                 * If generic_perform_write() returned a synchronous error
                 * then we want to return the number of bytes which were
@@ -3916,7 +3918,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                         */
                }
        } else {
-               written = generic_perform_write(file, from, iocb->ki_pos);
+               written = generic_perform_write(iocb, from);
                if (likely(written > 0))
                        iocb->ki_pos += written;
        }
index 271fbe8..f598a03 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1404,6 +1404,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
+       long ret;
 
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1438,8 +1439,10 @@ long populate_vma_page_range(struct vm_area_struct *vma,
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
-       return __get_user_pages(mm, start, nr_pages, gup_flags,
+       ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
+       lru_add_drain();
+       return ret;
 }
 
 /*
@@ -1471,6 +1474,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
+       long ret;
 
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1498,8 +1502,10 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
        if (check_vma_flags(vma, gup_flags))
                return -EINVAL;
 
-       return __get_user_pages(mm, start, nr_pages, gup_flags,
+       ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
+       lru_add_drain();
+       return ret;
 }
 
 /*
index 58dc6ad..cf16280 100644 (file)
@@ -456,7 +456,8 @@ static inline void munlock_vma_page(struct page *page,
 }
 void mlock_new_page(struct page *page);
 bool need_mlock_page_drain(int cpu);
-void mlock_page_drain(int cpu);
+void mlock_page_drain_local(void);
+void mlock_page_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -539,7 +540,8 @@ static inline void munlock_vma_page(struct page *page,
                        struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_page(struct page *page) { }
 static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain(int cpu) { }
+static inline void mlock_page_drain_local(void) { }
+static inline void mlock_page_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
index 2f9fdfd..a203747 100644 (file)
@@ -566,6 +566,8 @@ static unsigned long kfence_init_pool(void)
         * enters __slab_free() slow-path.
         */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+               struct slab *slab = page_slab(&pages[i]);
+
                if (!i || (i % 2))
                        continue;
 
@@ -573,7 +575,11 @@ static unsigned long kfence_init_pool(void)
                if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
                        return addr;
 
-               __SetPageSlab(&pages[i]);
+               __folio_set_slab(slab_folio(slab));
+#ifdef CONFIG_MEMCG
+               slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+                                  MEMCG_DATA_OBJCGS;
+#endif
        }
 
        /*
@@ -1033,6 +1039,9 @@ void __kfence_free(void *addr)
 {
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
 
+#ifdef CONFIG_MEMCG
+       KFENCE_WARN_ON(meta->objcg);
+#endif
        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
index 2a2d5de..9a6c4b1 100644 (file)
@@ -89,6 +89,9 @@ struct kfence_metadata {
        struct kfence_track free_track;
        /* For updating alloc_covered on frees. */
        u32 alloc_stack_hash;
+#ifdef CONFIG_MEMCG
+       struct obj_cgroup *objcg;
+#endif
 };
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
index 7580baa..acd7cbb 100644 (file)
@@ -796,6 +796,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area = NULL;
+       unsigned long untagged_ptr;
+       unsigned long untagged_objp;
 
        object = find_and_get_object(ptr, 1);
        if (!object) {
@@ -804,6 +806,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                return;
        }
 
+       untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+       untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
+
        if (scan_area_cache)
                area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
@@ -815,8 +820,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                goto out_unlock;
        }
        if (size == SIZE_MAX) {
-               size = object->pointer + object->size - ptr;
-       } else if (ptr + size > object->pointer + object->size) {
+               size = untagged_objp + object->size - untagged_ptr;
+       } else if (untagged_ptr + size > untagged_objp + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
index b41858e..1873616 100644 (file)
@@ -1464,16 +1464,9 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
 
        while (iov_iter_count(&iter)) {
                iovec = iov_iter_iovec(&iter);
-               /*
-                * do_madvise returns ENOMEM if unmapped holes are present
-                * in the passed VMA. process_madvise() is expected to skip
-                * unmapped holes passed to it in the 'struct iovec' list
-                * and not fail because of them. Thus treat -ENOMEM return
-                * from do_madvise as valid and continue processing.
-                */
                ret = do_madvise(mm, (unsigned long)iovec.iov_base,
                                        iovec.iov_len, behavior);
-               if (ret < 0 && ret != -ENOMEM)
+               if (ret < 0)
                        break;
                iov_iter_advance(&iter, iovec.iov_len);
        }
index be44d0b..76e3af9 100644 (file)
@@ -3918,14 +3918,18 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                return ret;
 
        if (unlikely(PageHWPoison(vmf->page))) {
+               struct page *page = vmf->page;
                vm_fault_t poisonret = VM_FAULT_HWPOISON;
                if (ret & VM_FAULT_LOCKED) {
+                       if (page_mapped(page))
+                               unmap_mapping_pages(page_mapping(page),
+                                                   page->index, 1, false);
                        /* Retry if a clean page was removed from the cache. */
-                       if (invalidate_inode_page(vmf->page))
-                               poisonret = 0;
-                       unlock_page(vmf->page);
+                       if (invalidate_inode_page(page))
+                               poisonret = VM_FAULT_NOPAGE;
+                       unlock_page(page);
                }
-               put_page(vmf->page);
+               put_page(page);
                vmf->page = NULL;
                return poisonret;
        }
index 3d60823..de175e2 100644 (file)
@@ -246,7 +246,7 @@ static bool remove_migration_pte(struct folio *folio,
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                }
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
 
                trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                           compound_order(new));
index 529fbc1..716caf8 100644 (file)
 
 #include "internal.h"
 
-static DEFINE_PER_CPU(struct pagevec, mlock_pvec);
+struct mlock_pvec {
+       local_lock_t lock;
+       struct pagevec vec;
+};
+
+static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
+       .lock = INIT_LOCAL_LOCK(lock),
+};
 
 bool can_do_mlock(void)
 {
@@ -203,18 +210,30 @@ static void mlock_pagevec(struct pagevec *pvec)
        pagevec_reinit(pvec);
 }
 
-void mlock_page_drain(int cpu)
+void mlock_page_drain_local(void)
+{
+       struct pagevec *pvec;
+
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
+       if (pagevec_count(pvec))
+               mlock_pagevec(pvec);
+       local_unlock(&mlock_pvec.lock);
+}
+
+void mlock_page_drain_remote(int cpu)
 {
        struct pagevec *pvec;
 
-       pvec = &per_cpu(mlock_pvec, cpu);
+       WARN_ON_ONCE(cpu_online(cpu));
+       pvec = &per_cpu(mlock_pvec.vec, cpu);
        if (pagevec_count(pvec))
                mlock_pagevec(pvec);
 }
 
 bool need_mlock_page_drain(int cpu)
 {
-       return pagevec_count(&per_cpu(mlock_pvec, cpu));
+       return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
 }
 
 /**
@@ -223,7 +242,10 @@ bool need_mlock_page_drain(int cpu)
  */
 void mlock_folio(struct folio *folio)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
+
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
 
        if (!folio_test_set_mlocked(folio)) {
                int nr_pages = folio_nr_pages(folio);
@@ -236,7 +258,7 @@ void mlock_folio(struct folio *folio)
        if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
            folio_test_large(folio) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -245,9 +267,11 @@ void mlock_folio(struct folio *folio)
  */
 void mlock_new_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
        int nr_pages = thp_nr_pages(page);
 
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
        SetPageMlocked(page);
        mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
        __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
@@ -256,7 +280,7 @@ void mlock_new_page(struct page *page)
        if (!pagevec_add(pvec, mlock_new(page)) ||
            PageHead(page) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -265,8 +289,10 @@ void mlock_new_page(struct page *page)
  */
 void munlock_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
 
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
        /*
         * TestClearPageMlocked(page) must be left to __munlock_page(),
         * which will check whether the page is multiply mlocked.
@@ -276,7 +302,7 @@ void munlock_page(struct page *page)
        if (!pagevec_add(pvec, page) ||
            PageHead(page) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
index 6c6af86..2db9578 100644 (file)
@@ -8367,6 +8367,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
        struct zone *zone;
 
        lru_add_drain_cpu(cpu);
+       mlock_page_drain_remote(cpu);
        drain_pages(cpu);
 
        /*
index d3a4754..8e37758 100644 (file)
  *
  * Readahead is used to read content into the page cache before it is
  * explicitly requested by the application.  Readahead only ever
- * attempts to read pages that are not yet in the page cache.  If a
- * page is present but not up-to-date, readahead will not try to read
+ * attempts to read folios that are not yet in the page cache.  If a
+ * folio is present but not up-to-date, readahead will not try to read
  * it. In that case a simple ->readpage() will be requested.
  *
  * Readahead is triggered when an application read request (whether a
- * systemcall or a page fault) finds that the requested page is not in
+ * system call or a page fault) finds that the requested folio is not in
  * the page cache, or that it is in the page cache and has the
- * %PG_readahead flag set.  This flag indicates that the page was loaded
- * as part of a previous read-ahead request and now that it has been
- * accessed, it is time for the next read-ahead.
+ * readahead flag set.  This flag indicates that the folio was read
+ * as part of a previous readahead request and now that it has been
+ * accessed, it is time for the next readahead.
  *
  * Each readahead request is partly synchronous read, and partly async
- * read-ahead.  This is reflected in the struct file_ra_state which
- * contains ->size being to total number of pages, and ->async_size
- * which is the number of pages in the async section.  The first page in
- * this async section will have %PG_readahead set as a trigger for a
- * subsequent read ahead.  Once a series of sequential reads has been
+ * readahead.  This is reflected in the struct file_ra_state which
+ * contains ->size being the total number of pages, and ->async_size
+ * which is the number of pages in the async section.  The readahead
+ * flag will be set on the first folio in this async section to trigger
+ * a subsequent readahead.  Once a series of sequential reads has been
  * established, there should be no need for a synchronous component and
- * all read ahead request will be fully asynchronous.
+ * all readahead requests will be fully asynchronous.
  *
- * When either of the triggers causes a readahead, three numbers need to
- * be determined: the start of the region, the size of the region, and
- * the size of the async tail.
+ * When either of the triggers causes a readahead, three numbers need
+ * to be determined: the start of the region to read, the size of the
+ * region, and the size of the async tail.
  *
  * The start of the region is simply the first page address at or after
  * the accessed address, which is not currently populated in the page
  * was explicitly requested from the determined request size, unless
  * this would be less than zero - then zero is used.  NOTE THIS
  * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
- * PAGE.
+ * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
  *
  * The size of the region is normally determined from the size of the
  * previous readahead which loaded the preceding pages.  This may be
  * discovered from the struct file_ra_state for simple sequential reads,
  * or from examining the state of the page cache when multiple
  * sequential reads are interleaved.  Specifically: where the readahead
- * was triggered by the %PG_readahead flag, the size of the previous
+ * was triggered by the readahead flag, the size of the previous
  * readahead is assumed to be the number of pages from the triggering
  * page to the start of the new readahead.  In these cases, the size of
  * the previous readahead is scaled, often doubled, for the new
  * larger than the current request, and it is not scaled up, unless it
  * is at the start of file.
  *
- * In general read ahead is accelerated at the start of the file, as
+ * In general readahead is accelerated at the start of the file, as
  * reads from there are often sequential.  There are other minor
- * adjustments to the read ahead size in various special cases and these
+ * adjustments to the readahead size in various special cases and these
  * are best discovered by reading the code.
  *
- * The above calculation determines the readahead, to which any requested
- * read size may be added.
+ * The above calculation, based on the previous readahead size,
+ * determines the size of the readahead, to which any requested read
+ * size may be added.
  *
  * Readahead requests are sent to the filesystem using the ->readahead()
  * address space operation, for which mpage_readahead() is a canonical
  * implementation.  ->readahead() should normally initiate reads on all
- * pages, but may fail to read any or all pages without causing an IO
+ * folios, but may fail to read any or all folios without causing an I/O
  * error.  The page cache reading code will issue a ->readpage() request
- * for any page which ->readahead() does not provided, and only an error
+ * for any folio which ->readahead() did not read, and only an error
  * from this will be final.
  *
- * ->readahead() will generally call readahead_page() repeatedly to get
- * each page from those prepared for read ahead.  It may fail to read a
- * page by:
+ * ->readahead() will generally call readahead_folio() repeatedly to get
+ * each folio from those prepared for readahead.  It may fail to read a
+ * folio by:
  *
- * * not calling readahead_page() sufficiently many times, effectively
- *   ignoring some pages, as might be appropriate if the path to
+ * * not calling readahead_folio() sufficiently many times, effectively
+ *   ignoring some folios, as might be appropriate if the path to
  *   storage is congested.
  *
- * * failing to actually submit a read request for a given page,
+ * * failing to actually submit a read request for a given folio,
  *   possibly due to insufficient resources, or
  *
  * * getting an error during subsequent processing of a request.
  *
- * In the last two cases, the page should be unlocked to indicate that
- * the read attempt has failed.  In the first case the page will be
- * unlocked by the caller.
+ * In the last two cases, the folio should be unlocked by the filesystem
+ * to indicate that the read attempt has failed.  In the first case the
+ * folio will be unlocked by the VFS.
  *
- * Those pages not in the final ``async_size`` of the request should be
+ * Those folios not in the final ``async_size`` of the request should be
  * considered to be important and ->readahead() should not fail them due
  * to congestion or temporary resource unavailability, but should wait
  * for necessary resources (e.g.  memory or indexing information) to
- * become available.  Pages in the final ``async_size`` may be
+ * become available.  Folios in the final ``async_size`` may be
  * considered less urgent and failure to read them is more acceptable.
- * In this case it is best to use delete_from_page_cache() to remove the
- * pages from the page cache as is automatically done for pages that
- * were not fetched with readahead_page().  This will allow a
- * subsequent synchronous read ahead request to try them again.  If they
+ * In this case it is best to use filemap_remove_folio() to remove the
+ * folios from the page cache as is automatically done for folios that
+ * were not fetched with readahead_folio().  This will allow a
+ * subsequent synchronous readahead request to try them again.  If they
  * are left in the page cache, then they will be read individually using
- * ->readpage().
- *
+ * ->readpage() which may be less efficient.
  */
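As a rough illustration of the contract described above (not taken from this patch), a minimal ->readahead() implementation built on readahead_folio() could look like the following; my_submit_read() is a placeholder for the filesystem's own block mapping and I/O submission:

	static void my_readahead(struct readahead_control *rac)
	{
		struct folio *folio;

		while ((folio = readahead_folio(rac))) {
			/* On a submission failure, unlock to report that this folio was not read. */
			if (my_submit_read(rac->file, folio) < 0)
				folio_unlock(folio);
		}
	}

Folios whose reads are submitted successfully are unlocked by the I/O completion path, as with ->readpage().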
 
 #include <linux/kernel.h>
@@ -142,91 +142,14 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
-/*
- * see if a page needs releasing upon read_cache_pages() failure
- * - the caller of read_cache_pages() may have set PG_private or PG_fscache
- *   before calling, such as the NFS fs marking pages that are cached locally
- *   on disk, thus we need to give the fs a chance to clean up in the event of
- *   an error
- */
-static void read_cache_pages_invalidate_page(struct address_space *mapping,
-                                            struct page *page)
-{
-       if (page_has_private(page)) {
-               if (!trylock_page(page))
-                       BUG();
-               page->mapping = mapping;
-               folio_invalidate(page_folio(page), 0, PAGE_SIZE);
-               page->mapping = NULL;
-               unlock_page(page);
-       }
-       put_page(page);
-}
-
-/*
- * release a list of pages, invalidating them first if need be
- */
-static void read_cache_pages_invalidate_pages(struct address_space *mapping,
-                                             struct list_head *pages)
-{
-       struct page *victim;
-
-       while (!list_empty(pages)) {
-               victim = lru_to_page(pages);
-               list_del(&victim->lru);
-               read_cache_pages_invalidate_page(mapping, victim);
-       }
-}
-
-/**
- * read_cache_pages - populate an address space with some pages & start reads against them
- * @mapping: the address_space
- * @pages: The address of a list_head which contains the target pages.  These
- *   pages have their ->index populated and are otherwise uninitialised.
- * @filler: callback routine for filling a single page.
- * @data: private data for the callback routine.
- *
- * Hides the details of the LRU cache etc from the filesystems.
- *
- * Returns: %0 on success, error return by @filler otherwise
- */
-int read_cache_pages(struct address_space *mapping, struct list_head *pages,
-                       int (*filler)(void *, struct page *), void *data)
-{
-       struct page *page;
-       int ret = 0;
-
-       while (!list_empty(pages)) {
-               page = lru_to_page(pages);
-               list_del(&page->lru);
-               if (add_to_page_cache_lru(page, mapping, page->index,
-                               readahead_gfp_mask(mapping))) {
-                       read_cache_pages_invalidate_page(mapping, page);
-                       continue;
-               }
-               put_page(page);
-
-               ret = filler(data, page);
-               if (unlikely(ret)) {
-                       read_cache_pages_invalidate_pages(mapping, pages);
-                       break;
-               }
-               task_io_account_read(PAGE_SIZE);
-       }
-       return ret;
-}
-
-EXPORT_SYMBOL(read_cache_pages);
-
-static void read_pages(struct readahead_control *rac, struct list_head *pages,
-               bool skip_page)
+static void read_pages(struct readahead_control *rac)
 {
        const struct address_space_operations *aops = rac->mapping->a_ops;
        struct page *page;
        struct blk_plug plug;
 
        if (!readahead_count(rac))
-               goto out;
+               return;
 
        blk_start_plug(&plug);
 
@@ -234,7 +157,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
                aops->readahead(rac);
                /*
                 * Clean up the remaining pages.  The sizes in ->ra
-                * maybe be used to size next read-ahead, so make sure
+                * may be used to size the next readahead, so make sure
                 * they accurately reflect what happened.
                 */
                while ((page = readahead_page(rac))) {
@@ -246,13 +169,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
                        unlock_page(page);
                        put_page(page);
                }
-       } else if (aops->readpages) {
-               aops->readpages(rac->file, rac->mapping, pages,
-                               readahead_count(rac));
-               /* Clean up the remaining pages */
-               put_pages_list(pages);
-               rac->_index += rac->_nr_pages;
-               rac->_nr_pages = 0;
        } else {
                while ((page = readahead_page(rac))) {
                        aops->readpage(rac->file, page);
@@ -262,12 +178,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 
        blk_finish_plug(&plug);
 
-       BUG_ON(pages && !list_empty(pages));
        BUG_ON(readahead_count(rac));
-
-out:
-       if (skip_page)
-               rac->_index++;
 }
 
 /**
@@ -289,7 +200,6 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 {
        struct address_space *mapping = ractl->mapping;
        unsigned long index = readahead_index(ractl);
-       LIST_HEAD(page_pool);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);
        unsigned long i;
 
@@ -321,7 +231,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
                         * have a stable reference to this page, and it's
                         * not worth getting one just for that.
                         */
-                       read_pages(ractl, &page_pool, true);
+                       read_pages(ractl);
+                       ractl->_index++;
                        i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
@@ -329,13 +240,11 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
                folio = filemap_alloc_folio(gfp_mask, 0);
                if (!folio)
                        break;
-               if (mapping->a_ops->readpages) {
-                       folio->index = index + i;
-                       list_add(&folio->lru, &page_pool);
-               } else if (filemap_add_folio(mapping, folio, index + i,
+               if (filemap_add_folio(mapping, folio, index + i,
                                        gfp_mask) < 0) {
                        folio_put(folio);
-                       read_pages(ractl, &page_pool, true);
+                       read_pages(ractl);
+                       ractl->_index++;
                        i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
@@ -349,7 +258,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
-       read_pages(ractl, &page_pool, false);
+       read_pages(ractl);
        filemap_invalidate_unlock_shared(mapping);
        memalloc_nofs_restore(nofs);
 }
@@ -394,8 +303,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages, index;
 
-       if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
-                       !mapping->a_ops->readahead))
+       if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
                return;
 
        /*
@@ -512,7 +420,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
 }
 
 /*
- * page cache context based read-ahead
+ * page cache context based readahead
  */
 static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
@@ -624,7 +532,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
                ra->async_size += index - limit - 1;
        }
 
-       read_pages(ractl, NULL, false);
+       read_pages(ractl);
 
        /*
         * If there were already pages in the page cache, then we may have
@@ -763,9 +671,9 @@ void page_cache_sync_ra(struct readahead_control *ractl,
        bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
        /*
-        * Even if read-ahead is disabled, issue this request as read-ahead
+        * Even if readahead is disabled, issue this request as readahead
         * as we'll need it to satisfy the requested range. The forced
-        * read-ahead will do the right thing and limit the read to just the
+        * readahead will do the right thing and limit the read to just the
         * requested range, which we'll set to 1 page for this case.
         */
        if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
@@ -781,7 +689,6 @@ void page_cache_sync_ra(struct readahead_control *ractl,
                return;
        }
 
-       /* do read-ahead */
        ondemand_readahead(ractl, NULL, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
@@ -789,7 +696,7 @@ EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 void page_cache_async_ra(struct readahead_control *ractl,
                struct folio *folio, unsigned long req_count)
 {
-       /* no read-ahead */
+       /* no readahead */
        if (!ractl->ra->ra_pages)
                return;
 
@@ -804,7 +711,6 @@ void page_cache_async_ra(struct readahead_control *ractl,
        if (blk_cgroup_congested())
                return;
 
-       /* do read-ahead */
        ondemand_readahead(ractl, folio, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_ra);
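
An ->readahead implementation is expected to consume folios with readahead_folio() as described in the comment above; roughly, the pattern looks like the sketch below (my_fs_readahead() and my_fs_start_read() are made-up names, not kernel APIs, and error handling is reduced to the minimum):

#include <linux/pagemap.h>

/* hypothetical helper: submits the read and unlocks the folio on completion */
static int my_fs_start_read(struct file *file, struct folio *folio);

static void my_fs_readahead(struct readahead_control *rac)
{
	struct folio *folio;

	while ((folio = readahead_folio(rac)) != NULL) {
		/*
		 * readahead_folio() advances the iterator and drops the
		 * extra reference; the folio stays locked until the read
		 * completes (or until we unlock it on error below).
		 */
		if (my_fs_start_read(rac->file, folio) < 0) {
			folio_unlock(folio);
			break;
		}
	}
	/* folios we did not fetch are cleaned up by read_pages() */
}
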
index 5cb970d..fedb823 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1683,7 +1683,7 @@ discard:
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
                folio_put(folio);
        }
 
@@ -1961,7 +1961,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
                folio_put(folio);
        }
 
index bceff0c..7e320ec 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -624,7 +624,6 @@ void lru_add_drain_cpu(int cpu)
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
        activate_page_drain(cpu);
-       mlock_page_drain(cpu);
 }
 
 /**
@@ -706,6 +705,7 @@ void lru_add_drain(void)
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
+       mlock_page_drain_local();
 }
 
 /*
@@ -720,6 +720,7 @@ static void lru_add_and_bh_lrus_drain(void)
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
        invalidate_bh_lrus_cpu();
+       mlock_page_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -728,6 +729,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&lru_pvecs.lock);
+       mlock_page_drain_local();
 }
 
 #ifdef CONFIG_SMP
index 2173a67..9717e6f 100644 (file)
@@ -40,8 +40,7 @@ include $(srctree)/scripts/Makefile.compiler
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-kbuild-file := $(if $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Kbuild,$(kbuild-dir)/Makefile)
-include $(kbuild-file)
+include $(or $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Makefile)
 
 include $(srctree)/scripts/Makefile.lib
 
index fd61753..74cb1c5 100644 (file)
@@ -12,7 +12,7 @@ include $(srctree)/scripts/Kbuild.include
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+include $(or $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Makefile)
 
 # Figure out what we need to build from the various variables
 # ==========================================================================
index c593475..9f69ecd 100644 (file)
@@ -106,7 +106,7 @@ subdir-ym   := $(addprefix $(obj)/,$(subdir-ym))
 modname-multi = $(sort $(foreach m,$(multi-obj-ym),\
                $(if $(filter $*.o, $(call suffix-search, $m, .o, -objs -y -m)),$(m:.o=))))
 
-__modname = $(if $(modname-multi),$(modname-multi),$(basetarget))
+__modname = $(or $(modname-multi),$(basetarget))
 
 modname = $(subst $(space),:,$(__modname))
 modfile = $(addprefix $(obj)/,$(__modname))
@@ -241,20 +241,16 @@ $(foreach m, $(notdir $1), \
        $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
 endef
 
-quiet_cmd_copy = COPY    $@
-      cmd_copy = cp $< $@
-
-# Shipped files
+# Copy a file
 # ===========================================================================
 # 'cp' preserves permissions. If you use it to copy a file in read-only srctree,
 # the copy would be read-only as well, leading to an error when executing the
 # rule next time. Use 'cat' instead in order to generate a writable file.
-
-quiet_cmd_shipped = SHIPPED $@
-cmd_shipped = cat $< > $@
+quiet_cmd_copy = COPY    $@
+      cmd_copy = cat $< > $@
 
 $(obj)/%: $(src)/%_shipped
-       $(call cmd,shipped)
+       $(call cmd,copy)
 
 # Commands useful for building a boot image
 # ===========================================================================
@@ -431,7 +427,7 @@ MKIMAGE := $(srctree)/scripts/mkuboot.sh
 # SRCARCH just happens to match slightly more than ARCH (on sparc), so reduces
 # the number of overrides in arch makefiles
 UIMAGE_ARCH ?= $(SRCARCH)
-UIMAGE_COMPRESSION ?= $(if $(2),$(2),none)
+UIMAGE_COMPRESSION ?= $(or $(2),none)
 UIMAGE_OPTS-y ?=
 UIMAGE_TYPE ?= kernel
 UIMAGE_LOADADDR ?= arch_must_set_this
index 44e887c..2328f9a 100644 (file)
@@ -105,25 +105,6 @@ static void usage(void)
        exit(1);
 }
 
-/*
- * In the intended usage of this program, the stdout is redirected to .*.cmd
- * files. The return value of printf() must be checked to catch any error,
- * e.g. "No space left on device".
- */
-static void xprintf(const char *format, ...)
-{
-       va_list ap;
-       int ret;
-
-       va_start(ap, format);
-       ret = vprintf(format, ap);
-       if (ret < 0) {
-               perror("fixdep");
-               exit(1);
-       }
-       va_end(ap);
-}
-
 struct item {
        struct item     *next;
        unsigned int    len;
@@ -189,7 +170,7 @@ static void use_config(const char *m, int slen)
 
        define_config(m, slen, hash);
        /* Print out a dependency path from a symbol name. */
-       xprintf("    $(wildcard include/config/%.*s) \\\n", slen, m);
+       printf("    $(wildcard include/config/%.*s) \\\n", slen, m);
 }
 
 /* test if s ends in sub */
@@ -318,13 +299,13 @@ static void parse_dep_file(char *m, const char *target)
                                 */
                                if (!saw_any_target) {
                                        saw_any_target = 1;
-                                       xprintf("source_%s := %s\n\n",
-                                               target, m);
-                                       xprintf("deps_%s := \\\n", target);
+                                       printf("source_%s := %s\n\n",
+                                              target, m);
+                                       printf("deps_%s := \\\n", target);
                                }
                                is_first_dep = 0;
                        } else {
-                               xprintf("  %s \\\n", m);
+                               printf("  %s \\\n", m);
                        }
 
                        buf = read_file(m);
@@ -347,8 +328,8 @@ static void parse_dep_file(char *m, const char *target)
                exit(1);
        }
 
-       xprintf("\n%s: $(deps_%s)\n\n", target, target);
-       xprintf("$(deps_%s):\n", target);
+       printf("\n%s: $(deps_%s)\n\n", target, target);
+       printf("$(deps_%s):\n", target);
 }
 
 int main(int argc, char *argv[])
@@ -363,11 +344,22 @@ int main(int argc, char *argv[])
        target = argv[2];
        cmdline = argv[3];
 
-       xprintf("cmd_%s := %s\n\n", target, cmdline);
+       printf("cmd_%s := %s\n\n", target, cmdline);
 
        buf = read_file(depfile);
        parse_dep_file(buf, target);
        free(buf);
 
+       fflush(stdout);
+
+       /*
+        * In the intended usage, stdout is redirected to .*.cmd files.
+        * Call ferror() to catch errors such as "No space left on device".
+        */
+       if (ferror(stdout)) {
+               fprintf(stderr, "fixdep: not all data was written to the output\n");
+               exit(1);
+       }
+
        return 0;
 }
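
The same flush-then-check idiom works in any stdio program; a small standalone sketch of the pattern fixdep adopts here, with made-up output, is:

#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 1000; i++)
		printf("dep_%d := value\n", i);

	fflush(stdout);
	/* one ferror() check instead of checking every printf() return value */
	if (ferror(stdout)) {
		fprintf(stderr, "example: not all data was written to the output\n");
		return 1;
	}
	return 0;
}
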
index 7437e19..1389db7 100755 (executable)
@@ -327,7 +327,7 @@ sub output_rest {
                my @filepath = split / /, $data{$what}->{filepath};
 
                if ($enable_lineno) {
-                       printf "#define LINENO %s%s#%s\n\n",
+                       printf ".. LINENO %s%s#%s\n\n",
                               $prefix, $file[0],
                               $data{$what}->{line_no};
                }
@@ -1023,7 +1023,7 @@ logic (B<--no-rst-source>).
 
 =item B<--enable-lineno>
 
-Enable output of #define LINENO lines.
+Enable output of .. LINENO lines.
 
 =item B<--debug> I<debug level>
 
index 4577123..76cfb96 100755 (executable)
@@ -13,6 +13,7 @@ my $man;
 my $debug;
 my $arch;
 my $feat;
+my $enable_fname;
 
 my $basename = abs_path($0);
 $basename =~ s,/[^/]+$,/,;
@@ -31,6 +32,7 @@ GetOptions(
        'arch=s' => \$arch,
        'feat=s' => \$feat,
        'feature=s' => \$feat,
+       "enable-fname" => \$enable_fname,
        man => \$man
 ) or pod2usage(2);
 
@@ -95,6 +97,10 @@ sub parse_feat {
        return if ($file =~ m,($prefix)/arch-support.txt,);
        return if (!($file =~ m,arch-support.txt$,));
 
+       if ($enable_fname) {
+               printf ".. FILE %s\n", abs_path($file);
+       }
+
        my $subsys = "";
        $subsys = $2 if ( m,.*($prefix)/([^/]+).*,);
 
@@ -580,6 +586,11 @@ Output features for a single specific feature.
 Changes the location of the Feature files. By default, it uses
 the Documentation/features directory.
 
+=item B<--enable-fname>
+
+Prints the file name of the feature files. This can be used to track
+dependencies during the documentation build.
+
 =item B<--debug>
 
 Put the script in verbose mode, useful for debugging. Can be called multiple
index 54ad86d..8caabdd 100644 (file)
@@ -108,7 +108,7 @@ static bool is_ignored_symbol(const char *name, char type)
        /* Symbol names that begin with the following are ignored.*/
        static const char * const ignored_prefixes[] = {
                "$",                    /* local symbols for ARM, MIPS, etc. */
-               ".LASANPC",             /* s390 kasan local symbols */
+               ".L",                   /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
                "__crc_",               /* modversions */
                "__efistub_",           /* arm64 EFI stub namespace */
                "__kvm_nvhe_",          /* arm64 non-VHE KVM namespace */
index d3c3a61..c4340c9 100644 (file)
@@ -658,13 +658,6 @@ static char *escape_string_value(const char *in)
        return out;
 }
 
-/*
- * Kconfig configuration printer
- *
- * This printer is used when generating the resulting configuration after
- * kconfig invocation and `defconfig' files. Unset symbol might be omitted by
- * passing a non-NULL argument to the printer.
- */
 enum output_n { OUTPUT_N, OUTPUT_N_AS_UNSET, OUTPUT_N_NONE };
 
 static void __print_symbol(FILE *fp, struct symbol *sym, enum output_n output_n,
@@ -903,19 +896,20 @@ next:
                        menu = menu->list;
                        continue;
                }
-               if (menu->next)
+
+end_check:
+               if (!menu->sym && menu_is_visible(menu) && menu != &rootmenu &&
+                   menu->prompt->type == P_MENU) {
+                       fprintf(out, "# end of %s\n", menu_get_prompt(menu));
+                       need_newline = true;
+               }
+
+               if (menu->next) {
                        menu = menu->next;
-               else while ((menu = menu->parent)) {
-                       if (!menu->sym && menu_is_visible(menu) &&
-                           menu != &rootmenu) {
-                               str = menu_get_prompt(menu);
-                               fprintf(out, "# end of %s\n", str);
-                               need_newline = true;
-                       }
-                       if (menu->next) {
-                               menu = menu->next;
-                               break;
-                       }
+               } else {
+                       menu = menu->parent;
+                       if (menu)
+                               goto end_check;
                }
        }
        fclose(out);
@@ -979,6 +973,7 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
 
        fprintf(out, "\n$(deps_config): ;\n");
 
+       fflush(out);
        ret = ferror(out); /* error check for all fprintf() calls */
        fclose(out);
        if (ret)
@@ -1097,6 +1092,7 @@ static int __conf_write_autoconf(const char *filename,
                if ((sym->flags & SYMBOL_WRITE) && sym->name)
                        print_symbol(file, sym);
 
+       fflush(file);
        /* check possible errors in conf_write_heading() and print_symbol() */
        ret = ferror(file);
        fclose(file);
index 9c084a2..7516949 100755 (executable)
@@ -424,7 +424,7 @@ sub get_kernel_version() {
 sub print_lineno {
     my $lineno = shift;
     if ($enable_lineno && defined($lineno)) {
-        print "#define LINENO " . $lineno . "\n";
+        print ".. LINENO " . $lineno . "\n";
     }
 }
 ##
@@ -2478,7 +2478,7 @@ May be specified multiple times.
 
 =item -enable-lineno
 
-Enable output of #define LINENO lines.
+Enable output of .. LINENO lines.
 
 =back
 
index f704034..20f4450 100755 (executable)
@@ -50,7 +50,7 @@ gen_initcalls()
 {
        info GEN .tmp_initcalls.lds
 
-       ${PYTHON} ${srctree}/scripts/jobserver-exec             \
+       ${PYTHON3} ${srctree}/scripts/jobserver-exec            \
        ${PERL} ${srctree}/scripts/generate_initcall_order.pl   \
                ${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS}   \
                > .tmp_initcalls.lds
index d10f93a..ed9d056 100644 (file)
@@ -674,7 +674,7 @@ static void handle_modversion(const struct module *mod,
        unsigned int crc;
 
        if (sym->st_shndx == SHN_UNDEF) {
-               warn("EXPORT symbol \"%s\" [%s%s] version ...\n"
+               warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n"
                     "Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n",
                     symname, mod->name, mod->is_vmlinux ? "" : ".ko",
                     symname);
index 1d2d71c..9b2c492 100644 (file)
@@ -166,7 +166,7 @@ config HARDENED_USERCOPY
 config HARDENED_USERCOPY_PAGESPAN
        bool "Refuse to copy allocations that span multiple pages"
        depends on HARDENED_USERCOPY
-       depends on EXPERT
+       depends on BROKEN
        help
          When a multi-page allocation is done without __GFP_COMP,
          hardened usercopy will reject attempts to copy it. There are,
index edd9849..977d543 100644 (file)
@@ -970,6 +970,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
 
        runtime->status->state = SNDRV_PCM_STATE_OPEN;
        mutex_init(&runtime->buffer_mutex);
+       atomic_set(&runtime->buffer_accessing, 0);
 
        substream->runtime = runtime;
        substream->private_data = pcm->private_data;
index a40a35e..1fc7c50 100644 (file)
@@ -1906,11 +1906,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                if (avail >= runtime->twake)
                        break;
                snd_pcm_stream_unlock_irq(substream);
-               mutex_unlock(&runtime->buffer_mutex);
 
                tout = schedule_timeout(wait_time);
 
-               mutex_lock(&runtime->buffer_mutex);
                snd_pcm_stream_lock_irq(substream);
                set_current_state(TASK_INTERRUPTIBLE);
                switch (runtime->status->state) {
@@ -2221,7 +2219,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
 
        nonblock = !!(substream->f_flags & O_NONBLOCK);
 
-       mutex_lock(&runtime->buffer_mutex);
        snd_pcm_stream_lock_irq(substream);
        err = pcm_accessible_state(runtime);
        if (err < 0)
@@ -2276,6 +2273,10 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                        err = -EINVAL;
                        goto _end_unlock;
                }
+               if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
+                       err = -EBUSY;
+                       goto _end_unlock;
+               }
                snd_pcm_stream_unlock_irq(substream);
                if (!is_playback)
                        snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
@@ -2284,6 +2285,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                if (is_playback)
                        snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
                snd_pcm_stream_lock_irq(substream);
+               atomic_dec(&runtime->buffer_accessing);
                if (err < 0)
                        goto _end_unlock;
                err = pcm_accessible_state(runtime);
@@ -2313,7 +2315,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
        if (xfer > 0 && err >= 0)
                snd_pcm_update_state(substream, runtime);
        snd_pcm_stream_unlock_irq(substream);
-       mutex_unlock(&runtime->buffer_mutex);
        return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
 }
 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
index 704fdc9..4adaee6 100644 (file)
@@ -685,6 +685,24 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
        return 0;
 }
 
+/* acquire buffer_mutex; if a r/w operation is in progress, return -EBUSY,
+ * otherwise block further r/w operations
+ */
+static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
+{
+       if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
+               return -EBUSY;
+       mutex_lock(&runtime->buffer_mutex);
+       return 0; /* keep buffer_mutex; unlocked by the helper below */
+}
+
+/* release buffer_mutex and clear r/w access flag */
+static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
+{
+       mutex_unlock(&runtime->buffer_mutex);
+       atomic_inc(&runtime->buffer_accessing);
+}
+
 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
 #define is_oss_stream(substream)       ((substream)->oss.oss)
 #else
@@ -695,14 +713,16 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *params)
 {
        struct snd_pcm_runtime *runtime;
-       int err = 0, usecs;
+       int err, usecs;
        unsigned int bits;
        snd_pcm_uframes_t frames;
 
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
        runtime = substream->runtime;
-       mutex_lock(&runtime->buffer_mutex);
+       err = snd_pcm_buffer_access_lock(runtime);
+       if (err < 0)
+               return err;
        snd_pcm_stream_lock_irq(substream);
        switch (runtime->status->state) {
        case SNDRV_PCM_STATE_OPEN:
@@ -820,7 +840,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
                        snd_pcm_lib_free_pages(substream);
        }
  unlock:
-       mutex_unlock(&runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(runtime);
        return err;
 }
 
@@ -865,7 +885,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
        runtime = substream->runtime;
-       mutex_lock(&runtime->buffer_mutex);
+       result = snd_pcm_buffer_access_lock(runtime);
+       if (result < 0)
+               return result;
        snd_pcm_stream_lock_irq(substream);
        switch (runtime->status->state) {
        case SNDRV_PCM_STATE_SETUP:
@@ -884,7 +906,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
        cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
  unlock:
-       mutex_unlock(&runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(runtime);
        return result;
 }
 
@@ -1369,12 +1391,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 
        /* Guarantee the group members won't change during non-atomic action */
        down_read(&snd_pcm_link_rwsem);
-       mutex_lock(&substream->runtime->buffer_mutex);
+       res = snd_pcm_buffer_access_lock(substream->runtime);
+       if (res < 0)
+               goto unlock;
        if (snd_pcm_stream_linked(substream))
                res = snd_pcm_action_group(ops, substream, state, false);
        else
                res = snd_pcm_action_single(ops, substream, state);
-       mutex_unlock(&substream->runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(substream->runtime);
+ unlock:
        up_read(&snd_pcm_link_rwsem);
        return res;
 }
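
For illustration only, here is a userspace analogue of the buffer_accessing gate added above, using C11 atomics and a pthread mutex in place of the kernel's atomic_*_unless_*() helpers and buffer_mutex; it is a sketch of the scheme (0 = idle, >0 = r/w in progress, <0 = locked for a buffer-changing operation), not the ALSA code:

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

static atomic_int buffer_accessing;               /* 0 idle, >0 r/w, <0 locked */
static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* like atomic_inc_unless_negative(): enter r/w unless an ioctl holds the gate */
static bool rw_enter(void)
{
	int c = atomic_load(&buffer_accessing);

	while (c >= 0)
		if (atomic_compare_exchange_weak(&buffer_accessing, &c, c + 1))
			return true;
	return false;                              /* caller would return -EBUSY */
}

static void rw_exit(void)
{
	atomic_fetch_sub(&buffer_accessing, 1);
}

/* like snd_pcm_buffer_access_lock(): fail while a r/w operation is running */
static bool buffer_access_lock(void)
{
	int c = atomic_load(&buffer_accessing);

	while (c <= 0) {                           /* atomic_dec_unless_positive() */
		if (atomic_compare_exchange_weak(&buffer_accessing, &c, c - 1)) {
			pthread_mutex_lock(&buffer_mutex);
			return true;
		}
	}
	return false;                              /* caller would return -EBUSY */
}

static void buffer_access_unlock(void)
{
	pthread_mutex_unlock(&buffer_mutex);
	atomic_fetch_add(&buffer_accessing, 1);    /* give the gate back */
}

int main(void)
{
	if (buffer_access_lock())                  /* succeeds: gate is idle */
		buffer_access_unlock();
	return !rw_enter();                        /* 0: r/w entry succeeded */
}
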
index b6bdebd..10112e1 100644 (file)
@@ -494,7 +494,7 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
        static int dev;
        int err;
        struct snd_card *card;
-       struct pnp_dev *cdev;
+       struct pnp_dev *cdev, *iter;
        char cid[PNP_ID_LEN];
 
        if (pnp_device_is_isapnp(pdev))
@@ -510,9 +510,11 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
        strcpy(cid, pdev->id[0].id);
        cid[5] = '1';
        cdev = NULL;
-       list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) {
-               if (!strcmp(cdev->id[0].id, cid))
+       list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) {
+               if (!strcmp(iter->id[0].id, cid)) {
+                       cdev = iter;
                        break;
+               }
        }
        err = snd_cs423x_card_new(&pdev->dev, dev, &card);
        if (err < 0)
index 2d1fa70..74c50ec 100644 (file)
@@ -478,28 +478,29 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0A29, "Bullseye", CS8409_BULLSEYE),
        SND_PCI_QUIRK(0x1028, 0x0A2A, "Bullseye", CS8409_BULLSEYE),
        SND_PCI_QUIRK(0x1028, 0x0A2B, "Bullseye", CS8409_BULLSEYE),
+       SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AB0, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB2, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB1, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB3, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB4, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB5, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
        SND_PCI_QUIRK(0x1028, 0x0AD9, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADA, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADB, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADC, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0ADF, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AE0, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AE1, "Cyborg", CS8409_CYBORG),
@@ -512,11 +513,30 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0AEE, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AEF, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AF0, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0B92, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B93, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0B94, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B95, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0B96, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B97, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BB2, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB3, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB4, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0BB8, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB9, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BBA, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BBB, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BBC, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BBD, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BD4, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD5, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD6, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD7, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD8, "Dolphin", CS8409_DOLPHIN),
        {} /* terminator */
 };
 
@@ -524,6 +544,8 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
 const struct hda_model_fixup cs8409_models[] = {
        { .id = CS8409_BULLSEYE, .name = "bullseye" },
        { .id = CS8409_WARLOCK, .name = "warlock" },
+       { .id = CS8409_WARLOCK_MLK, .name = "warlock mlk" },
+       { .id = CS8409_WARLOCK_MLK_DUAL_MIC, .name = "warlock mlk dual mic" },
        { .id = CS8409_CYBORG, .name = "cyborg" },
        { .id = CS8409_DOLPHIN, .name = "dolphin" },
        {}
@@ -542,6 +564,18 @@ const struct hda_fixup cs8409_fixups[] = {
                .chained = true,
                .chain_id = CS8409_FIXUPS,
        },
+       [CS8409_WARLOCK_MLK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = cs8409_cs42l42_pincfgs,
+               .chained = true,
+               .chain_id = CS8409_FIXUPS,
+       },
+       [CS8409_WARLOCK_MLK_DUAL_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = cs8409_cs42l42_pincfgs,
+               .chained = true,
+               .chain_id = CS8409_FIXUPS,
+       },
        [CS8409_CYBORG] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = cs8409_cs42l42_pincfgs,
index aff2b5a..343fabc 100644 (file)
@@ -733,6 +733,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
                { 0x130A, 0x00 },
                { 0x130F, 0x00 },
        };
+       int fsv_old, fsv_new;
 
        /* Bring CS42L42 out of Reset */
        gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0);
@@ -749,8 +750,13 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
        /* Clear interrupts, by reading interrupt status registers */
        cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
 
-       if (cs42l42->full_scale_vol)
-               cs8409_i2c_write(cs42l42, 0x2001, 0x01);
+       fsv_old = cs8409_i2c_read(cs42l42, 0x2001);
+       if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB)
+               fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK;
+       else
+               fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK;
+       if (fsv_new != fsv_old)
+               cs8409_i2c_write(cs42l42, 0x2001, fsv_new);
 
        /* we have to explicitly allow unsol event handling even during the
         * resume phase so that the jack event is processed properly
@@ -906,9 +912,15 @@ static void cs8409_cs42l42_hw_init(struct hda_codec *codec)
                        cs8409_vendor_coef_set(codec, seq_bullseye->cir, seq_bullseye->coeff);
        }
 
-       /* DMIC1_MO=00b, DMIC1/2_SR=1 */
-       if (codec->fixup_id == CS8409_WARLOCK || codec->fixup_id == CS8409_CYBORG)
-               cs8409_vendor_coef_set(codec, 0x09, 0x0003);
+       switch (codec->fixup_id) {
+       case CS8409_CYBORG:
+       case CS8409_WARLOCK_MLK_DUAL_MIC:
+               /* DMIC1_MO=00b, DMIC1/2_SR=1 */
+               cs8409_vendor_coef_set(codec, CS8409_DMIC_CFG, 0x0003);
+               break;
+       default:
+               break;
+       }
 
        cs42l42_resume(cs42l42);
 
@@ -993,25 +1005,17 @@ void cs8409_cs42l42_fixups(struct hda_codec *codec, const struct hda_fixup *fix,
                cs8409_fix_caps(codec, CS8409_CS42L42_HP_PIN_NID);
                cs8409_fix_caps(codec, CS8409_CS42L42_AMIC_PIN_NID);
 
-               /* Set TIP_SENSE_EN for analog front-end of tip sense.
-                * Additionally set HSBIAS_SENSE_EN and Full Scale volume for some variants.
-                */
+               /* Set HSBIAS_SENSE_EN and Full Scale volume for some variants. */
                switch (codec->fixup_id) {
-               case CS8409_WARLOCK:
+               case CS8409_WARLOCK_MLK:
+               case CS8409_WARLOCK_MLK_DUAL_MIC:
                        spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
-                       break;
-               case CS8409_BULLSEYE:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 0;
-                       break;
-               case CS8409_CYBORG:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x00a0;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_0DB;
                        break;
                default:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0003;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
+                       spec->scodecs[CS8409_CODEC0]->full_scale_vol =
+                               CS42L42_FULL_SCALE_VOL_MINUS6DB;
                        break;
                }
 
@@ -1222,6 +1226,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
                cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
                cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
 
+               spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+               spec->scodecs[CS8409_CODEC1]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+
                break;
        case HDA_FIXUP_ACT_PROBE:
                /* Fix Sample Rate to 48kHz */
index d0b725c..7df46bd 100644 (file)
@@ -235,6 +235,9 @@ enum cs8409_coefficient_index_registers {
 #define CS42L42_I2C_SLEEP_US                   (2000)
 #define CS42L42_PDN_TIMEOUT_US                 (250000)
 #define CS42L42_PDN_SLEEP_US                   (2000)
+#define CS42L42_FULL_SCALE_VOL_MASK            (2)
+#define CS42L42_FULL_SCALE_VOL_0DB             (1)
+#define CS42L42_FULL_SCALE_VOL_MINUS6DB                (0)
 
 /* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */
 
@@ -264,6 +267,8 @@ enum cs8409_coefficient_index_registers {
 enum {
        CS8409_BULLSEYE,
        CS8409_WARLOCK,
+       CS8409_WARLOCK_MLK,
+       CS8409_WARLOCK_MLK_DUAL_MIC,
        CS8409_CYBORG,
        CS8409_FIXUPS,
        CS8409_DOLPHIN,
index c85ed7b..3e086ee 100644 (file)
@@ -1625,6 +1625,7 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        struct hda_codec *codec = per_pin->codec;
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_eld *eld = &spec->temp_eld;
+       struct device *dev = hda_codec_dev(codec);
        hda_nid_t pin_nid = per_pin->pin_nid;
        int dev_id = per_pin->dev_id;
        /*
@@ -1638,8 +1639,13 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        int present;
        int ret;
 
+#ifdef CONFIG_PM
+       if (dev->power.runtime_status == RPM_SUSPENDING)
+               return;
+#endif
+
        ret = snd_hda_power_up_pm(codec);
-       if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec)))
+       if (ret < 0 && pm_runtime_suspended(dev))
                goto out;
 
        present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
index c78f169..4e12af2 100644 (file)
@@ -3617,8 +3617,8 @@ static void alc256_shutup(struct hda_codec *codec)
        /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
         * when booting with headset plugged. So skip setting it for the codec alc257
         */
-       if (spec->codec_variant != ALC269_TYPE_ALC257 &&
-           spec->codec_variant != ALC269_TYPE_ALC256)
+       if (codec->core.vendor_id != 0x10ec0236 &&
+           codec->core.vendor_id != 0x10ec0257)
                alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
        if (!spec->no_shutup_pins)
@@ -7006,6 +7006,7 @@ enum {
        ALC287_FIXUP_LEGION_16ACHG6,
        ALC287_FIXUP_CS35L41_I2C_2,
        ALC245_FIXUP_CS35L41_SPI_2,
+       ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
        ALC245_FIXUP_CS35L41_SPI_4,
        ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED,
        ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
@@ -8771,6 +8772,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_two,
        },
+       [ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs35l41_fixup_spi_two,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_HP_GPIO_LED,
+       },
        [ALC245_FIXUP_CS35L41_SPI_4] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_four,
@@ -9026,7 +9033,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89ac, "HP EliteBook 640 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ae, "HP EliteBook 650 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -11140,6 +11147,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
index 9b263a9..4c7b5d9 100644 (file)
@@ -107,6 +107,7 @@ int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
        priv->mtkaif_protocol = mtkaif_protocol;
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_protocol);
 
 static void playback_gpio_set(struct mt6358_priv *priv)
 {
@@ -273,6 +274,7 @@ int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt)
                           1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_enable);
 
 int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
 {
@@ -296,6 +298,7 @@ int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
        capture_gpio_reset(priv);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_disable);
 
 int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
                                        int phase_1, int phase_2)
@@ -310,6 +313,7 @@ int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
                           phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_calibration_phase);
 
 /* dl pga gain */
 enum {
index 370bc79..d9a0d47 100644 (file)
@@ -462,11 +462,9 @@ static int hp_jack_event(struct notifier_block *nb, unsigned long event,
 
        if (event & SND_JACK_HEADPHONE)
                /* Disable speaker if headphone is plugged in */
-               snd_soc_dapm_disable_pin(dapm, "Ext Spk");
+               return snd_soc_dapm_disable_pin(dapm, "Ext Spk");
        else
-               snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-       return 0;
+               return snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 }
 
 static struct notifier_block hp_jack_nb = {
@@ -481,11 +479,9 @@ static int mic_jack_event(struct notifier_block *nb, unsigned long event,
 
        if (event & SND_JACK_MICROPHONE)
                /* Disable dmic if microphone is plugged in */
-               snd_soc_dapm_disable_pin(dapm, "DMIC");
+               return snd_soc_dapm_disable_pin(dapm, "DMIC");
        else
-               snd_soc_dapm_enable_pin(dapm, "DMIC");
-
-       return 0;
+               return snd_soc_dapm_enable_pin(dapm, "DMIC");
 }
 
 static struct notifier_block mic_jack_nb = {
index d3b7104..98700e7 100644 (file)
@@ -469,14 +469,14 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
                txcr_val = I2S_TXCR_IBM_NORMAL;
                rxcr_val = I2S_RXCR_IBM_NORMAL;
                break;
-       case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
-               txcr_val = I2S_TXCR_TFS_PCM;
-               rxcr_val = I2S_RXCR_TFS_PCM;
-               break;
-       case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
+       case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 mode */
                txcr_val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
                rxcr_val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
                break;
+       case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
+               txcr_val = I2S_TXCR_TFS_PCM;
+               rxcr_val = I2S_RXCR_TFS_PCM;
+               break;
        default:
                ret = -EINVAL;
                goto err_pm_put;
index b53f216..1724193 100644 (file)
@@ -84,6 +84,7 @@ if SND_SOC_SOF_PCI
 config SND_SOC_SOF_MERRIFIELD
        tristate "SOF support for Tangier/Merrifield"
        default SND_SOC_SOF_PCI
+       select SND_SOC_SOF_PCI_DEV
        select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
        help
          This adds support for Sound Open Firmware for Intel(R) platforms
index 490d489..c1b6ddc 100644 (file)
@@ -419,6 +419,16 @@ struct kvm_arm_copy_mte_tags {
 #define KVM_PSCI_RET_INVAL             PSCI_RET_INVALID_PARAMS
 #define KVM_PSCI_RET_DENIED            PSCI_RET_DENIED
 
+/* arm64-specific kvm_run::system_event flags */
+/*
+ * Reset caused by a PSCI v1.1 SYSTEM_RESET2 call.
+ * Valid only when the system event has a type of KVM_SYSTEM_EVENT_RESET.
+ */
+#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2        (1ULL << 0)
+
+/* run->fail_entry.hardware_entry_failure_reason codes. */
+#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED    (1ULL << 0)
+
 #endif
 
 #endif /* __ARM_KVM_H__ */
index 3edf05e..73e643a 100644 (file)
 #define X86_FEATURE_TSXLDTRK           (18*32+16) /* TSX Suspend Load Address Tracking */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_ARCH_LBR           (18*32+19) /* Intel ARCH LBR */
+#define X86_FEATURE_IBT                        (18*32+20) /* Indirect Branch Tracking */
 #define X86_FEATURE_AMX_BF16           (18*32+22) /* AMX bf16 Support */
 #define X86_FEATURE_AVX512_FP16                (18*32+23) /* AVX512 FP16 */
 #define X86_FEATURE_AMX_TILE           (18*32+24) /* AMX tile Support */
index 0e7f303..0eb90d2 100644 (file)
 #define RTIT_CTL_DISRETC               BIT(11)
 #define RTIT_CTL_PTW_EN                        BIT(12)
 #define RTIT_CTL_BRANCH_EN             BIT(13)
+#define RTIT_CTL_EVENT_EN              BIT(31)
+#define RTIT_CTL_NOTNT                 BIT_ULL(55)
 #define RTIT_CTL_MTC_RANGE_OFFSET      14
 #define RTIT_CTL_MTC_RANGE             (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
 #define RTIT_CTL_CYC_THRESH_OFFSET     19
 #define MSR_ATOM_CORE_TURBO_RATIOS     0x0000066c
 #define MSR_ATOM_CORE_TURBO_VIDS       0x0000066d
 
-
 #define MSR_CORE_PERF_LIMIT_REASONS    0x00000690
 #define MSR_GFX_PERF_LIMIT_REASONS     0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS    0x000006B1
 
+/* Control-flow Enforcement Technology MSRs */
+#define MSR_IA32_U_CET                 0x000006a0 /* user mode cet */
+#define MSR_IA32_S_CET                 0x000006a2 /* kernel mode cet */
+#define CET_SHSTK_EN                   BIT_ULL(0)
+#define CET_WRSS_EN                    BIT_ULL(1)
+#define CET_ENDBR_EN                   BIT_ULL(2)
+#define CET_LEG_IW_EN                  BIT_ULL(3)
+#define CET_NO_TRACK_EN                        BIT_ULL(4)
+#define CET_SUPPRESS_DISABLE           BIT_ULL(5)
+#define CET_RESERVED                   (BIT_ULL(6) | BIT_ULL(7) | BIT_ULL(8) | BIT_ULL(9))
+#define CET_SUPPRESS                   BIT_ULL(10)
+#define CET_WAIT_ENDBR                 BIT_ULL(11)
+
+#define MSR_IA32_PL0_SSP               0x000006a4 /* ring-0 shadow stack pointer */
+#define MSR_IA32_PL1_SSP               0x000006a5 /* ring-1 shadow stack pointer */
+#define MSR_IA32_PL2_SSP               0x000006a6 /* ring-2 shadow stack pointer */
+#define MSR_IA32_PL3_SSP               0x000006a7 /* ring-3 shadow stack pointer */
+#define MSR_IA32_INT_SSP_TAB           0x000006a8 /* exception shadow stack table */
+
 /* Hardware P state interface */
 #define MSR_PPERF                      0x0000064e
 #define MSR_PERF_LIMIT_REASONS         0x0000064f
index 9800f96..c6d2c77 100644 (file)
@@ -74,7 +74,7 @@ CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
 CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
 CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
-       -I$(if $(OUTPUT),$(OUTPUT),.) \
+       -I$(or $(OUTPUT),.) \
        -I$(LIBBPF_INCLUDE) \
        -I$(srctree)/kernel/bpf/ \
        -I$(srctree)/tools/include \
@@ -180,7 +180,7 @@ endif
 
 $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
        $(QUIET_CLANG)$(CLANG) \
-               -I$(if $(OUTPUT),$(OUTPUT),.) \
+               -I$(or $(OUTPUT),.) \
                -I$(srctree)/tools/include/uapi/ \
                -I$(LIBBPF_BOOTSTRAP_INCLUDE) \
                -g -O2 -Wall -target bpf -c $< -o $@
index 6f11e6f..17cdf01 100644 (file)
@@ -36,7 +36,7 @@ TMP_O := $(if $(OUTPUT),$(OUTPUT)feature/,./)
 
 clean:
        $(call QUIET_CLEAN, fixdep)
-       $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
        $(Q)rm -f $(OUTPUT)fixdep
        $(call QUIET_CLEAN, feature-detect)
 ifneq ($(wildcard $(TMP_O)),)
index 5ebc195..8843f0f 100644 (file)
@@ -40,7 +40,7 @@ $(OUTPUT)counter_example: $(COUNTER_EXAMPLE)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/counter.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 4404340..d29c9c4 100644 (file)
@@ -78,7 +78,7 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -f $(OUTPUT)include/linux/gpio.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index b57143d..fe770e6 100644 (file)
@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
 
 clean:
        rm -f $(ALL_PROGRAMS)
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(sbindir); \
index 5d12ac4..fa720f0 100644 (file)
@@ -58,7 +58,7 @@ $(OUTPUT)iio_generic_buffer: $(IIO_GENERIC_BUFFER_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/iio
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 1567a32..6c1aa92 100644 (file)
@@ -75,6 +75,8 @@
 #define MADV_POPULATE_READ     22      /* populate (prefault) page tables readable */
 #define MADV_POPULATE_WRITE    23      /* populate (prefault) page tables writable */
 
+#define MADV_DONTNEED_LOCKED   24      /* like DONTNEED, but drop locked pages too */
+
 /* compatibility flags */
 #define MAP_FILE       0
 
index 914ebd9..05c3642 100644 (file)
@@ -1118,10 +1118,16 @@ struct drm_i915_gem_exec_object2 {
        /**
         * When the EXEC_OBJECT_PINNED flag is specified this is populated by
         * the user with the GTT offset at which this object will be pinned.
+        *
         * When the I915_EXEC_NO_RELOC flag is specified this must contain the
         * presumed_offset of the object.
+        *
         * During execbuffer2 the kernel populates it with the value of the
         * current GTT offset of the object, for future presumed_offset writes.
+        *
+        * See struct drm_i915_gem_create_ext for the alignment rules that apply
+        * to I915_MEMORY_CLASS_DEVICE objects on devices with larger minimum
+        * page sizes, such as DG2.
         */
        __u64 offset;
 
@@ -3144,11 +3150,40 @@ struct drm_i915_gem_create_ext {
         *
         * The (page-aligned) allocated size for the object will be returned.
         *
-        * Note that for some devices we have might have further minimum
-        * page-size restrictions(larger than 4K), like for device local-memory.
-        * However in general the final size here should always reflect any
-        * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
-        * extension to place the object in device local-memory.
+        *
+        * DG2 64K min page size implications:
+        *
+        * On discrete platforms, starting from DG2, we have to contend with GTT
+        * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
+        * objects.  Specifically, the hardware only supports 64K or larger GTT
+        * page sizes for such memory. The kernel will already ensure that all
+        * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
+        * sizes underneath.
+        *
+        * Note that the returned size here will always reflect any required
+        * rounding up done by the kernel, i.e. 4K will now become 64K on devices
+        * such as DG2.
+        *
+        * Special DG2 GTT address alignment requirement:
+        *
+        * The GTT alignment will also need to be at least 2M for such objects.
+        *
+        * Note that due to how the hardware implements 64K GTT page support, we
+        * have some further complications:
+        *
+        *   1) The entire PDE (which covers a 2MB virtual address range) must
+        *   contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
+        *   PDE is forbidden by the hardware.
+        *
+        *   2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
+        *   objects.
+        *
+        * To keep things simple for userland, we mandate that any GTT mappings
+        * must be aligned to and rounded up to 2MB. The kernel will internally
+        * pad them out to the next 2MB boundary. As this only wastes virtual
+        * address space and avoids userland having to copy any needlessly
+        * complicated PDE sharing scheme (coloring) and only affects DG2, this
+        * is deemed to be a good compromise.
         */
        __u64 size;
        /**
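
The 2MB round-up mandated by the comment above is plain integer arithmetic; a hypothetical helper (not an i915 interface) would look like:

#include <stdint.h>

#define SZ_2M  (2ULL << 20)

static inline uint64_t gtt_align_2m(uint64_t x)
{
	return (x + SZ_2M - 1) & ~(SZ_2M - 1);
}

/* gtt_align_2m(4096)     == 0x200000
 * gtt_align_2m(0x200000) == 0x200000
 * gtt_align_2m(0x200001) == 0x400000 */
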
index bbc6b7c..91a6fe4 100644 (file)
@@ -562,9 +562,12 @@ struct kvm_s390_mem_op {
        __u32 op;               /* type of operation */
        __u64 buf;              /* buffer in userspace */
        union {
-               __u8 ar;        /* the access register number */
+               struct {
+                       __u8 ar;        /* the access register number */
+                       __u8 key;       /* access key, ignored if flag unset */
+               };
                __u32 sida_offset; /* offset into the sida */
-               __u8 reserved[32]; /* should be set to 0 */
+               __u8 reserved[32]; /* ignored */
        };
 };
 /* types for kvm_s390_mem_op->op */
@@ -572,9 +575,12 @@ struct kvm_s390_mem_op {
 #define KVM_S390_MEMOP_LOGICAL_WRITE   1
 #define KVM_S390_MEMOP_SIDA_READ       2
 #define KVM_S390_MEMOP_SIDA_WRITE      3
+#define KVM_S390_MEMOP_ABSOLUTE_READ   4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE  5
 /* flags for kvm_s390_mem_op->flags */
 #define KVM_S390_MEMOP_F_CHECK_ONLY            (1ULL << 0)
 #define KVM_S390_MEMOP_F_INJECT_EXCEPTION      (1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION       (1ULL << 2)
 
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
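As a rough illustration of the extended memop interface added here (a sketch only: the vm fd, guest address and access key are placeholders, and it assumes the absolute operations are issued as VM ioctls):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Hedged sketch: a storage-key-checked read of guest absolute memory. */
    static int memop_absolute_read(int vm_fd, __u64 gaddr, void *buf,
                                   __u32 len, __u8 access_key)
    {
            struct kvm_s390_mem_op op;

            memset(&op, 0, sizeof(op));
            op.gaddr = gaddr;                       /* guest absolute address */
            op.size  = len;
            op.buf   = (__u64)(unsigned long)buf;   /* userspace buffer */
            op.op    = KVM_S390_MEMOP_ABSOLUTE_READ;
            op.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION;
            op.key   = access_key;                  /* only honoured because the flag is set */

            return ioctl(vm_fd, KVM_S390_MEM_OP, &op);
    }

Without KVM_S390_MEMOP_F_SKEY_PROTECTION the new key field is ignored, matching the updated comment in the union above.
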
@@ -1137,6 +1143,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_AIL_MODE_3 210
 #define KVM_CAP_S390_MEM_OP_EXTENSION 211
 #define KVM_CAP_PMU_CAPABILITY 212
+#define KVM_CAP_DISABLE_QUIRKS2 213
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index a13e9c7..e21e1b4 100644 (file)
@@ -60,7 +60,7 @@ $(LIBFILE): $(API_IN)
 
 clean:
        $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
-       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+       find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 FORCE:
 
index b8b37fe..064c89e 100644 (file)
@@ -60,7 +60,7 @@ ifndef VERBOSE
   VERBOSE = 0
 endif
 
-INCLUDES = -I$(if $(OUTPUT),$(OUTPUT),.)                               \
+INCLUDES = -I$(or $(OUTPUT),.) \
           -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
 
 export prefix libdir src obj
index 08fe6e3..21df023 100644 (file)
@@ -153,7 +153,7 @@ $(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI)
        $(QUIET_LINK)$(CC) -o $@ $^
 
 $(TESTS_SHARED): $(TESTS_IN) $(LIBAPI)
-       $(QUIET_LINK)$(CC) -o $@ -L$(if $(OUTPUT),$(OUTPUT),.) $^ -lperf
+       $(QUIET_LINK)$(CC) -o $@ -L$(or $(OUTPUT),.) $^ -lperf
 
 make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC)
 
index ee66760..384d5e0 100644 (file)
@@ -319,6 +319,26 @@ struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map)
        return map->nr > 0 ? map->map[map->nr - 1] : result;
 }
 
+/** Is 'b' a subset of 'a'? */
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
+{
+       if (a == b || !b)
+               return true;
+       if (!a || b->nr > a->nr)
+               return false;
+
+       for (int i = 0, j = 0; i < a->nr; i++) {
+               if (a->map[i].cpu > b->map[j].cpu)
+                       return false;
+               if (a->map[i].cpu == b->map[j].cpu) {
+                       j++;
+                       if (j == b->nr)
+                               return true;
+               }
+       }
+       return false;
+}
+
 /*
  * Merge two cpumaps
  *
@@ -335,17 +355,12 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
        int i, j, k;
        struct perf_cpu_map *merged;
 
-       if (!orig && !other)
-               return NULL;
-       if (!orig) {
-               perf_cpu_map__get(other);
-               return other;
-       }
-       if (!other)
-               return orig;
-       if (orig->nr == other->nr &&
-           !memcmp(orig->map, other->map, orig->nr * sizeof(struct perf_cpu)))
+       if (perf_cpu_map__is_subset(orig, other))
                return orig;
+       if (perf_cpu_map__is_subset(other, orig)) {
+               perf_cpu_map__put(orig);
+               return perf_cpu_map__get(other);
+       }
 
        tmp_len = orig->nr + other->nr;
        tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
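A small, hedged illustration of the new merge semantics (assuming the libperf cpumap calls below are reachable from the caller's include and link setup; the CPU lists are arbitrary examples):

    #include <perf/cpumap.h>
    #include <stdio.h>

    /* Hedged sketch: merging a subset map now simply keeps the superset. */
    int main(void)
    {
            struct perf_cpu_map *a = perf_cpu_map__new("0-3");
            struct perf_cpu_map *b = perf_cpu_map__new("1-2");

            /* "1-2" is a subset of "0-3", so 'a' is returned without copying. */
            struct perf_cpu_map *merged = perf_cpu_map__merge(a, b);

            printf("merged has %d cpus\n", perf_cpu_map__nr(merged));

            perf_cpu_map__put(merged);
            perf_cpu_map__put(b);
            return 0;
    }
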
index 9a770bf..1b15ba1 100644 (file)
@@ -41,10 +41,10 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
         */
        if (!evsel->own_cpus || evlist->has_user_cpus) {
                perf_cpu_map__put(evsel->cpus);
-               evsel->cpus = perf_cpu_map__get(evlist->cpus);
-       } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
+               evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+       } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->user_requested_cpus)) {
                perf_cpu_map__put(evsel->cpus);
-               evsel->cpus = perf_cpu_map__get(evlist->cpus);
+               evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
        } else if (evsel->cpus != evsel->own_cpus) {
                perf_cpu_map__put(evsel->cpus);
                evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
@@ -123,10 +123,10 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
-       perf_cpu_map__put(evlist->cpus);
+       perf_cpu_map__put(evlist->user_requested_cpus);
        perf_cpu_map__put(evlist->all_cpus);
        perf_thread_map__put(evlist->threads);
-       evlist->cpus = NULL;
+       evlist->user_requested_cpus = NULL;
        evlist->all_cpus = NULL;
        evlist->threads = NULL;
        fdarray__exit(&evlist->pollfd);
@@ -155,9 +155,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
         * original reference count of 1.  If that is not the case it is up to
         * the caller to increase the reference count.
         */
-       if (cpus != evlist->cpus) {
-               perf_cpu_map__put(evlist->cpus);
-               evlist->cpus = perf_cpu_map__get(cpus);
+       if (cpus != evlist->user_requested_cpus) {
+               perf_cpu_map__put(evlist->user_requested_cpus);
+               evlist->user_requested_cpus = perf_cpu_map__get(cpus);
        }
 
        if (threads != evlist->threads) {
@@ -294,7 +294,7 @@ add:
 
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
-       int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
        int nr_threads = perf_thread_map__nr(evlist->threads);
        int nfds = 0;
        struct perf_evsel *evsel;
@@ -426,7 +426,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
               int idx, struct perf_mmap_param *mp, int cpu_idx,
               int thread, int *_output, int *_output_overwrite)
 {
-       struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+       struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
        struct perf_evsel *evsel;
        int revent;
 
@@ -536,7 +536,7 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
             struct perf_mmap_param *mp)
 {
        int nr_threads = perf_thread_map__nr(evlist->threads);
-       int nr_cpus    = perf_cpu_map__nr(evlist->cpus);
+       int nr_cpus    = perf_cpu_map__nr(evlist->user_requested_cpus);
        int cpu, thread;
 
        for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -564,8 +564,8 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 {
        int nr_mmaps;
 
-       nr_mmaps = perf_cpu_map__nr(evlist->cpus);
-       if (perf_cpu_map__empty(evlist->cpus))
+       nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
+       if (perf_cpu_map__empty(evlist->user_requested_cpus))
                nr_mmaps = perf_thread_map__nr(evlist->threads);
 
        return nr_mmaps;
@@ -576,7 +576,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
                          struct perf_mmap_param *mp)
 {
        struct perf_evsel *evsel;
-       const struct perf_cpu_map *cpus = evlist->cpus;
+       const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
        const struct perf_thread_map *threads = evlist->threads;
 
        if (!ops || !ops->get || !ops->mmap)
index 1973a18..35dd296 100644 (file)
@@ -25,5 +25,6 @@ struct perf_cpu_map {
 #endif
 
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
 
 #endif /* __LIBPERF_INTERNAL_CPUMAP_H */
index 4cefade..e3e64f3 100644 (file)
@@ -19,7 +19,12 @@ struct perf_evlist {
        int                      nr_entries;
        int                      nr_groups;
        bool                     has_user_cpus;
-       struct perf_cpu_map     *cpus;
+       /**
+        * The cpus passed from the command line or all online CPUs by
+        * default.
+        */
+       struct perf_cpu_map     *user_requested_cpus;
+       /** The union of all evsel cpu maps. */
        struct perf_cpu_map     *all_cpus;
        struct perf_thread_map  *threads;
        int                      nr_mmaps;
index 1c777a7..8f1a09c 100644 (file)
@@ -63,7 +63,7 @@ $(LIBFILE): $(SUBCMD_IN)
 
 clean:
        $(call QUIET_CLEAN, libsubcmd) $(RM) $(LIBFILE); \
-       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+       find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 FORCE:
 
index 92ce4fc..0dbd397 100644 (file)
@@ -13,7 +13,7 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
 SUBCMD_SRCDIR          = $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT       = $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
+LIBSUBCMD_OUTPUT       = $(or $(OUTPUT),$(CURDIR)/)
 LIBSUBCMD              = $(LIBSUBCMD_OUTPUT)libsubcmd.a
 
 OBJTOOL    := $(OUTPUT)objtool
index 4b95a51..5774477 100644 (file)
@@ -42,7 +42,7 @@ $(OUTPUT)pcitest: $(PCITEST_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 9c935f8..69473a8 100644 (file)
@@ -691,9 +691,8 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
 $(SCRIPTS) : % : %.sh
        $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
 
-$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD ../../.git/ORIG_HEAD
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
-       $(Q)touch $(OUTPUT)PERF-VERSION-FILE
 
 # These can record PERF_VERSION
 perf.spec $(SCRIPTS) \
@@ -724,7 +723,7 @@ endif
 # get relative building directory (to $(OUTPUT))
 # and '.' if it's $(OUTPUT) itself
 __build-dir = $(subst $(OUTPUT),,$(dir $@))
-build-dir   = $(if $(__build-dir),$(__build-dir),.)
+build-dir   = $(or $(__build-dir),.)
 
 prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \
        $(fadvise_advice_array) \
@@ -1090,7 +1089,7 @@ bpf-skel-clean:
 
 clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean
        $(call QUIET_CLEAN, core-objs)  $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf-iostat $(LANG_BINDINGS)
-       $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
        $(Q)$(RM) $(OUTPUT).config-detected
        $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
        $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
@@ -1139,21 +1138,12 @@ else
        @echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
 endif
 
-#
-# Trick: if ../../.git does not exist - we are building out of tree for example,
-# then force version regeneration:
-#
-ifeq ($(wildcard ../../.git/HEAD),)
-    GIT-HEAD-PHONY = ../../.git/HEAD ../../.git/ORIG_HEAD
-else
-    GIT-HEAD-PHONY =
-endif
 
 FORCE:
 
 .PHONY: all install clean config-clean strip install-gtk
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
+.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope FORCE prepare
 .PHONY: libtraceevent_plugins archheaders
 
 endif # force_fixdep
index cbc5552..11c71aa 100644 (file)
@@ -199,7 +199,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
                             struct evsel *evsel, u32 option)
 {
        int i, err = -EINVAL;
-       struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
+       struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
        /* Set option of each CPU we have */
@@ -299,7 +299,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
                                container_of(itr, struct cs_etm_recording, itr);
        struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
        struct evsel *evsel, *cs_etm_evsel = NULL;
-       struct perf_cpu_map *cpus = evlist->core.cpus;
+       struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
        int err = 0;
 
@@ -522,7 +522,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 {
        int i;
        int etmv3 = 0, etmv4 = 0, ete = 0;
-       struct perf_cpu_map *event_cpus = evlist->core.cpus;
+       struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
        /* cpu map is not empty, we have specific CPUs to work with */
@@ -713,7 +713,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        u32 offset;
        u64 nr_cpu, type;
        struct perf_cpu_map *cpu_map;
-       struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
+       struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
        struct cs_etm_recording *ptr =
                        container_of(itr, struct cs_etm_recording, itr);
index 5860bba..86e2e92 100644 (file)
@@ -144,7 +144,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
                        container_of(itr, struct arm_spe_recording, itr);
        struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
        struct evsel *evsel, *arm_spe_evsel = NULL;
-       struct perf_cpu_map *cpus = evlist->core.cpus;
+       struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
        struct evsel *tracking_evsel;
        int err;
index 4a76d49..d68a0f4 100644 (file)
@@ -110,7 +110,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
        struct evsel *evsel, *intel_bts_evsel = NULL;
-       const struct perf_cpu_map *cpus = evlist->core.cpus;
+       const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
 
        if (opts->auxtrace_sample_mode) {
index 8c31578..38ec266 100644 (file)
@@ -382,7 +382,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
                        ui__warning("Intel Processor Trace: TSC not available\n");
        }
 
-       per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
+       per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
 
        auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
        auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -632,7 +632,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
        struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
        bool have_timing_info, need_immediate = false;
        struct evsel *evsel, *intel_pt_evsel = NULL;
-       const struct perf_cpu_map *cpus = evlist->core.cpus;
+       const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
        u64 tsc_bit;
        int err;
index de56601..5a27691 100644 (file)
@@ -151,7 +151,7 @@ static int bench_evlist_open_close__run(char *evstr)
 
        init_stats(&time_stats);
 
-       printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
+       printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.user_requested_cpus));
        printf("  Number of threads:\t%d\n", evlist->core.threads->nr);
        printf("  Number of events:\t%d (%d fds)\n",
                evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
index ad9ce1b..7de07bb 100644 (file)
@@ -301,7 +301,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
 
 static int set_tracing_cpu(struct perf_ftrace *ftrace)
 {
-       struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
+       struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
 
        if (!target__has_cpu(&ftrace->target))
                return 0;
index 0b4abed..ba74fab 100644 (file)
@@ -987,7 +987,7 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
        int m, tm, nr_mmaps = evlist->core.nr_mmaps;
        struct mmap *mmap = evlist->mmap;
        struct mmap *overwrite_mmap = evlist->overwrite_mmap;
-       struct perf_cpu_map *cpus = evlist->core.cpus;
+       struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 
        thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
                                              thread_data->mask->maps.nbits);
@@ -1881,7 +1881,7 @@ static int record__synthesize(struct record *rec, bool tail)
                return err;
        }
 
-       err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
+       err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
                                             process_synthesized_event, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize cpu map.\n");
@@ -3675,7 +3675,7 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
 static int record__init_thread_masks(struct record *rec)
 {
        int ret = 0;
-       struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+       struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;
 
        if (!record__threads_enabled(rec))
                return record__init_thread_default_masks(rec, cpus);
index 4ee40de..a96f106 100644 (file)
@@ -804,7 +804,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
        if (group)
                evlist__set_leader(evsel_list);
 
-       if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+       if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
                if (affinity__setup(&saved_affinity) < 0)
                        return -1;
                affinity = &saved_affinity;
@@ -1458,7 +1458,7 @@ static int perf_stat_init_aggr_mode(void)
        aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
 
        if (get_id) {
-               stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus,
+               stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
                                                         get_id, /*data=*/NULL);
                if (!stat_config.aggr_map) {
                        pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
@@ -1472,7 +1472,10 @@ static int perf_stat_init_aggr_mode(void)
         * taking the highest cpu number to be the size of
         * the aggregation translate cpumap.
         */
-       nr = perf_cpu_map__max(evsel_list->core.cpus).cpu;
+       if (evsel_list->core.user_requested_cpus)
+               nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+       else
+               nr = 0;
        stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
        return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
 }
@@ -1627,7 +1630,7 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
        if (!get_id)
                return 0;
 
-       stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env);
+       stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env);
        if (!stat_config.aggr_map) {
                pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
                return -1;
index 9b08e44..fd8fd91 100644 (file)
@@ -1021,7 +1021,7 @@ static int perf_top__start_counters(struct perf_top *top)
 
        evlist__for_each_entry(evlist, counter) {
 try_again:
-               if (evsel__open(counter, top->evlist->core.cpus,
+               if (evsel__open(counter, top->evlist->core.user_requested_cpus,
                                     top->evlist->core.threads) < 0) {
 
                        /*
index 461848c..bba68a6 100755 (executable)
@@ -34,7 +34,7 @@ def main():
             if not isinstance(event, perf.sample_event):
                 continue
 
-            print "time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
+            print("time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
                    event.sample_time,
                    event.prev_comm,
                    event.prev_pid,
@@ -42,7 +42,7 @@ def main():
                    event.prev_state,
                    event.next_comm,
                    event.next_pid,
-                   event.next_prio)
+                   event.next_prio))
 
 if __name__ == '__main__':
     main()
index 8ef26d8..6f85f5d 100644 (file)
@@ -366,6 +366,7 @@ struct ucred {
 #define SOL_XDP                283
 #define SOL_MPTCP      284
 #define SOL_MCTP       285
+#define SOL_SMC                286
 
 /* IPX options */
 #define IPX_TYPE       1
index 9e48652..df1c5bb 100644 (file)
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
        mp->idx = idx;
 
        if (per_cpu) {
-               mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
+               mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
                if (evlist->core.threads)
                        mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
                else
index 4f4d3aa..7a4297d 100644 (file)
@@ -38,7 +38,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 
        /* don't need to set cpu filter for system-wide mode */
        if (ftrace->target.cpu_list) {
-               ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
+               ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
                bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
        }
 
@@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
                fd = bpf_map__fd(skel->maps.cpu_filter);
 
                for (i = 0; i < ncpus; i++) {
-                       cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
+                       cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
                        bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
                }
        }
index 9bb79e0..52ea004 100644 (file)
@@ -440,7 +440,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
        bool has_imm = false;
 
        // See explanation in evlist__close()
-       if (!cpu_map__is_dummy(evlist->core.cpus)) {
+       if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
                if (affinity__setup(&saved_affinity) < 0)
                        return;
                affinity = &saved_affinity;
@@ -500,7 +500,7 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
        struct affinity saved_affinity, *affinity = NULL;
 
        // See explanation in evlist__close()
-       if (!cpu_map__is_dummy(evlist->core.cpus)) {
+       if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
                if (affinity__setup(&saved_affinity) < 0)
                        return;
                affinity = &saved_affinity;
@@ -565,7 +565,7 @@ static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel,
 static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
 {
        int cpu;
-       int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
 
        if (!evsel->core.fd)
                return -EINVAL;
@@ -580,7 +580,7 @@ static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evse
 
 int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-       bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
+       bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
 
        if (per_cpu_mmaps)
                return evlist__enable_event_cpu(evlist, evsel, idx);
@@ -1301,10 +1301,11 @@ void evlist__close(struct evlist *evlist)
        struct affinity affinity;
 
        /*
-        * With perf record core.cpus is usually NULL.
+        * With perf record, core.user_requested_cpus is usually NULL.
         * Use the old method to handle this for now.
         */
-       if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) {
+       if (!evlist->core.user_requested_cpus ||
+           cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
                evlist__for_each_entry_reverse(evlist, evsel)
                        evsel__close(evsel);
                return;
@@ -1330,7 +1331,6 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 {
        struct perf_cpu_map *cpus;
        struct perf_thread_map *threads;
-       int err = -ENOMEM;
 
        /*
         * Try reading /sys/devices/system/cpu/online to get
@@ -1355,7 +1355,7 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 out_put:
        perf_cpu_map__put(cpus);
 out:
-       return err;
+       return -ENOMEM;
 }
 
 int evlist__open(struct evlist *evlist)
@@ -1367,7 +1367,7 @@ int evlist__open(struct evlist *evlist)
         * Default: one fd per CPU, all threads, aka systemwide
         * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
         */
-       if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
+       if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
                err = evlist__create_syswide_maps(evlist);
                if (err < 0)
                        goto out_err;
index 3c20b12..aeb09c2 100644 (file)
@@ -75,7 +75,7 @@ void hashmap__clear(struct hashmap *map)
 
 void hashmap__free(struct hashmap *map)
 {
-       if (!map)
+       if (IS_ERR_OR_NULL(map))
                return;
 
        hashmap__clear(map);
@@ -238,4 +238,3 @@ bool hashmap__delete(struct hashmap *map, const void *key,
 
        return true;
 }
-
index 007a646..5b09ecb 100644 (file)
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
        if (opts->group)
                evlist__set_leader(evlist);
 
-       if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
+       if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
                opts->no_inherit = true;
 
        use_comm_exec = perf_can_comm_exec();
@@ -244,7 +244,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
        evsel = evlist__last(temp_evlist);
 
-       if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+       if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
                struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
                if (cpus)
@@ -252,7 +252,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
                perf_cpu_map__put(cpus);
        } else {
-               cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
+               cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
        }
 
        while (1) {
index 748371a..388846f 100644 (file)
@@ -114,7 +114,8 @@ int evlist__start_sb_thread(struct evlist *evlist, struct target *target)
        }
 
        evlist__for_each_entry(evlist, counter) {
-               if (evsel__open(counter, evlist->core.cpus, evlist->core.threads) < 0)
+               if (evsel__open(counter, evlist->core.user_requested_cpus,
+                               evlist->core.threads) < 0)
                        goto out_delete_evlist;
        }
 
index 9cbe351..138e3ab 100644 (file)
@@ -929,7 +929,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
        int all_idx;
        struct perf_cpu cpu;
 
-       perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) {
+       perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
                struct evsel *counter;
                bool first = true;
 
index b654de0..27acdc5 100644 (file)
@@ -2127,7 +2127,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
                return err;
        }
 
-       err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+       err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize thread map.\n");
                return err;
index c1ebfc5..b8b3243 100644 (file)
@@ -95,15 +95,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 
        if (target->cpu_list)
                ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-                               perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
+                               perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+                               ? "s" : "",
                                target->cpu_list);
        else {
                if (target->tid)
                        ret += SNPRINTF(bf + ret, size - ret, ")");
                else
                        ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-                                       perf_cpu_map__nr(top->evlist->core.cpus),
-                                       perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
+                                       perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
+                                       perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+                                       ? "s" : "");
        }
 
        perf_top__reset_sample_counters(top);
index d2fba12..846f785 100644 (file)
@@ -47,7 +47,7 @@ $(OUTPUT)intel-speed-select: $(ISST_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/isst_if.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index a2335e4..0efb8f2 100644 (file)
@@ -52,11 +52,17 @@ define allow-override
 endef
 
 ifneq ($(LLVM),)
-$(call allow-override,CC,clang)
-$(call allow-override,AR,llvm-ar)
-$(call allow-override,LD,ld.lld)
-$(call allow-override,CXX,clang++)
-$(call allow-override,STRIP,llvm-strip)
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+$(call allow-override,CC,$(LLVM_PREFIX)clang$(LLVM_SUFFIX))
+$(call allow-override,AR,$(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX))
+$(call allow-override,LD,$(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX))
+$(call allow-override,CXX,$(LLVM_PREFIX)clang++$(LLVM_SUFFIX))
+$(call allow-override,STRIP,$(LLVM_PREFIX)llvm-strip$(LLVM_SUFFIX))
 else
 # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
@@ -69,9 +75,9 @@ endif
 CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
 ifneq ($(LLVM),)
-HOSTAR  ?= llvm-ar
-HOSTCC  ?= clang
-HOSTLD  ?= ld.lld
+HOSTAR  ?= $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
+HOSTCC  ?= $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+HOSTLD  ?= $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
 else
 HOSTAR  ?= ar
 HOSTCC  ?= gcc
index c16ce83..172e472 100644 (file)
@@ -175,5 +175,5 @@ _ge-abspath = $(if $(is-executable),$(1))
 define get-executable-or-default
 $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
 endef
-_ge_attempt = $(if $(get-executable),$(get-executable),$(call _gea_err,$(2)))
+_ge_attempt = $(or $(get-executable),$(call _gea_err,$(2)))
 _gea_err  = $(if $(1),$(error Please set '$(1)' appropriately))
index 0aa6dbd..7fccd24 100644 (file)
@@ -53,9 +53,9 @@ $(OUTPUT)spidev_fdx: $(SPIDEV_FDX_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '\.*.o.d' -delete
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '\.*.o.cmd' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete
+       find $(or $(OUTPUT),.) -name '\.*.o.d' -delete
+       find $(or $(OUTPUT),.) -name '\.*.o.cmd' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index a40add3..2a2d240 100644 (file)
@@ -1,7 +1,13 @@
 # This mimics the top-level Makefile. We do it explicitly here so that this
 # Makefile can operate with or without the kbuild infrastructure.
 ifneq ($(LLVM),)
-CC := clang
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
 else
 CC := $(CROSS_COMPILE)gcc
 endif
index 3615ef4..2189f03 100644 (file)
@@ -368,9 +368,16 @@ static void req_xtiledata_perm(void)
 
 static void validate_req_xcomp_perm(enum expected_result exp)
 {
-       unsigned long bitmask;
+       unsigned long bitmask, expected_bitmask;
        long rc;
 
+       rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
+       if (rc) {
+               fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
+       } else if (!(bitmask & XFEATURE_MASK_XTILECFG)) {
+               fatal_error("ARCH_GET_XCOMP_PERM returns XFEATURE_XTILECFG off.");
+       }
+
        rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
        if (exp == FAIL_EXPECTED) {
                if (rc) {
@@ -383,10 +390,15 @@ static void validate_req_xcomp_perm(enum expected_result exp)
                fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected failure.\n");
        }
 
+       expected_bitmask = bitmask | XFEATURE_MASK_XTILEDATA;
+
        rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
        if (rc) {
                fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
-       } else if (bitmask & XFEATURE_MASK_XTILE) {
+       } else if (bitmask != expected_bitmask) {
+               fatal_error("ARCH_REQ_XCOMP_PERM set a wrong bitmask: %lx, expected: %lx.\n",
+                           bitmask, expected_bitmask);
+       } else {
                printf("\tARCH_REQ_XCOMP_PERM is successful.\n");
        }
 }
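For readers unfamiliar with the permission interface being exercised, the sequence the test now checks boils down to roughly the following (a sketch; XFEATURE_XTILEDATA is defined locally here, mirroring the selftest, since it is not a uapi constant):

    #include <asm/prctl.h>          /* ARCH_GET_XCOMP_PERM, ARCH_REQ_XCOMP_PERM */
    #include <sys/syscall.h>
    #include <unistd.h>

    #define XFEATURE_XTILEDATA      18      /* mirrors the selftest's definition */

    /* Hedged sketch: request AMX tile-data permission and verify the bitmask. */
    static int request_xtiledata_perm(void)
    {
            unsigned long before, after;

            if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &before))
                    return -1;
            if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA))
                    return -1;
            if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &after))
                    return -1;

            /* Exactly the XTILEDATA bit should have been added to the old mask. */
            return after == (before | (1UL << XFEATURE_XTILEDATA)) ? 0 : -1;
    }
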
index 5a1eda6..11fb417 100644 (file)
@@ -46,7 +46,7 @@ DATADIR       :=      /usr/share
 DOCDIR :=      $(DATADIR)/doc
 MANDIR :=      $(DATADIR)/man
 LICDIR :=      $(DATADIR)/licenses
-SRCTREE        :=      $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
+SRCTREE        :=      $(or $(BUILD_SRC),$(CURDIR))
 
 # If running from the tarball, man pages are stored in the Documentation
 # dir. If running from the kernel source, man pages are stored in
index 1b128e5..c623566 100644 (file)
@@ -38,7 +38,7 @@ $(OUTPUT)ffs-test: $(FFS_TEST_IN)
 
 clean:
        rm -f $(ALL_PROGRAMS)
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 0d7bbe4..1b25cc7 100644 (file)
@@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
 
 CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
-LDFLAGS += -lpthread
+CFLAGS += -pthread
+LDFLAGS += -pthread
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
        ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
index 8f41cd6..834a90b 100644 (file)
@@ -26,8 +26,8 @@ enum dma_data_direction {
 #define dma_map_single(d, p, s, dir) (virt_to_phys(p))
 #define dma_mapping_error(...) (0)
 
-#define dma_unmap_single(...) do { } while (0)
-#define dma_unmap_page(...) do { } while (0)
+#define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
+#define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 
 #define dma_max_mapping_size(...) SIZE_MAX
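The named-argument form matters because the old variadic no-op never evaluated its arguments, so a variable whose only "use" is the unmap call could look unused to the compiler under -Wall. A minimal sketch of the pattern, written against kernel-style types rather than the test stubs (hypothetical caller):

    /* Hedged sketch of a caller the new stubs keep warning-free. */
    static void example_unmap(struct device *dev, void *p)
    {
            dma_addr_t addr;

            addr = dma_map_single(dev, p, 64, DMA_TO_DEVICE);
            /* With the old "(...)" stub, 'addr' was set but never read. */
            dma_unmap_single(dev, addr, 64, DMA_TO_DEVICE);
    }
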
 
index 7679335..7d98e76 100644 (file)
@@ -441,7 +441,6 @@ static void usage(void)
                "-n\t\tSort by task command name.\n"
                "-a\t\tSort by memory allocate time.\n"
                "-r\t\tSort by memory release time.\n"
-               "-c\t\tCull by comparing stacktrace instead of total block.\n"
                "-f\t\tFilter out the information of blocks whose memory has been released.\n"
                "--pid <PID>\tSelect by pid. This selects the information of blocks whose process ID number equals to <PID>.\n"
                "--tgid <TGID>\tSelect by tgid. This selects the information of blocks whose Thread Group ID number equals to <TGID>.\n"
@@ -466,14 +465,11 @@ int main(int argc, char **argv)
                { 0, 0, 0, 0},
        };
 
-       while ((opt = getopt_long(argc, argv, "acfmnprstP", longopts, NULL)) != -1)
+       while ((opt = getopt_long(argc, argv, "afmnprstP", longopts, NULL)) != -1)
                switch (opt) {
                case 'a':
                        cmp = compare_ts;
                        break;
-               case 'c':
-                       cull = cull | CULL_STACKTRACE;
-                       break;
                case 'f':
                        filter = filter | FILTER_UNRELEASE;
                        break;
index cc0d282..59d9e8b 100644 (file)
@@ -3,7 +3,7 @@
 # kbuild file for usr/ - including initramfs image
 #
 
-compress-y                                     := shipped
+compress-y                                     := copy
 compress-$(CONFIG_INITRAMFS_COMPRESSION_GZIP)  := gzip
 compress-$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) := bzip2
 compress-$(CONFIG_INITRAMFS_COMPRESSION_LZMA)  := lzma
@@ -37,7 +37,7 @@ endif
 # .cpio.*, use it directly as an initramfs, and avoid double compression.
 ifeq ($(words $(subst .cpio.,$(space),$(ramfs-input))),2)
 cpio-data := $(ramfs-input)
-compress-y := shipped
+compress-y := copy
 endif
 
 endif
index 7b283d4..fa9819e 100644 (file)
@@ -10,7 +10,10 @@ UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration
 
 # In theory, we do not care -m32 or -m64 for header compile tests.
 # It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64.
-UAPI_CFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+
+# USERCFLAGS might contain sysroot location for CC.
+UAPI_CFLAGS += $(USERCFLAGS)
 
 override c_flags = $(UAPI_CFLAGS) -Wp,-MMD,$(depfile) -I$(objtree)/usr/include
 
@@ -84,7 +87,7 @@ endif
 # asm-generic/*.h is used by asm/*.h, and should not be included directly
 no-header-test += asm-generic/%
 
-extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
+always-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
 
 # Include the header twice to detect missing include guard.
 quiet_cmd_hdrtest = HDRTEST $<
index 69c318f..70e05af 100644 (file)
@@ -117,6 +117,8 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 
 static const struct file_operations stat_fops_per_vm;
 
+static struct file_operations kvm_chardev_ops;
+
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
 #ifdef CONFIG_KVM_COMPAT
@@ -251,7 +253,8 @@ static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
 {
        int cpu;
 
-       kvm_make_request(req, vcpu);
+       if (likely(!(req & KVM_REQUEST_NO_ACTION)))
+               __kvm_make_request(req, vcpu);
 
        if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                return;
@@ -1131,6 +1134,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
        preempt_notifier_inc();
        kvm_init_pm_notifier(kvm);
 
+       /*
+        * When the fd passed to this ioctl() is opened, it pins the module,
+        * but try_module_get() also prevents getting a reference if the module
+        * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
+        */
+       if (!try_module_get(kvm_chardev_ops.owner)) {
+               r = -ENODEV;
+               goto out_err;
+       }
+
        return kvm;
 
 out_err:
@@ -1220,6 +1233,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        preempt_notifier_dec();
        hardware_disable_all();
        mmdrop(mm);
+       module_put(kvm_chardev_ops.owner);
 }
 
 void kvm_get_kvm(struct kvm *kvm)
@@ -3663,7 +3677,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static struct file_operations kvm_vcpu_fops = {
+static const struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
@@ -4714,7 +4728,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 }
 #endif
 
-static struct file_operations kvm_vm_fops = {
+static const struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .llseek         = noop_llseek,
@@ -5721,8 +5735,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                goto out_free_5;
 
        kvm_chardev_ops.owner = module;
-       kvm_vm_fops.owner = module;
-       kvm_vcpu_fops.owner = module;
 
        r = misc_register(&kvm_dev);
        if (r) {
index ce878f4..dd84676 100644 (file)
@@ -27,7 +27,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 {
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
        struct gfn_to_pfn_cache *gpc;
-       bool wake_vcpus = false;
+       bool evict_vcpus = false;
 
        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -40,41 +40,32 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 
                        /*
                         * If a guest vCPU could be using the physical address,
-                        * it needs to be woken.
+                        * it needs to be forced out of guest mode.
                         */
-                       if (gpc->guest_uses_pa) {
-                               if (!wake_vcpus) {
-                                       wake_vcpus = true;
+                       if (gpc->usage & KVM_GUEST_USES_PFN) {
+                               if (!evict_vcpus) {
+                                       evict_vcpus = true;
                                        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                                }
                                __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
                        }
-
-                       /*
-                        * We cannot call mark_page_dirty() from here because
-                        * this physical CPU might not have an active vCPU
-                        * with which to do the KVM dirty tracking.
-                        *
-                        * Neither is there any point in telling the kernel MM
-                        * that the underlying page is dirty. A vCPU in guest
-                        * mode might still be writing to it up to the point
-                        * where we wake them a few lines further down anyway.
-                        *
-                        * So all the dirty marking happens on the unmap.
-                        */
                }
                write_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);
 
-       if (wake_vcpus) {
-               unsigned int req = KVM_REQ_GPC_INVALIDATE;
+       if (evict_vcpus) {
+               /*
+                * KVM needs to ensure the vCPU is fully out of guest context
+                * before allowing the invalidation to continue.
+                */
+               unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
                bool called;
 
                /*
                 * If the OOM reaper is active, then all vCPUs should have
                 * been stopped already, so perform the request without
-                * KVM_REQUEST_WAIT and be sad if any needed to be woken.
+                * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
                 */
                if (!may_block)
                        req &= ~KVM_REQUEST_WAIT;
@@ -104,8 +95,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
 
-static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
-                         gpa_t gpa, bool dirty)
+static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa)
 {
        /* Unmap the old page if it was mapped before, and release it */
        if (!is_error_noslot_pfn(pfn)) {
@@ -118,9 +108,7 @@ static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
 #endif
                }
 
-               kvm_release_pfn(pfn, dirty);
-               if (dirty)
-                       mark_page_dirty(kvm, gpa);
+               kvm_release_pfn(pfn, false);
        }
 }
 
@@ -152,7 +140,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva)
 }
 
 int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                                gpa_t gpa, unsigned long len, bool dirty)
+                                gpa_t gpa, unsigned long len)
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -160,7 +148,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        unsigned long old_uhva;
        gpa_t old_gpa;
        void *old_khva;
-       bool old_valid, old_dirty;
+       bool old_valid;
        int ret = 0;
 
        /*
@@ -177,20 +165,19 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;
        old_valid = gpc->valid;
-       old_dirty = gpc->dirty;
 
        /* If the userspace HVA is invalid, refresh that first */
        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
            kvm_is_error_hva(gpc->uhva)) {
                gfn_t gfn = gpa_to_gfn(gpa);
 
-               gpc->dirty = false;
                gpc->gpa = gpa;
                gpc->generation = slots->generation;
                gpc->memslot = __gfn_to_memslot(slots, gfn);
                gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
 
                if (kvm_is_error_hva(gpc->uhva)) {
+                       gpc->pfn = KVM_PFN_ERR_FAULT;
                        ret = -EFAULT;
                        goto out;
                }
@@ -219,7 +206,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                        goto map_done;
                }
 
-               if (gpc->kernel_map) {
+               if (gpc->usage & KVM_HOST_USES_PFN) {
                        if (new_pfn == old_pfn) {
                                new_khva = old_khva;
                                old_pfn = KVM_PFN_ERR_FAULT;
@@ -255,14 +242,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        }
 
  out:
-       if (ret)
-               gpc->dirty = false;
-       else
-               gpc->dirty = dirty;
-
        write_unlock_irq(&gpc->lock);
 
-       __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+       __release_gpc(kvm, old_pfn, old_khva, old_gpa);
 
        return ret;
 }
@@ -272,7 +254,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
        void *old_khva;
        kvm_pfn_t old_pfn;
-       bool old_dirty;
        gpa_t old_gpa;
 
        write_lock_irq(&gpc->lock);
@@ -280,7 +261,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
        gpc->valid = false;
 
        old_khva = gpc->khva - offset_in_page(gpc->khva);
-       old_dirty = gpc->dirty;
        old_gpa = gpc->gpa;
        old_pfn = gpc->pfn;
 
@@ -293,16 +273,17 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 
        write_unlock_irq(&gpc->lock);
 
-       __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+       __release_gpc(kvm, old_pfn, old_khva, old_gpa);
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
 
 
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, bool guest_uses_pa,
-                             bool kernel_map, gpa_t gpa, unsigned long len,
-                             bool dirty)
+                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                             gpa_t gpa, unsigned long len)
 {
+       WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+
        if (!gpc->active) {
                rwlock_init(&gpc->lock);
 
@@ -310,8 +291,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->uhva = KVM_HVA_ERR_BAD;
                gpc->vcpu = vcpu;
-               gpc->kernel_map = kernel_map;
-               gpc->guest_uses_pa = guest_uses_pa;
+               gpc->usage = usage;
                gpc->valid = false;
                gpc->active = true;
 
@@ -319,7 +299,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);
        }
-       return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty);
+       return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);
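To make the reworked API shape concrete, a hedged sketch of a caller follows (the function name, gpa and length are placeholders; the signatures match the init and refresh changes above):

    /* Hedged sketch: initialising and later revalidating a pfn cache. */
    static int map_guest_page(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              struct gfn_to_pfn_cache *gpc, gpa_t gpa)
    {
            int ret;

            /*
             * The usage enum replaces the old guest_uses_pa/kernel_map bools,
             * and the trailing 'dirty' argument is gone entirely.
             */
            ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu,
                                            KVM_GUEST_AND_HOST_USE_PFN,
                                            gpa, PAGE_SIZE);
            if (ret)
                    return ret;

            /* After a possible invalidation, refresh with just gpa and len. */
            return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, PAGE_SIZE);
    }

With dirty tracking removed from the cache itself, callers are expected to mark pages dirty on their own where needed.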