Merge tag 'vfs.misc.v6.3-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 12 Mar 2023 16:00:54 +0000 (09:00 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 12 Mar 2023 16:00:54 +0000 (09:00 -0700)
Pull vfs fixes from Christian Brauner:

 - When allocating pages for a watch queue failed, we didn't return an
   error causing userspace to proceed even though all subsequent
   notifications would be lost. Make sure to return an error.

 - Fix a malformed tree entry for the idmapping maintainers entry.

 - When setting file leases from an idmapped mount via
   generic_setlease() we need to take the idmapping into account
   otherwise taking a lease would fail from an idmapped mount.

 - Remove two redundant assignments, one in splice code and the other in
   locks code, that static checkers complained about.

* tag 'vfs.misc.v6.3-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/idmapping:
  filelocks: use mount idmapping for setlease permission check
  fs/locks: Remove redundant assignment to cmd
  splice: Remove redundant assignment to ret
  MAINTAINERS: repair a malformed T: entry in IDMAPPED MOUNTS
  watch_queue: fix IOC_WATCH_QUEUE_SET_SIZE alloc error paths

266 files changed:
.mailmap
Documentation/ABI/stable/sysfs-block
Documentation/bpf/bpf_devel_QA.rst
Documentation/filesystems/ext4/blockgroup.rst
Documentation/netlink/genetlink-c.yaml
Documentation/netlink/genetlink-legacy.yaml
Documentation/netlink/genetlink.yaml
Documentation/netlink/specs/ethtool.yaml
Documentation/netlink/specs/fou.yaml
Documentation/netlink/specs/netdev.yaml
Documentation/userspace-api/netlink/specs.rst
MAINTAINERS
arch/alpha/lib/fpreg.c
arch/m68k/kernel/setup_mm.c
arch/m68k/kernel/traps.c
arch/m68k/mm/motorola.c
arch/mips/configs/mtx1_defconfig
arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
arch/powerpc/boot/dts/fsl/t1040rdb.dts
arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/xmon/xmon.c
arch/riscv/Makefile
arch/riscv/errata/sifive/errata.c
arch/riscv/include/asm/ftrace.h
arch/riscv/include/asm/patch.h
arch/riscv/kernel/compat_vdso/Makefile
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/patch.c
arch/riscv/kernel/stacktrace.c
arch/riscv/net/bpf_jit_comp64.c
arch/sh/include/asm/processor_32.h
arch/sh/kernel/signal_32.c
arch/x86/include/asm/resctrl.h
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
block/bfq-iosched.c
block/genhd.c
drivers/char/random.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/display/drm_hdmi_helper.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/nouveau/dispnv50/wndw.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
drivers/gpu/drm/tiny/cirrus.c
drivers/hid/hid-core.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/intel-ish-hid/ipc/ipc.c
drivers/hid/uhid.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-dev.c
drivers/i2c/i2c-slave-eeprom.c
drivers/i2c/i2c-slave-testunit.c
drivers/i2c/i2c-smbus.c
drivers/i2c/muxes/i2c-mux-ltc4306.c
drivers/i2c/muxes/i2c-mux-pca9541.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/media/i2c/ov2685.c
drivers/media/i2c/ov5695.c
drivers/misc/ad525x_dpot-i2c.c
drivers/mtd/maps/pismo.c
drivers/mtd/ubi/block.c
drivers/net/dsa/mt7530.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/fealnx.c [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_dcb.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/microchip/lan966x/lan966x_police.c
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
drivers/net/ethernet/netronome/nfp/nfd3/dp.c
drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
drivers/net/ethernet/netronome/nfp/nfdk/dp.c
drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ieee802154/ca8210.c
drivers/net/phy/microchip.c
drivers/net/phy/phy_device.c
drivers/net/phy/smsc.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireguard/queueing.h
drivers/nfc/fdp/i2c.c
drivers/platform/mellanox/Kconfig
drivers/platform/x86/Kconfig
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/dell/dell-wmi-ddv.c
drivers/platform/x86/intel/int3472/tps68470_board_data.c
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
drivers/platform/x86/intel/speed_select_if/isst_if_common.h
drivers/platform/x86/intel/tpmi.c
drivers/platform/x86/mlx-platform.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_fp.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpi3mr/mpi3mr.h
drivers/scsi/mpi3mr/mpi3mr_app.c
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/mpi3mr/mpi3mr_transport.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/storvsc_drv.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
drivers/tty/serial/sc16is7xx.c
drivers/ufs/core/ufshcd.c
drivers/ufs/host/ufs-qcom.c
drivers/w1/masters/ds2482.c
fs/btrfs/bio.c
fs/btrfs/block-group.c
fs/btrfs/delayed-inode.c
fs/btrfs/extent_map.c
fs/btrfs/ioctl.c
fs/btrfs/sysfs.c
fs/erofs/data.c
fs/erofs/decompressor_lzma.c
fs/erofs/internal.h
fs/erofs/pcpubuf.c
fs/erofs/zdata.c
fs/erofs/zmap.c
fs/ext4/ext4.h
fs/ext4/fsmap.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/ext4/sysfs.c
fs/ext4/xattr.c
fs/file.c
fs/jbd2/journal.c
fs/nfsd/vfs.c
fs/udf/inode.c
include/linux/cpumask.h
include/linux/hid.h
include/linux/highmem.h
include/linux/i2c.h
include/linux/jbd2.h
include/net/netfilter/nf_tproxy.h
include/uapi/linux/btrfs.h
include/uapi/linux/fou.h
include/uapi/linux/netdev.h
io_uring/io-wq.c
io_uring/io_uring.c
io_uring/slist.h
io_uring/uring_cmd.c
kernel/bpf/btf.c
net/bpf/test_run.c
net/caif/caif_usb.c
net/core/netdev-genl-gen.c
net/core/netdev-genl-gen.h
net/core/skbuff.c
net/core/sock.c
net/ieee802154/nl802154.c
net/ipv4/fou_nl.c
net/ipv4/fou_nl.h
net/ipv4/netfilter/nf_tproxy_ipv4.c
net/ipv4/tcp_bpf.c
net/ipv4/udp_bpf.c
net/ipv6/ila/ila_xlat.c
net/ipv6/netfilter/nf_tproxy_ipv6.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nft_last.c
net/netfilter/nft_quota.c
net/nfc/netlink.c
net/sched/act_connmark.c
net/sched/cls_flower.c
net/smc/af_smc.c
net/socket.c
net/sunrpc/svc.c
net/tls/tls_device.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/unix/unix_bpf.c
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/asm/required-features.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/svm.h
tools/arch/x86/lib/memcpy_64.S
tools/arch/x86/lib/memset_64.S
tools/include/linux/bits.h
tools/include/uapi/linux/fcntl.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/netdev.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/prctl.h
tools/include/uapi/linux/vhost.h
tools/include/vdso/bits.h
tools/net/ynl/cli.py
tools/net/ynl/lib/__init__.py
tools/net/ynl/lib/nlspec.py
tools/net/ynl/lib/ynl.py
tools/net/ynl/ynl-gen-c.py
tools/net/ynl/ynl-regen.sh
tools/perf/builtin-inject.c
tools/perf/builtin-stat.c
tools/perf/tests/shell/lib/perf_json_output_lint.py
tools/perf/tests/shell/stat+csv_output.sh
tools/perf/util/bpf_skel/off_cpu.bpf.c
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/target.h
tools/testing/selftests/bpf/prog_tests/btf.c
tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
tools/testing/selftests/hid/config
tools/testing/selftests/netfilter/nft_nat.sh

index 5367faa..424564f 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -136,6 +136,9 @@ Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
 Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
+Faith Ekstrand <faith.ekstrand@collabora.com> <jason@jlekstrand.net>
+Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@intel.com>
+Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@collabora.com>
 Felipe W Damasio <felipewd@terra.com.br>
 Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
@@ -306,6 +309,8 @@ Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@redhat.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <m.chehab@samsung.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
+Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@mellanox.com>
+Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@nvidia.com>
 Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
 Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
@@ -411,7 +416,10 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
 Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
-Stephen Hemminger <shemminger@osdl.org>
+Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
+Stephen Hemminger <stephen@networkplumber.org> <shemminger@osdl.org>
+Stephen Hemminger <stephen@networkplumber.org> <sthemmin@microsoft.com>
+Stephen Hemminger <stephen@networkplumber.org> <sthemmin@vyatta.com>
 Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
 Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
 Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
index ac1e519..282de36 100644 (file)
@@ -705,6 +705,15 @@ Description:
                zoned will report "none".
 
 
+What:          /sys/block/<disk>/hidden
+Date:          March 2023
+Contact:       linux-block@vger.kernel.org
+Description:
+               [RO] the block device is hidden. it doesn’t produce events, and
+               can’t be opened from userspace or using blkdev_get*.
+               Used for the underlying components of multipath devices.
+
+
 What:          /sys/block/<disk>/stat
 Date:          February 2008
 Contact:       Jerome Marchand <jmarchan@redhat.com>
index 03d4993..b421d94 100644 (file)
@@ -7,8 +7,8 @@ workflows related to reporting bugs, submitting patches, and queueing
 patches for stable kernels.
 
 For general information about submitting patches, please refer to
-`Documentation/process/`_. This document only describes additional specifics
-related to BPF.
+Documentation/process/submitting-patches.rst. This document only describes
+additional specifics related to BPF.
 
 .. contents::
     :local:
@@ -461,15 +461,15 @@ needed::
 
   $ sudo make run_tests
 
-See the kernels selftest `Documentation/dev-tools/kselftest.rst`_
-document for further documentation.
+See :doc:`kernel selftest documentation </dev-tools/kselftest>`
+for details.
 
 To maximize the number of tests passing, the .config of the kernel
 under test should match the config file fragment in
 tools/testing/selftests/bpf as closely as possible.
 
 Finally to ensure support for latest BPF Type Format features -
-discussed in `Documentation/bpf/btf.rst`_ - pahole version 1.16
+discussed in Documentation/bpf/btf.rst - pahole version 1.16
 is required for kernels built with CONFIG_DEBUG_INFO_BTF=y.
 pahole is delivered in the dwarves package or can be built
 from source at
@@ -684,12 +684,8 @@ when:
 
 
 .. Links
-.. _Documentation/process/: https://www.kernel.org/doc/html/latest/process/
 .. _netdev-FAQ: Documentation/process/maintainer-netdev.rst
 .. _selftests:
    https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/
-.. _Documentation/dev-tools/kselftest.rst:
-   https://www.kernel.org/doc/html/latest/dev-tools/kselftest.html
-.. _Documentation/bpf/btf.rst: btf.rst
 
 Happy BPF hacking!
index 46d78f8..ed5a5ca 100644 (file)
@@ -105,9 +105,9 @@ descriptors. Instead, the superblock and a single block group descriptor
 block is placed at the beginning of the first, second, and last block
 groups in a meta-block group. A meta-block group is a collection of
 block groups which can be described by a single block group descriptor
-block. Since the size of the block group descriptor structure is 32
-bytes, a meta-block group contains 32 block groups for filesystems with
-a 1KB block size, and 128 block groups for filesystems with a 4KB
+block. Since the size of the block group descriptor structure is 64
+bytes, a meta-block group contains 16 block groups for filesystems with
+a 1KB block size, and 64 block groups for filesystems with a 4KB
 blocksize. Filesystems can either be created using this new block group
 descriptor layout, or existing filesystems can be resized on-line, and
 the field s_first_meta_bg in the superblock will indicate the first
index bbcfa24..f082a5a 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 %YAML 1.2
 ---
 $id: http://kernel.org/schemas/netlink/genetlink-c.yaml#
index 5642925..c6b8c77 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 %YAML 1.2
 ---
 $id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#
index 62a9227..b2d56ab 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 %YAML 1.2
 ---
 $id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#
index 08b7769..18ecb7d 100644 (file)
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
 name: ethtool
 
 protocol: genetlink-legacy
@@ -11,7 +13,6 @@ attribute-sets:
       -
         name: dev-index
         type: u32
-        value: 1
       -
         name: dev-name
         type: string
@@ -25,7 +26,6 @@ attribute-sets:
       -
         name: index
         type: u32
-        value: 1
       -
         name: name
         type: string
@@ -39,14 +39,12 @@ attribute-sets:
         name: bit
         type: nest
         nested-attributes: bitset-bit
-        value: 1
   -
     name: bitset
     attributes:
       -
         name: nomask
         type: flag
-        value: 1
       -
         name: size
         type: u32
@@ -61,7 +59,6 @@ attribute-sets:
       -
         name: index
         type: u32
-        value: 1
       -
         name: value
         type: string
@@ -71,7 +68,6 @@ attribute-sets:
       -
         name: string
         type: nest
-        value: 1
         multi-attr: true
         nested-attributes: string
   -
@@ -80,7 +76,6 @@ attribute-sets:
       -
         name: id
         type: u32
-        value: 1
       -
         name: count
         type: u32
@@ -96,14 +91,12 @@ attribute-sets:
         name: stringset
         type: nest
         multi-attr: true
-        value: 1
         nested-attributes: stringset
   -
     name: strset
     attributes:
       -
         name: header
-        value: 1
         type: nest
         nested-attributes: header
       -
@@ -119,7 +112,6 @@ attribute-sets:
     attributes:
       -
         name: header
-        value: 1
         type: nest
         nested-attributes: header
       -
@@ -132,7 +124,6 @@ attribute-sets:
     attributes:
       -
         name: header
-        value: 1
         type: nest
         nested-attributes: header
       -
@@ -180,7 +171,6 @@ attribute-sets:
     attributes:
       -
         name: pad
-        value: 1
         type: pad
       -
         name: reassembly-errors
@@ -205,7 +195,6 @@ attribute-sets:
     attributes:
       -
         name: header
-        value: 1
         type: nest
         nested-attributes: header
       -
@@ -251,13 +240,11 @@ operations:
 
       do: &strset-get-op
         request:
-          value: 1
           attributes:
             - header
             - stringsets
             - counts-only
         reply:
-          value: 1
           attributes:
             - header
             - stringsets
index 266c386..cff1042 100644 (file)
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
 name: fou
 
 protocol: genetlink-legacy
@@ -26,6 +28,7 @@ attribute-sets:
       -
         name: unspec
         type: unused
+        value: 0
       -
         name: port
         type: u16
@@ -71,6 +74,7 @@ operations:
     -
       name: unspec
       doc: unused
+      value: 0
 
     -
       name: add
index cffef09..24de747 100644 (file)
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
 name: netdev
 
 doc:
@@ -48,7 +50,6 @@ attribute-sets:
         name: ifindex
         doc: netdev ifindex
         type: u32
-        value: 1
         checks:
           min: 1
       -
@@ -66,7 +67,6 @@ operations:
     -
       name: dev-get
       doc: Get / dump information about a netdev.
-      value: 1
       attribute-set: dev
       do:
         request:
index 6ffe813..2122e0c 100644 (file)
@@ -24,6 +24,9 @@ YAML specifications can be found under ``Documentation/netlink/specs/``
 This document describes details of the schema.
 See :doc:`intro-specs` for a practical starting guide.
 
+All specs must be licensed under ``GPL-2.0-only OR BSD-3-Clause``
+to allow for easy adoption in user space code.
+
 Compatibility levels
 ====================
 
@@ -197,9 +200,15 @@ value
 Numerical attribute ID, used in serialized Netlink messages.
 The ``value`` property can be skipped, in which case the attribute ID
 will be the value of the previous attribute plus one (recursively)
-and ``0`` for the first attribute in the attribute set.
+and ``1`` for the first attribute in the attribute set.
+
+Attributes (and operations) use ``1`` as the default value for the first
+entry (unlike enums in definitions which start from ``0``) because
+entry ``0`` is almost always reserved as undefined. Spec can explicitly
+set value to ``0`` if needed.
 
-Note that the ``value`` of an attribute is defined only in its main set.
+Note that the ``value`` of an attribute is defined only in its main set
+(not in subsets).
 
 enum
 ~~~~
index 2c2e54e..02f57bc 100644 (file)
@@ -16391,6 +16391,7 @@ R:      Alexander Shishkin <alexander.shishkin@linux.intel.com>
 R:     Jiri Olsa <jolsa@kernel.org>
 R:     Namhyung Kim <namhyung@kernel.org>
 R:     Ian Rogers <irogers@google.com>
+R:     Adrian Hunter <adrian.hunter@intel.com>
 L:     linux-perf-users@vger.kernel.org
 L:     linux-kernel@vger.kernel.org
 S:     Supported
index 612c5ec..7c08b22 100644 (file)
@@ -23,7 +23,7 @@ alpha_read_fp_reg (unsigned long reg)
 
        if (unlikely(reg >= 32))
                return 0;
-       preempt_enable();
+       preempt_disable();
        if (current_thread_info()->status & TS_SAVED_FP)
                val = current_thread_info()->fp[reg];
        else switch (reg) {
@@ -133,7 +133,7 @@ alpha_read_fp_reg_s (unsigned long reg)
        if (unlikely(reg >= 32))
                return 0;
 
-       preempt_enable();
+       preempt_disable();
        if (current_thread_info()->status & TS_SAVED_FP) {
                LDT(0, current_thread_info()->fp[reg]);
                STS(0, val);
index 3a2bb2e..fbff1ce 100644 (file)
@@ -326,16 +326,16 @@ void __init setup_arch(char **cmdline_p)
                panic("No configuration setup");
        }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (m68k_ramdisk.size) {
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size)
                memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
+
+       paging_init();
+
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) {
                initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
                initrd_end = initrd_start + m68k_ramdisk.size;
                pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
        }
-#endif
-
-       paging_init();
 
 #ifdef CONFIG_NATFEAT
        nf_init();
index 5c8cba0..a700807 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/kallsyms.h>
+#include <linux/extable.h>
 
 #include <asm/setup.h>
 #include <asm/fpu.h>
@@ -545,7 +546,8 @@ static inline void bus_error030 (struct frame *fp)
                        errorcode |= 2;
 
                if (mmusr & (MMU_I | MMU_WP)) {
-                       if (ssw & 4) {
+                       /* We might have an exception table for this PC */
+                       if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
                                pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
                                       ssw & RW ? "read" : "write",
                                       fp->un.fmtb.daddr,
index 2a37563..9113012 100644 (file)
@@ -437,7 +437,7 @@ void __init paging_init(void)
        }
 
        min_addr = m68k_memory[0].addr;
-       max_addr = min_addr + m68k_memory[0].size;
+       max_addr = min_addr + m68k_memory[0].size - 1;
        memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
                          MEMBLOCK_NONE);
        for (i = 1; i < m68k_num_memory;) {
@@ -452,21 +452,21 @@ void __init paging_init(void)
                }
                memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
                                  MEMBLOCK_NONE);
-               addr = m68k_memory[i].addr + m68k_memory[i].size;
+               addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
                if (addr > max_addr)
                        max_addr = addr;
                i++;
        }
        m68k_memoffset = min_addr - PAGE_OFFSET;
-       m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
+       m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;
 
        module_fixup(NULL, __start_fixup, __stop_fixup);
        flush_icache();
 
-       high_memory = phys_to_virt(max_addr);
+       high_memory = phys_to_virt(max_addr) + 1;
 
        min_low_pfn = availmem >> PAGE_SHIFT;
-       max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
+       max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;
 
        /* Reserve kernel text/data/bss and the memory allocated in head.S */
        memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
index 89a1511..edf9634 100644 (file)
@@ -284,6 +284,7 @@ CONFIG_IXGB=m
 CONFIG_SKGE=m
 CONFIG_SKY2=m
 CONFIG_MYRI10GE=m
+CONFIG_FEALNX=m
 CONFIG_NATSEMI=m
 CONFIG_NS83820=m
 CONFIG_S2IO=m
index 73f8c99..d4f5f15 100644 (file)
@@ -10,7 +10,6 @@
 
 / {
        model = "fsl,T1040RDB-REV-A";
-       compatible = "fsl,T1040RDB-REV-A";
 };
 
 &seville_port0 {
index b6733e7..dd3aab8 100644 (file)
 };
 
 &seville_port8 {
-       ethernet = <&enet0>;
+       status = "okay";
+};
+
+&seville_port9 {
        status = "okay";
 };
index f58eb82..ad0ab33 100644 (file)
                        seville_port8: port@8 {
                                reg = <8>;
                                phy-mode = "internal";
+                               ethernet = <&enet0>;
                                status = "disabled";
 
                                fixed-link {
                        seville_port9: port@9 {
                                reg = <9>;
                                phy-mode = "internal";
+                               ethernet = <&enet1>;
                                status = "disabled";
 
                                fixed-link {
index 1102582..f73c98b 100644 (file)
@@ -461,6 +461,7 @@ CONFIG_MV643XX_ETH=m
 CONFIG_SKGE=m
 CONFIG_SKY2=m
 CONFIG_MYRI10GE=m
+CONFIG_FEALNX=m
 CONFIG_NATSEMI=m
 CONFIG_NS83820=m
 CONFIG_PCMCIA_AXNET=m
index 73c620c..e753a6b 100644 (file)
@@ -1275,7 +1275,7 @@ static int xmon_batch_next_cpu(void)
        while (!cpumask_empty(&xmon_batch_cpus)) {
                cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
                                        xmon_batch_start_cpu, true);
-               if (cpu == nr_cpumask_bits)
+               if (cpu >= nr_cpu_ids)
                        break;
                if (xmon_batch_start_cpu == -1)
                        xmon_batch_start_cpu = cpu;
index 6203c33..4de83b9 100644 (file)
@@ -84,6 +84,13 @@ endif
 # Avoid generating .eh_frame sections.
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
 
+# The RISC-V attributes frequently cause compatibility issues and provide no
+# information, so just turn them off.
+KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute)
+KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute)
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
+KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
+
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
 KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
 
index da55cb2..31d2ebe 100644 (file)
@@ -111,7 +111,7 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
                        mutex_lock(&text_mutex);
                        patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt),
                                          alt->alt_len);
-                       mutex_lock(&text_mutex);
+                       mutex_unlock(&text_mutex);
                        cpu_apply_errata |= tmp;
                }
        }
index 9e73922..d47d87c 100644 (file)
@@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 #define ftrace_init_nop ftrace_init_nop
 #endif
 
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #endif /* _ASM_RISCV_FTRACE_H */
index f433121..63c9883 100644 (file)
@@ -9,4 +9,6 @@
 int patch_text_nosync(void *addr, const void *insns, size_t len);
 int patch_text(void *addr, u32 *insns, int ninsns);
 
+extern int riscv_patch_in_stop_machine;
+
 #endif /* _ASM_RISCV_PATCH_H */
index 260daf3..7f34f3c 100644 (file)
@@ -14,6 +14,10 @@ COMPAT_LD := $(LD)
 COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
 COMPAT_LD_FLAGS := -melf32lriscv
 
+# Disable attributes, as they're useless and break the build.
+COMPAT_CC_FLAGS += $(call cc-option,-mno-riscv-attribute)
+COMPAT_CC_FLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
+
 # Files to link into the compat_vdso
 obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o
 
index 5bff37a..03a6434 100644 (file)
 void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
 {
        mutex_lock(&text_mutex);
+
+       /*
+        * The code sequences we use for ftrace can't be patched while the
+        * kernel is running, so we need to use stop_machine() to modify them
+        * for now.  This doesn't play nice with text_mutex, we use this flag
+        * to elide the check.
+        */
+       riscv_patch_in_stop_machine = true;
 }
 
 void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
 {
+       riscv_patch_in_stop_machine = false;
        mutex_unlock(&text_mutex);
 }
 
@@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
 {
        int out;
 
-       ftrace_arch_code_modify_prepare();
+       mutex_lock(&text_mutex);
        out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
-       ftrace_arch_code_modify_post_process();
+       mutex_unlock(&text_mutex);
 
        return out;
 }
index 8086d1a..575e71d 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/ftrace.h>
 #include <asm/patch.h>
 
 struct patch_insn {
@@ -20,6 +21,8 @@ struct patch_insn {
        atomic_t cpu_count;
 };
 
+int riscv_patch_in_stop_machine = false;
+
 #ifdef CONFIG_MMU
 /*
  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
@@ -60,8 +63,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
         * Before reaching here, it was expected to lock the text_mutex
         * already, so we don't need to give another lock here and could
         * ensure that it was safe between each cores.
+        *
+        * We're currently using stop_machine() for ftrace & kprobes, and while
+        * that ensures text_mutex is held before installing the mappings it
+        * does not ensure text_mutex is held by the calling thread.  That's
+        * safe but triggers a lockdep failure, so just elide it for that
+        * specific case.
         */
-       lockdep_assert_held(&text_mutex);
+       if (!riscv_patch_in_stop_machine)
+               lockdep_assert_held(&text_mutex);
 
        if (across_pages)
                patch_map(addr + len, FIX_TEXT_POKE1);
@@ -125,6 +135,7 @@ NOKPROBE_SYMBOL(patch_text_cb);
 
 int patch_text(void *addr, u32 *insns, int ninsns)
 {
+       int ret;
        struct patch_insn patch = {
                .addr = addr,
                .insns = insns,
@@ -132,7 +143,18 @@ int patch_text(void *addr, u32 *insns, int ninsns)
                .cpu_count = ATOMIC_INIT(0),
        };
 
-       return stop_machine_cpuslocked(patch_text_cb,
-                                      &patch, cpu_online_mask);
+       /*
+        * kprobes takes text_mutex, before calling patch_text(), but as we call
+        * calls stop_machine(), the lockdep assertion in patch_insn_write()
+        * gets confused by the context in which the lock is taken.
+        * Instead, ensure the lock is held before calling stop_machine(), and
+        * set riscv_patch_in_stop_machine to skip the check in
+        * patch_insn_write().
+        */
+       lockdep_assert_held(&text_mutex);
+       riscv_patch_in_stop_machine = true;
+       ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
+       riscv_patch_in_stop_machine = false;
+       return ret;
 }
 NOKPROBE_SYMBOL(patch_text);
index f9a5a7c..64a9c09 100644 (file)
@@ -101,7 +101,7 @@ void notrace walk_stackframe(struct task_struct *task,
        while (!kstack_end(ksp)) {
                if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
                        break;
-               pc = (*ksp++) - 0x4;
+               pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
        }
 }
 
index f5a6687..acdc3f0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/filter.h>
 #include <linux/memory.h>
 #include <linux/stop_machine.h>
+#include <asm/patch.h>
 #include "bpf_jit.h"
 
 #define RV_REG_TCC RV_REG_A6
index 27aebf1..3ef7adf 100644 (file)
@@ -50,6 +50,7 @@
 #define SR_FD          0x00008000
 #define SR_MD          0x40000000
 
+#define SR_USER_MASK   0x00000303      // M, Q, S, T bits
 /*
  * DSP structure and data
  */
index 90f495d..a6bfc6f 100644 (file)
@@ -115,6 +115,7 @@ static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
 {
        unsigned int err = 0;
+       unsigned int sr = regs->sr & ~SR_USER_MASK;
 
 #define COPY(x)                err |= __get_user(regs->x, &sc->sc_##x)
                        COPY(regs[1]);
@@ -130,6 +131,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
        COPY(sr);       COPY(pc);
 #undef COPY
 
+       regs->sr = (regs->sr & SR_USER_MASK) | sr;
+
 #ifdef CONFIG_SH_FPU
        if (boot_cpu_data.flags & CPU_HAS_FPU) {
                int owned_fp;
index 52788f7..255a78d 100644 (file)
@@ -49,7 +49,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  *   simple as possible.
  * Must be called with preemption disabled.
  */
-static void __resctrl_sched_in(void)
+static inline void __resctrl_sched_in(struct task_struct *tsk)
 {
        struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
        u32 closid = state->default_closid;
@@ -61,13 +61,13 @@ static void __resctrl_sched_in(void)
         * Else use the closid/rmid assigned to this cpu.
         */
        if (static_branch_likely(&rdt_alloc_enable_key)) {
-               tmp = READ_ONCE(current->closid);
+               tmp = READ_ONCE(tsk->closid);
                if (tmp)
                        closid = tmp;
        }
 
        if (static_branch_likely(&rdt_mon_enable_key)) {
-               tmp = READ_ONCE(current->rmid);
+               tmp = READ_ONCE(tsk->rmid);
                if (tmp)
                        rmid = tmp;
        }
@@ -88,17 +88,17 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
        return val * scale;
 }
 
-static inline void resctrl_sched_in(void)
+static inline void resctrl_sched_in(struct task_struct *tsk)
 {
        if (static_branch_likely(&rdt_enable_key))
-               __resctrl_sched_in();
+               __resctrl_sched_in(tsk);
 }
 
 void resctrl_cpu_detect(struct cpuinfo_x86 *c);
 
 #else
 
-static inline void resctrl_sched_in(void) {}
+static inline void resctrl_sched_in(struct task_struct *tsk) {}
 static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
 
 #endif /* CONFIG_X86_CPU_RESCTRL */
index e2c1599..884b6e9 100644 (file)
@@ -314,7 +314,7 @@ static void update_cpu_closid_rmid(void *info)
         * executing task might have its own closid selected. Just reuse
         * the context switch code.
         */
-       resctrl_sched_in();
+       resctrl_sched_in(current);
 }
 
 /*
@@ -530,7 +530,7 @@ static void _update_task_closid_rmid(void *task)
         * Otherwise, the MSR is updated when the task is scheduled in.
         */
        if (task == current)
-               resctrl_sched_in();
+               resctrl_sched_in(task);
 }
 
 static void update_task_closid_rmid(struct task_struct *t)
index 470c128..708c87b 100644 (file)
@@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        switch_fpu_finish();
 
        /* Load the Intel cache allocation PQR MSR. */
-       resctrl_sched_in();
+       resctrl_sched_in(next_p);
 
        return prev_p;
 }
index 4e34b3b..bb65a68 100644 (file)
@@ -656,7 +656,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        }
 
        /* Load the Intel cache allocation PQR MSR. */
-       resctrl_sched_in();
+       resctrl_sched_in(next_p);
 
        return prev_p;
 }
index 8a8d444..d9ed310 100644 (file)
@@ -2854,11 +2854,11 @@ bfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
        int proc_ref = min(bfqq_process_refs(bfqq),
                           bfqq_process_refs(stable_merge_bfqq));
-       struct bfq_queue *new_bfqq;
+       struct bfq_queue *new_bfqq = NULL;
 
-       if (idling_boosts_thr_without_issues(bfqd, bfqq) ||
-           proc_ref == 0)
-               return NULL;
+       bfqq_data->stable_merge_bfqq = NULL;
+       if (idling_boosts_thr_without_issues(bfqd, bfqq) || proc_ref == 0)
+               goto out;
 
        /* next function will take at least one ref */
        new_bfqq = bfq_setup_merge(bfqq, stable_merge_bfqq);
@@ -2873,6 +2873,11 @@ bfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                        new_bfqq_data->stably_merged = true;
                }
        }
+
+out:
+       /* deschedule stable merge, because done or aborted here */
+       bfq_put_stable_ref(stable_merge_bfqq);
+
        return new_bfqq;
 }
 
@@ -2933,11 +2938,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                        struct bfq_queue *stable_merge_bfqq =
                                bfqq_data->stable_merge_bfqq;
 
-                       /* deschedule stable merge, because done or aborted here */
-                       bfq_put_stable_ref(stable_merge_bfqq);
-
-                       bfqq_data->stable_merge_bfqq = NULL;
-
                        return bfq_setup_stable_merge(bfqd, bfqq,
                                                      stable_merge_bfqq,
                                                      bfqq_data);
index 3ee5577..02d9cfb 100644 (file)
@@ -385,7 +385,7 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
        if (IS_ERR(bdev))
                ret =  PTR_ERR(bdev);
        else
-               blkdev_put(bdev, mode);
+               blkdev_put(bdev, mode & ~FMODE_EXCL);
 
        if (!(mode & FMODE_EXCL))
                bd_abort_claiming(disk->part0, disk_scan_partitions);
index ce3ccd1..253f2dd 100644 (file)
@@ -1311,7 +1311,7 @@ static void __cold try_to_generate_entropy(void)
                        /* Basic CPU round-robin, which avoids the current CPU. */
                        do {
                                cpu = cpumask_next(cpu, &timer_cpus);
-                               if (cpu == nr_cpumask_bits)
+                               if (cpu >= nr_cpu_ids)
                                        cpu = cpumask_first(&timer_cpus);
                        } while (cpu == smp_processor_id() && num_cpus > 1);
 
index b719852..1a3cb53 100644 (file)
@@ -543,6 +543,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
        struct harvest_table *harvest_info;
        u16 offset;
        int i;
+       uint32_t umc_harvest_config = 0;
 
        bhdr = (struct binary_header *)adev->mman.discovery_bin;
        offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
@@ -570,12 +571,17 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
                        adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
                        break;
                case UMC_HWID:
+                       umc_harvest_config |=
+                               1 << (le16_to_cpu(harvest_info->list[i].number_instance));
                        (*umc_harvest_count)++;
                        break;
                default:
                        break;
                }
        }
+
+       adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+                               ~umc_harvest_config;
 }
 
 /* ================================================== */
@@ -1156,8 +1162,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                                AMDGPU_MAX_SDMA_INSTANCES);
                        }
 
-                       if (le16_to_cpu(ip->hw_id) == UMC_HWID)
+                       if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
                                adev->gmc.num_umc++;
+                               adev->umc.node_inst_num++;
+                       }
 
                        for (k = 0; k < num_base_address; k++) {
                                /*
index e3e1ed4..6c7d672 100644 (file)
@@ -1315,7 +1315,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 
        if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
            !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
-           adev->in_suspend || adev->shutdown)
+           adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
                return;
 
        if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
index 28fe6d9..3f5d130 100644 (file)
@@ -602,27 +602,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
                   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 {
        int ret;
-       int index, idx;
+       int index;
        int timeout = 20000;
        bool ras_intr = false;
        bool skip_unsupport = false;
-       bool dev_entered;
 
        if (psp->adev->no_hw_access)
                return 0;
 
-       dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx);
-       /*
-        * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring
-        * a lock in drm_dev_enter during driver unload because we must call
-        * drm_dev_unplug as the beginning  of unload driver sequence . It is very
-        * crucial that userspace can't access device instances anymore.
-        */
-       if (!dev_entered)
-               WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD &&
-                       psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA &&
-                       psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD);
-
        memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 
        memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -686,8 +673,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
        }
 
 exit:
-       if (dev_entered)
-               drm_dev_exit(idx);
        return ret;
 }
 
index f2bf979..36e1933 100644 (file)
@@ -42,7 +42,7 @@
 #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
 
 #define LOOP_UMC_NODE_INST(node_inst) \
-               for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++)
+               for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num)
 
 #define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
                LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
@@ -69,7 +69,7 @@ struct amdgpu_umc {
        /* number of umc instance with memory map register access */
        uint32_t umc_inst_num;
 
-       /*number of umc node instance with memory map register access*/
+       /* Total number of umc node instances, including harvested ones */
        uint32_t node_inst_num;
 
        /* UMC regiser per channel offset */
@@ -82,6 +82,9 @@ struct amdgpu_umc {
 
        const struct amdgpu_umc_funcs *funcs;
        struct amdgpu_umc_ras *ras;
+
+       /* bitmask of active (non-harvested) umc node instances */
+       unsigned long active_mask;
 };
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
index 85e0afc..af7b3ba 100644 (file)
@@ -567,7 +567,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
        case IP_VERSION(8, 10, 0):
                adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
-               adev->umc.node_inst_num = adev->gmc.num_umc;
                adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
                adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
                adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
index 4b0d563..4ef1fa4 100644 (file)
@@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
                if (def != data)
                        WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
                break;
-       case IP_VERSION(7, 5, 1):
-               data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
-               data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
-               WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
-               fallthrough;
        default:
                def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
                data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
@@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
                break;
        }
 
+       switch (adev->ip_versions[NBIO_HWIP][0]) {
+       case IP_VERSION(7, 3, 0):
+       case IP_VERSION(7, 5, 1):
+               data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+               data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+               WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+               break;
+       }
+
        if (amdgpu_sriov_vf(adev))
                adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
                        regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
index d972025..855d390 100644 (file)
@@ -444,9 +444,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
        *value = 0;
        for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
                en = &nv_allowed_read_registers[i];
-               if (adev->reg_offset[en->hwip][en->inst] &&
-                   reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
-                                  + en->reg_offset))
+               if (!adev->reg_offset[en->hwip][en->inst])
+                       continue;
+               else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+                                       + en->reg_offset))
                        continue;
 
                *value = nv_get_register_value(adev,
index 7cd17dd..2eddd7f 100644 (file)
@@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
        *value = 0;
        for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
                en = &soc15_allowed_read_registers[i];
-               if (adev->reg_offset[en->hwip][en->inst] &&
-                       reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+               if (!adev->reg_offset[en->hwip][en->inst])
+                       continue;
+               else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
                                        + en->reg_offset))
                        continue;
 
index 620f740..061793d 100644 (file)
@@ -111,6 +111,7 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
        case IP_VERSION(4, 0, 2):
+       case IP_VERSION(4, 0, 4):
                if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
                        if (encode)
                                *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
@@ -291,9 +292,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
        *value = 0;
        for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
                en = &soc21_allowed_read_registers[i];
-               if (adev->reg_offset[en->hwip][en->inst] &&
-                   reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
-                                  + en->reg_offset))
+               if (!adev->reg_offset[en->hwip][en->inst])
+                       continue;
+               else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+                                       + en->reg_offset))
                        continue;
 
                *value = soc21_get_register_value(adev,
index 25eaf4a..c6dfd43 100644 (file)
@@ -31,9 +31,9 @@
 /* number of umc instance with memory map register access */
 #define UMC_V8_10_UMC_INSTANCE_NUM             2
 
-/* Total channel instances for all umc nodes */
+/* Total channel instances for all available umc nodes */
 #define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
-       (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num)
+       (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc)
 
 /* UMC regiser per channel offset */
 #define UMC_V8_10_PER_CHANNEL_OFFSET   0x400
index cbef2e1..38c9e1c 100644 (file)
@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
        if (!pdd->doorbell_index) {
                int r = kfd_alloc_process_doorbells(pdd->dev,
                                                    &pdd->doorbell_index);
-               if (r)
+               if (r < 0)
                        return 0;
        }
 
index 24715ca..01383aa 100644 (file)
@@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = {
 
 };
 
+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+       uint32_t max = 0;
+       int i;
+
+       for (i = 0; i < num_clocks; ++i) {
+               if (clocks[i] > max)
+                       max = clocks[i];
+       }
+
+       return max;
+}
+
 static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
                unsigned int voltage)
 {
@@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params(
 
        bw_params->clk_table.num_entries = j + 1;
 
-       for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+       for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
                bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
                bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
                bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
                bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
        }
+       bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
+       bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
+       bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
+       bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
 
        bw_params->vram_type = bios_info->memory_type;
        bw_params->num_channels = bios_info->ma_channel_number;
index 923a9fb..27448ff 100644 (file)
@@ -46,6 +46,7 @@
 #include "asic_reg/mp/mp_13_0_0_sh_mask.h"
 #include "smu_cmn.h"
 #include "amdgpu_ras.h"
+#include "umc_v8_10.h"
 
 /*
  * DO NOT use these for err/warn/info/debug messages.
 
 #define DEBUGSMC_MSG_Mode1Reset        2
 
+/*
+ * SMU_v13_0_10 supports ECCTABLE since version 80.34.0,
+ * use this to check whether the ECCTABLE feature is supported
+ */
+#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200
+
 static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                 1),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,               1),
@@ -229,6 +236,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(ACTIVITY_MONITOR_COEFF),
        [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
        TAB_MAP(I2C_COMMANDS),
+       TAB_MAP(ECCINFO),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -462,6 +470,8 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
                       AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+       SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 
        smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
        if (!smu_table->metrics_table)
@@ -477,8 +487,14 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
        if (!smu_table->watermarks_table)
                goto err2_out;
 
+       smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+       if (!smu_table->ecc_table)
+               goto err3_out;
+
        return 0;
 
+err3_out:
+       kfree(smu_table->watermarks_table);
 err2_out:
        kfree(smu_table->gpu_metrics_table);
 err1_out:
@@ -2036,6 +2052,64 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
        return ret;
 }
 
+static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t if_version = 0xff, smu_version = 0xff;
+       int ret = 0;
+
+       ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+       if (ret)
+               return -EOPNOTSUPP;
+
+       if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
+               (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
+               return ret;
+       else
+               return -EOPNOTSUPP;
+}
+
+static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
+                                                                       void *table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct amdgpu_device *adev = smu->adev;
+       EccInfoTable_t *ecc_table = NULL;
+       struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+       int i, ret = 0;
+       struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+       ret = smu_v13_0_0_check_ecc_table_support(smu);
+       if (ret)
+               return ret;
+
+       ret = smu_cmn_update_table(smu,
+                                       SMU_TABLE_ECCINFO,
+                                       0,
+                                       smu_table->ecc_table,
+                                       false);
+       if (ret) {
+               dev_info(adev->dev, "Failed to export SMU ecc table!\n");
+               return ret;
+       }
+
+       ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+       for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) {
+               ecc_info_per_channel = &(eccinfo->ecc[i]);
+               ecc_info_per_channel->ce_count_lo_chip =
+                               ecc_table->EccInfo[i].ce_count_lo_chip;
+               ecc_info_per_channel->ce_count_hi_chip =
+                               ecc_table->EccInfo[i].ce_count_hi_chip;
+               ecc_info_per_channel->mca_umc_status =
+                               ecc_table->EccInfo[i].mca_umc_status;
+               ecc_info_per_channel->mca_umc_addr =
+                               ecc_table->EccInfo[i].mca_umc_addr;
+       }
+
+       return ret;
+}
+
 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -2111,6 +2185,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
        .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
        .gpo_control = smu_v13_0_gpo_control,
+       .get_ecc_info = smu_v13_0_0_get_ecc_info,
 };
 
 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
index 0264abe..faf5e9e 100644 (file)
@@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
 
        /* Sink EOTF is Bit map while infoframe is absolute values */
        if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
-           connector->hdr_sink_metadata.hdmi_type1.eotf)) {
-               DRM_DEBUG_KMS("EOTF Not Supported\n");
-               return -EINVAL;
-       }
+           connector->hdr_sink_metadata.hdmi_type1.eotf))
+               DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
 
        err = hdmi_drm_infoframe_init(frame);
        if (err < 0)
index 5457c02..fed4180 100644 (file)
@@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
        drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
        drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
        drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
+       drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
 
        if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                if (state->writeback_job && state->writeback_job->fb)
index 871870d..949b18a 100644 (file)
@@ -23,7 +23,6 @@ config DRM_MSM
        select SHMEM
        select TMPFS
        select QCOM_SCM
-       select DEVFREQ_GOV_SIMPLE_ONDEMAND
        select WANT_DEV_COREDUMP
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
index d09221f..a1e006e 100644 (file)
@@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        OUT_RING(ring, 1);
 
        /* Enable local preemption for finegrain preemption */
-       OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
-       OUT_RING(ring, 0x02);
+       OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+       OUT_RING(ring, 0x1);
 
        /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -806,7 +806,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
        /* Set the highest bank bit */
-       if (adreno_is_a540(adreno_gpu))
+       if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
                regbit = 2;
        else
                regbit = 1;
index 7658e89..f58dd56 100644 (file)
@@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
                struct msm_ringbuffer *ring = gpu->rb[i];
 
                spin_lock_irqsave(&ring->preempt_lock, flags);
-               empty = (get_wptr(ring) == ring->memptrs->rptr);
+               empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
                spin_unlock_irqrestore(&ring->preempt_lock, flags);
 
                if (!empty)
@@ -207,6 +207,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
                a5xx_gpu->preempt[i]->wptr = 0;
                a5xx_gpu->preempt[i]->rptr = 0;
                a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+               a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
        }
 
        /* Write a 0 to signal that we aren't switching pagetables */
@@ -257,7 +258,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
        ptr->data = 0;
        ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
 
-       ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
        ptr->counter = counters_iova;
 
        return 0;
index f3c9600..7f5bc73 100644 (file)
@@ -974,7 +974,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
        int status, ret;
 
        if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
-               return 0;
+               return -EINVAL;
 
        gmu->hung = false;
 
index aae60cb..6faea50 100644 (file)
@@ -1746,7 +1746,9 @@ static void a6xx_destroy(struct msm_gpu *gpu)
 
        a6xx_llc_slices_destroy(a6xx_gpu);
 
+       mutex_lock(&a6xx_gpu->gmu.lock);
        a6xx_gmu_remove(a6xx_gpu);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
 
        adreno_gpu_cleanup(adreno_gpu);
 
index 36f062c..c5c4c93 100644 (file)
@@ -558,7 +558,8 @@ static void adreno_unbind(struct device *dev, struct device *master,
        struct msm_drm_private *priv = dev_get_drvdata(master);
        struct msm_gpu *gpu = dev_to_gpu(dev);
 
-       WARN_ON_ONCE(adreno_system_suspend(dev));
+       if (pm_runtime_enabled(dev))
+               WARN_ON_ONCE(adreno_system_suspend(dev));
        gpu->funcs->destroy(gpu);
 
        priv->gpu_pdev = NULL;
index cf053e8..497c9e1 100644 (file)
 #include "dpu_hw_catalog.h"
 #include "dpu_kms.h"
 
-#define VIG_MASK \
+#define VIG_BASE_MASK \
        (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
-       BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
+       BIT(DPU_SSPP_CDP) |\
        BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
 
+#define VIG_MASK \
+       (VIG_BASE_MASK | \
+       BIT(DPU_SSPP_CSC_10BIT))
+
 #define VIG_MSM8998_MASK \
        (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
 
 #define VIG_SC7180_MASK \
        (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
 
-#define VIG_SM8250_MASK \
-       (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
-
-#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
 
 #define DMA_MSM8998_MASK \
        (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
@@ -51,7 +52,7 @@
        (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
 
 #define MIXER_MSM8998_MASK \
-       (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+       (BIT(DPU_MIXER_SOURCESPLIT))
 
 #define MIXER_SDM845_MASK \
        (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
@@ -314,10 +315,9 @@ static const struct dpu_caps msm8998_dpu_caps = {
 };
 
 static const struct dpu_caps qcm2290_dpu_caps = {
-       .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
        .max_mixer_blendstages = 0x4,
        .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
-       .ubwc_version = DPU_HW_UBWC_VER_20,
        .has_dim_layer = true,
        .has_idle_pc = true,
        .max_linewidth = 2160,
@@ -353,9 +353,9 @@ static const struct dpu_caps sc7180_dpu_caps = {
 };
 
 static const struct dpu_caps sm6115_dpu_caps = {
-       .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
        .max_mixer_blendstages = 0x4,
-       .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+       .qseed_type = DPU_SSPP_SCALER_QSEED4,
        .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
        .ubwc_version = DPU_HW_UBWC_VER_10,
        .has_dim_layer = true,
@@ -399,7 +399,7 @@ static const struct dpu_caps sc8180x_dpu_caps = {
 static const struct dpu_caps sc8280xp_dpu_caps = {
        .max_mixer_width = 2560,
        .max_mixer_blendstages = 11,
-       .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+       .qseed_type = DPU_SSPP_SCALER_QSEED4,
        .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
        .ubwc_version = DPU_HW_UBWC_VER_40,
        .has_src_split = true,
@@ -413,7 +413,7 @@ static const struct dpu_caps sc8280xp_dpu_caps = {
 static const struct dpu_caps sm8250_dpu_caps = {
        .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
        .max_mixer_blendstages = 0xb,
-       .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+       .qseed_type = DPU_SSPP_SCALER_QSEED4,
        .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
        .ubwc_version = DPU_HW_UBWC_VER_40,
        .has_src_split = true,
@@ -427,7 +427,7 @@ static const struct dpu_caps sm8250_dpu_caps = {
 static const struct dpu_caps sm8350_dpu_caps = {
        .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
        .max_mixer_blendstages = 0xb,
-       .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+       .qseed_type = DPU_SSPP_SCALER_QSEED4,
        .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
        .ubwc_version = DPU_HW_UBWC_VER_40,
        .has_src_split = true,
@@ -455,7 +455,7 @@ static const struct dpu_caps sm8450_dpu_caps = {
 static const struct dpu_caps sm8550_dpu_caps = {
        .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
        .max_mixer_blendstages = 0xb,
-       .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+       .qseed_type = DPU_SSPP_SCALER_QSEED4,
        .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
        .ubwc_version = DPU_HW_UBWC_VER_40,
        .has_src_split = true,
@@ -525,9 +525,9 @@ static const struct dpu_mdp_cfg sdm845_mdp[] = {
                        .reg_off = 0x2AC, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
                        .reg_off = 0x2B4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
                        .reg_off = 0x2BC, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
                        .reg_off = 0x2C4, .bit_off = 8},
        },
 };
@@ -542,9 +542,9 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
                .reg_off = 0x2AC, .bit_off = 0},
        .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
                .reg_off = 0x2AC, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
                .reg_off = 0x2B4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
                .reg_off = 0x2C4, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_WB2] = {
                .reg_off = 0x3B8, .bit_off = 24},
@@ -569,9 +569,9 @@ static const struct dpu_mdp_cfg sc8180x_mdp[] = {
                        .reg_off = 0x2AC, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
                        .reg_off = 0x2B4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
                        .reg_off = 0x2BC, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
                        .reg_off = 0x2C4, .bit_off = 8},
        },
 };
@@ -609,9 +609,9 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
                        .reg_off = 0x2AC, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
                        .reg_off = 0x2B4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
                        .reg_off = 0x2BC, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
                        .reg_off = 0x2C4, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
                        .reg_off = 0x2BC, .bit_off = 20},
@@ -638,9 +638,9 @@ static const struct dpu_mdp_cfg sm8350_mdp[] = {
                        .reg_off = 0x2ac, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
                        .reg_off = 0x2b4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
                        .reg_off = 0x2bc, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
                        .reg_off = 0x2c4, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
                        .reg_off = 0x2bc, .bit_off = 20},
@@ -666,9 +666,9 @@ static const struct dpu_mdp_cfg sm8450_mdp[] = {
                        .reg_off = 0x2AC, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
                        .reg_off = 0x2B4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
                        .reg_off = 0x2BC, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
                        .reg_off = 0x2C4, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
                        .reg_off = 0x2BC, .bit_off = 20},
@@ -685,9 +685,9 @@ static const struct dpu_mdp_cfg sc7280_mdp[] = {
                .reg_off = 0x2AC, .bit_off = 0},
        .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
                .reg_off = 0x2AC, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
                .reg_off = 0x2B4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
                .reg_off = 0x2C4, .bit_off = 8},
        },
 };
@@ -696,7 +696,7 @@ static const struct dpu_mdp_cfg sc8280xp_mdp[] = {
        {
        .name = "top_0", .id = MDP_TOP,
        .base = 0x0, .len = 0x494,
-       .features = 0,
+       .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
        .highest_bank_bit = 2,
        .ubwc_swizzle = 6,
        .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0},
@@ -705,8 +705,8 @@ static const struct dpu_mdp_cfg sc8280xp_mdp[] = {
        .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0},
        .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x2bc, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x2c4, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20},
        },
 };
@@ -734,9 +734,9 @@ static const struct dpu_mdp_cfg sm8550_mdp[] = {
                        .reg_off = 0x28330, .bit_off = 0},
        .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
                        .reg_off = 0x2a330, .bit_off = 0},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA4] = {
                        .reg_off = 0x2c330, .bit_off = 0},
-       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+       .clk_ctrls[DPU_CLK_CTRL_DMA5] = {
                        .reg_off = 0x2e330, .bit_off = 0},
        .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
                        .reg_off = 0x2bc, .bit_off = 20},
@@ -828,19 +828,19 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = {
 static const struct dpu_ctl_cfg sc7180_ctl[] = {
        {
        .name = "ctl_0", .id = CTL_0,
-       .base = 0x1000, .len = 0xE4,
+       .base = 0x1000, .len = 0x1dc,
        .features = BIT(DPU_CTL_ACTIVE_CFG),
        .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
        },
        {
        .name = "ctl_1", .id = CTL_1,
-       .base = 0x1200, .len = 0xE4,
+       .base = 0x1200, .len = 0x1dc,
        .features = BIT(DPU_CTL_ACTIVE_CFG),
        .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
        },
        {
        .name = "ctl_2", .id = CTL_2,
-       .base = 0x1400, .len = 0xE4,
+       .base = 0x1400, .len = 0x1dc,
        .features = BIT(DPU_CTL_ACTIVE_CFG),
        .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
        },
@@ -1190,9 +1190,9 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
        SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_MSM8998_MASK,
                sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_MSM8998_MASK,
-               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
        SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000,  DMA_CURSOR_MSM8998_MASK,
-               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
 };
 
 static const struct dpu_sspp_cfg sdm845_sspp[] = {
@@ -1209,9 +1209,9 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
        SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
        SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
 };
 
 static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
@@ -1226,57 +1226,57 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
        SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
        SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
 };
 
 static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
-                               _VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED4);
 
 static const struct dpu_sspp_cfg sm6115_sspp[] = {
-       SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
                sm6115_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
        SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
 };
 
 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
-                               _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
-                               _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
-                               _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
-                               _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
 
 static const struct dpu_sspp_cfg sm8250_sspp[] = {
-       SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
                sm8250_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
-       SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
                sm8250_vig_sblk_1, 4,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
-       SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
                sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
-       SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
                sm8250_vig_sblk_3, 12,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
        SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
        SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
        SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
 };
 
 static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 =
-                               _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 =
-                               _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 =
-                               _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 =
-                               _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
 
 static const struct dpu_sspp_cfg sm8450_sspp[] = {
        SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
@@ -1292,21 +1292,21 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
        SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
        SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+               sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
 };
 
 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
-                               _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
-                               _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
-                               _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
-                               _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK("12", 5);
-static const struct dpu_sspp_sub_blks sd8550_dma_sblk_5 = _DMA_SBLK("13", 6);
+static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK("13", 6);
 
 static const struct dpu_sspp_cfg sm8550_sspp[] = {
        SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
@@ -1326,9 +1326,9 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
        SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
        SSPP_BLK("sspp_12", SSPP_DMA4, 0x2c000,  DMA_CURSOR_SDM845_MASK,
-               sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA4),
        SSPP_BLK("sspp_13", SSPP_DMA5, 0x2e000,  DMA_CURSOR_SDM845_MASK,
-               sd8550_dma_sblk_5, 15, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+               sm8550_dma_sblk_5, 15, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA5),
 };
 
 static const struct dpu_sspp_cfg sc7280_sspp[] = {
@@ -1337,37 +1337,37 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
        SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
        SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
 };
 
 static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_0 =
-                               _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_1 =
-                               _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_2 =
-                               _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
 static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_3 =
-                               _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+                               _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
 
 static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
-       SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
                 sc8280xp_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
-       SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
                 sc8280xp_vig_sblk_1, 4,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
-       SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
                 sc8280xp_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
-       SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+       SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
                 sc8280xp_vig_sblk_3, 12,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
        SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
                 sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
        SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
                 sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
-                sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+                sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
        SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
-                sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+                sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
 };
 
 #define _VIG_SBLK_NOSCALE(num, sdma_pri) \
@@ -1517,7 +1517,7 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
 /* QCM2290 */
 
 static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
-       .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .maxwidth = DEFAULT_DPU_LINE_WIDTH,
        .maxblendstages = 4, /* excluding base layer */
        .blendstage_base = { /* offsets relative to mixer base */
                0x20, 0x38, 0x50, 0x68
@@ -1714,7 +1714,7 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
 };
 
 static const struct dpu_pingpong_cfg sc7280_pp[] = {
-       PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
        PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
        PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
        PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
@@ -2841,8 +2841,6 @@ static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
        .intf = qcm2290_intf,
        .vbif_count = ARRAY_SIZE(sdm845_vbif),
        .vbif = sdm845_vbif,
-       .reg_dma_count = 1,
-       .dma_cfg = &sdm845_regdma,
        .perf = &qcm2290_perf_data,
        .mdss_irqs = IRQ_SC7180_MASK,
 };
index ddab9ca..e659030 100644 (file)
@@ -515,6 +515,8 @@ enum dpu_clk_ctrl_type {
        DPU_CLK_CTRL_DMA1,
        DPU_CLK_CTRL_DMA2,
        DPU_CLK_CTRL_DMA3,
+       DPU_CLK_CTRL_DMA4,
+       DPU_CLK_CTRL_DMA5,
        DPU_CLK_CTRL_CURSOR0,
        DPU_CLK_CTRL_CURSOR1,
        DPU_CLK_CTRL_INLINE_ROT0_SSPP,
index b88a2f3..6c53ea5 100644 (file)
@@ -446,7 +446,9 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
                         * CTL_LAYER has 3-bit field (and extra bits in EXT register),
                         * all EXT registers has 4-bit fields.
                         */
-                       if (cfg->idx == 0) {
+                       if (cfg->idx == -1) {
+                               continue;
+                       } else if (cfg->idx == 0) {
                                mixercfg[0] |= mix << cfg->shift;
                                mixercfg[1] |= ext << cfg->ext_shift;
                        } else {
index 396429e..66c1b70 100644 (file)
@@ -577,6 +577,8 @@ void dpu_rm_release(struct dpu_global_state *global_state,
                ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
        _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
                ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
+       _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
+               ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
 }
 
 int dpu_rm_reserve(
index be4bf77..ac8ed73 100644 (file)
@@ -637,8 +637,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
        int ret = 0;
        uint32_t i, j;
 
-       post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
-                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+       post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
+                           GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (!post_deps)
                return ERR_PTR(-ENOMEM);
 
@@ -653,7 +653,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                }
 
                post_deps[i].point = syncobj_desc.point;
-               post_deps[i].chain = NULL;
 
                if (syncobj_desc.flags) {
                        ret = -EINVAL;
index 591c852..76a6ae5 100644 (file)
@@ -35,8 +35,9 @@ struct nv50_wndw {
 
 int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
                   enum drm_plane_type, const char *name, int index,
-                  const u32 *format, enum nv50_disp_interlock_type,
-                  u32 interlock_data, u32 heads, struct nv50_wndw **);
+                  const u32 *format, u32 heads,
+                  enum nv50_disp_interlock_type, u32 interlock_data,
+                  struct nv50_wndw **);
 void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
                         struct nv50_wndw_atom *);
 void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
index c5a4f49..01a22a1 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef __NVKM_FB_H__
 #define __NVKM_FB_H__
 #include <core/subdev.h>
+#include <core/falcon.h>
 #include <core/mm.h>
 
 /* memory type/access flags, do not match hardware values */
@@ -33,7 +34,7 @@ struct nvkm_fb {
        const struct nvkm_fb_func *func;
        struct nvkm_subdev subdev;
 
-       struct nvkm_blob vpr_scrubber;
+       struct nvkm_falcon_fw vpr_scrubber;
 
        struct {
                struct page *flush_page;
index bac7dcc..0955340 100644 (file)
@@ -143,6 +143,10 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
        if (!fb->func->vpr.scrub_required)
                return 0;
 
+       ret = nvkm_subdev_oneinit(subdev);
+       if (ret)
+               return ret;
+
        if (!fb->func->vpr.scrub_required(fb)) {
                nvkm_debug(subdev, "VPR not locked\n");
                return 0;
@@ -150,7 +154,7 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
 
        nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
 
-       if (!fb->vpr_scrubber.size) {
+       if (!fb->vpr_scrubber.fw.img) {
                nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
                return 0;
        }
@@ -229,7 +233,7 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
 
        nvkm_ram_del(&fb->ram);
 
-       nvkm_blob_dtor(&fb->vpr_scrubber);
+       nvkm_falcon_fw_dtor(&fb->vpr_scrubber);
 
        if (fb->sysmem.flush_page) {
                dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
index 5098f21..a7456e7 100644 (file)
@@ -37,5 +37,5 @@ ga100_fb = {
 int
 ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-       return gp102_fb_new_(&ga100_fb, device, type, inst, pfb);
+       return gf100_fb_new_(&ga100_fb, device, type, inst, pfb);
 }
index 5a21b0a..dd476e0 100644 (file)
 #include <engine/nvdec.h>
 
 static int
-ga102_fb_vpr_scrub(struct nvkm_fb *fb)
+ga102_fb_oneinit(struct nvkm_fb *fb)
 {
-       struct nvkm_falcon_fw fw = {};
-       int ret;
+       struct nvkm_subdev *subdev = &fb->subdev;
 
-       ret = nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", &fb->subdev, "nvdec/scrubber",
-                                       0, &fb->subdev.device->nvdec[0]->falcon, &fw);
-       if (ret)
-               return ret;
+       nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", subdev, "nvdec/scrubber",
+                                 0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
 
-       ret = nvkm_falcon_fw_boot(&fw, &fb->subdev, true, NULL, NULL, 0, 0);
-       nvkm_falcon_fw_dtor(&fw);
-       return ret;
+       return gf100_fb_oneinit(fb);
 }
 
 static const struct nvkm_fb_func
 ga102_fb = {
        .dtor = gf100_fb_dtor,
-       .oneinit = gf100_fb_oneinit,
+       .oneinit = ga102_fb_oneinit,
        .init = gm200_fb_init,
        .init_page = gv100_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
@@ -51,13 +46,13 @@ ga102_fb = {
        .ram_new = ga102_ram_new,
        .default_bigpage = 16,
        .vpr.scrub_required = tu102_fb_vpr_scrub_required,
-       .vpr.scrub = ga102_fb_vpr_scrub,
+       .vpr.scrub = gp102_fb_vpr_scrub,
 };
 
 int
 ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-       return gp102_fb_new_(&ga102_fb, device, type, inst, pfb);
+       return gf100_fb_new_(&ga102_fb, device, type, inst, pfb);
 }
 
 MODULE_FIRMWARE("nvidia/ga102/nvdec/scrubber.bin");
index 2658481..14d942e 100644 (file)
 int
 gp102_fb_vpr_scrub(struct nvkm_fb *fb)
 {
-       struct nvkm_subdev *subdev = &fb->subdev;
-       struct nvkm_falcon_fw fw = {};
-       int ret;
-
-       ret = nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL,
-                                    "nvdec/scrubber", 0, &subdev->device->nvdec[0]->falcon, &fw);
-       if (ret)
-               return ret;
-
-       ret = nvkm_falcon_fw_boot(&fw, subdev, true, NULL, NULL, 0, 0x00000000);
-       nvkm_falcon_fw_dtor(&fw);
-       return ret;
+       return nvkm_falcon_fw_boot(&fb->vpr_scrubber, &fb->subdev, true, NULL, NULL, 0, 0x00000000);
 }
 
 bool
@@ -51,10 +40,21 @@ gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
        return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
 }
 
+int
+gp102_fb_oneinit(struct nvkm_fb *fb)
+{
+       struct nvkm_subdev *subdev = &fb->subdev;
+
+       nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL, "nvdec/scrubber",
+                              0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
+
+       return gf100_fb_oneinit(fb);
+}
+
 static const struct nvkm_fb_func
 gp102_fb = {
        .dtor = gf100_fb_dtor,
-       .oneinit = gf100_fb_oneinit,
+       .oneinit = gp102_fb_oneinit,
        .init = gm200_fb_init,
        .init_remapper = gp100_fb_init_remapper,
        .init_page = gm200_fb_init_page,
@@ -65,22 +65,9 @@ gp102_fb = {
 };
 
 int
-gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
-             enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
-{
-       int ret = gf100_fb_new_(func, device, type, inst, pfb);
-       if (ret)
-               return ret;
-
-       nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0,
-                               &(*pfb)->vpr_scrubber);
-       return 0;
-}
-
-int
 gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-       return gp102_fb_new_(&gp102_fb, device, type, inst, pfb);
+       return gf100_fb_new_(&gp102_fb, device, type, inst, pfb);
 }
 
 MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
index 0e3c0a8..4d8a286 100644 (file)
@@ -31,7 +31,7 @@ gv100_fb_init_page(struct nvkm_fb *fb)
 static const struct nvkm_fb_func
 gv100_fb = {
        .dtor = gf100_fb_dtor,
-       .oneinit = gf100_fb_oneinit,
+       .oneinit = gp102_fb_oneinit,
        .init = gm200_fb_init,
        .init_page = gv100_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
@@ -45,7 +45,7 @@ gv100_fb = {
 int
 gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-       return gp102_fb_new_(&gv100_fb, device, type, inst, pfb);
+       return gf100_fb_new_(&gv100_fb, device, type, inst, pfb);
 }
 
 MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
index f517751..726c30c 100644 (file)
@@ -83,8 +83,7 @@ int gm200_fb_init_page(struct nvkm_fb *);
 void gp100_fb_init_remapper(struct nvkm_fb *);
 void gp100_fb_init_unkn(struct nvkm_fb *);
 
-int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
-                 struct nvkm_fb **);
+int gp102_fb_oneinit(struct nvkm_fb *);
 bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
 int gp102_fb_vpr_scrub(struct nvkm_fb *);
 
index be82af0..b8803c1 100644 (file)
@@ -31,7 +31,7 @@ tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
 static const struct nvkm_fb_func
 tu102_fb = {
        .dtor = gf100_fb_dtor,
-       .oneinit = gf100_fb_oneinit,
+       .oneinit = gp102_fb_oneinit,
        .init = gm200_fb_init,
        .init_page = gv100_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
@@ -45,7 +45,7 @@ tu102_fb = {
 int
 tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
-       return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
+       return gf100_fb_new_(&tu102_fb, device, type, inst, pfb);
 }
 
 MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
index cf35b60..accfa52 100644 (file)
@@ -455,7 +455,7 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
        if (state->fb && cirrus->cpp != cirrus_cpp(state->fb))
                cirrus_mode_set(cirrus, &crtc->mode, state->fb);
 
-       if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+       if (state->fb && drm_atomic_helper_damage_merged(old_state, state, &rect))
                cirrus_fb_blit_rect(state->fb, &shadow_plane_state->data[0], &rect);
 }
 
index 842afc8..22623eb 100644 (file)
@@ -256,6 +256,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 {
        struct hid_report *report;
        struct hid_field *field;
+       unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
        unsigned int usages;
        unsigned int offset;
        unsigned int i;
@@ -286,8 +287,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
        offset = report->size;
        report->size += parser->global.report_size * parser->global.report_count;
 
+       if (parser->device->ll_driver->max_buffer_size)
+               max_buffer_size = parser->device->ll_driver->max_buffer_size;
+
        /* Total size check: Allow for possible report index byte */
-       if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+       if (report->size > (max_buffer_size - 1) << 3) {
                hid_err(parser->device, "report is too long\n");
                return -1;
        }
@@ -1963,6 +1967,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
        struct hid_report_enum *report_enum = hid->report_enum + type;
        struct hid_report *report;
        struct hid_driver *hdrv;
+       int max_buffer_size = HID_MAX_BUFFER_SIZE;
        u32 rsize, csize = size;
        u8 *cdata = data;
        int ret = 0;
@@ -1978,10 +1983,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
 
        rsize = hid_compute_report_size(report);
 
-       if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
-               rsize = HID_MAX_BUFFER_SIZE - 1;
-       else if (rsize > HID_MAX_BUFFER_SIZE)
-               rsize = HID_MAX_BUFFER_SIZE;
+       if (hid->ll_driver->max_buffer_size)
+               max_buffer_size = hid->ll_driver->max_buffer_size;
+
+       if (report_enum->numbered && rsize >= max_buffer_size)
+               rsize = max_buffer_size - 1;
+       else if (rsize > max_buffer_size)
+               rsize = max_buffer_size;
 
        if (csize < rsize) {
                dbg_hid("report %d is too short, (%d < %d)\n", report->id,
@@ -2396,7 +2404,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
                       unsigned char reportnum, __u8 *buf,
                       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
 {
-       if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+       unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+
+       if (hdev->ll_driver->max_buffer_size)
+               max_buffer_size = hdev->ll_driver->max_buffer_size;
+
+       if (len < 1 || len > max_buffer_size || !buf)
                return -EINVAL;
 
        return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
@@ -2415,7 +2428,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
  */
 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
 {
-       if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+       unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+
+       if (hdev->ll_driver->max_buffer_size)
+               max_buffer_size = hdev->ll_driver->max_buffer_size;
+
+       if (len < 1 || len > max_buffer_size || !buf)
                return -EINVAL;
 
        if (hdev->ll_driver->output_report)
index 1e16b0f..27cadad 100644 (file)
@@ -1354,6 +1354,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
        girq->parents = NULL;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_simple_irq;
+       girq->threaded = true;
 
        ret = gpiochip_add_data(&dev->gc, dev);
        if (ret < 0) {
index 25dcda7..5fc88a0 100644 (file)
@@ -4399,6 +4399,8 @@ static const struct hid_device_id hidpp_devices[] = {
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) },
        { /* MX Master 3 mouse over Bluetooth */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
+       { /* MX Master 3S mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
        {}
 };
 
index 15e1423..a49c6af 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (c) 2014-2016, Intel Corporation.
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/delay.h>
@@ -621,7 +622,6 @@ static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
        case MNG_RESET_NOTIFY:
                if (!ishtp_dev) {
                        ishtp_dev = dev;
-                       INIT_WORK(&fw_reset_work, fw_reset_work_fn);
                }
                schedule_work(&fw_reset_work);
                break;
@@ -940,6 +940,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
 {
        struct ishtp_device *dev;
        int     i;
+       int     ret;
 
        dev = devm_kzalloc(&pdev->dev,
                           sizeof(struct ishtp_device) + sizeof(struct ish_hw),
@@ -975,6 +976,12 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
                list_add_tail(&tx_buf->link, &dev->wr_free_list);
        }
 
+       ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
+       if (ret) {
+               dev_err(dev->devc, "Failed to initialise FW reset work\n");
+               return NULL;
+       }
+
        dev->ops = &ish_hw_ops;
        dev->devc = &pdev->dev;
        dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
index f161c95..4588d2c 100644 (file)
@@ -395,6 +395,7 @@ static const struct hid_ll_driver uhid_hid_driver = {
        .parse = uhid_hid_parse,
        .raw_request = uhid_hid_raw_request,
        .output_report = uhid_hid_output_report,
+       .max_buffer_size = UHID_DATA_MAX,
 };
 
 #ifdef CONFIG_COMPAT
index cb5fa97..ae3af73 100644 (file)
@@ -561,15 +561,8 @@ static int i2c_device_probe(struct device *dev)
                goto err_detach_pm_domain;
        }
 
-       /*
-        * When there are no more users of probe(),
-        * rename probe_new to probe.
-        */
-       if (driver->probe_new)
-               status = driver->probe_new(client);
-       else if (driver->probe)
-               status = driver->probe(client,
-                                      i2c_match_id(driver->id_table, client));
+       if (driver->probe)
+               status = driver->probe(client);
        else
                status = -EINVAL;
 
@@ -1057,7 +1050,7 @@ static int dummy_probe(struct i2c_client *client)
 
 static struct i2c_driver dummy_driver = {
        .driver.name    = "dummy",
-       .probe_new      = dummy_probe,
+       .probe          = dummy_probe,
        .id_table       = dummy_id,
 };
 
index 107623c..95a0b63 100644 (file)
@@ -646,7 +646,7 @@ static void i2cdev_dev_release(struct device *dev)
        kfree(i2c_dev);
 }
 
-static int i2cdev_attach_adapter(struct device *dev, void *dummy)
+static int i2cdev_attach_adapter(struct device *dev)
 {
        struct i2c_adapter *adap;
        struct i2c_dev *i2c_dev;
@@ -685,7 +685,7 @@ err_put_i2c_dev:
        return NOTIFY_DONE;
 }
 
-static int i2cdev_detach_adapter(struct device *dev, void *dummy)
+static int i2cdev_detach_adapter(struct device *dev)
 {
        struct i2c_adapter *adap;
        struct i2c_dev *i2c_dev;
@@ -711,9 +711,9 @@ static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
 
        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
-               return i2cdev_attach_adapter(dev, NULL);
+               return i2cdev_attach_adapter(dev);
        case BUS_NOTIFY_DEL_DEVICE:
-               return i2cdev_detach_adapter(dev, NULL);
+               return i2cdev_detach_adapter(dev);
        }
 
        return NOTIFY_DONE;
@@ -725,6 +725,18 @@ static struct notifier_block i2cdev_notifier = {
 
 /* ------------------------------------------------------------------------- */
 
+static int __init i2c_dev_attach_adapter(struct device *dev, void *dummy)
+{
+       i2cdev_attach_adapter(dev);
+       return 0;
+}
+
+static int __exit i2c_dev_detach_adapter(struct device *dev, void *dummy)
+{
+       i2cdev_detach_adapter(dev);
+       return 0;
+}
+
 /*
  * module load/unload record keeping
  */
@@ -752,7 +764,7 @@ static int __init i2c_dev_init(void)
                goto out_unreg_class;
 
        /* Bind to already existing adapters right away */
-       i2c_for_each_dev(NULL, i2cdev_attach_adapter);
+       i2c_for_each_dev(NULL, i2c_dev_attach_adapter);
 
        return 0;
 
@@ -768,7 +780,7 @@ out:
 static void __exit i2c_dev_exit(void)
 {
        bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
-       i2c_for_each_dev(NULL, i2cdev_detach_adapter);
+       i2c_for_each_dev(NULL, i2c_dev_detach_adapter);
        class_destroy(i2c_dev_class);
        unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
 }
index 5f25f23..5946c0d 100644 (file)
@@ -207,7 +207,7 @@ static struct i2c_driver i2c_slave_eeprom_driver = {
        .driver = {
                .name = "i2c-slave-eeprom",
        },
-       .probe_new = i2c_slave_eeprom_probe,
+       .probe = i2c_slave_eeprom_probe,
        .remove = i2c_slave_eeprom_remove,
        .id_table = i2c_slave_eeprom_id,
 };
index 75ee7eb..a49642b 100644 (file)
@@ -171,7 +171,7 @@ static struct i2c_driver i2c_slave_testunit_driver = {
        .driver = {
                .name = "i2c-slave-testunit",
        },
-       .probe_new = i2c_slave_testunit_probe,
+       .probe = i2c_slave_testunit_probe,
        .remove = i2c_slave_testunit_remove,
        .id_table = i2c_slave_testunit_id,
 };
index cd19546..138c3f5 100644 (file)
@@ -169,7 +169,7 @@ static struct i2c_driver smbalert_driver = {
        .driver = {
                .name   = "smbus_alert",
        },
-       .probe_new      = smbalert_probe,
+       .probe          = smbalert_probe,
        .remove         = smbalert_remove,
        .id_table       = smbalert_ids,
 };
index 7083582..5a03031 100644 (file)
@@ -306,7 +306,7 @@ static struct i2c_driver ltc4306_driver = {
                .name   = "ltc4306",
                .of_match_table = of_match_ptr(ltc4306_of_match),
        },
-       .probe_new      = ltc4306_probe,
+       .probe          = ltc4306_probe,
        .remove         = ltc4306_remove,
        .id_table       = ltc4306_id,
 };
index 09d1d9e..ce0fb69 100644 (file)
@@ -336,7 +336,7 @@ static struct i2c_driver pca9541_driver = {
                   .name = "pca9541",
                   .of_match_table = of_match_ptr(pca9541_of_match),
                   },
-       .probe_new = pca9541_probe,
+       .probe = pca9541_probe,
        .remove = pca9541_remove,
        .id_table = pca9541_id,
 };
index 3639e6d..0ccee2a 100644 (file)
@@ -554,7 +554,7 @@ static struct i2c_driver pca954x_driver = {
                .pm     = &pca954x_pm,
                .of_match_table = pca954x_of_match,
        },
-       .probe_new      = pca954x_probe,
+       .probe          = pca954x_probe,
        .remove         = pca954x_remove,
        .id_table       = pca954x_id,
 };
index a3b524f..1c80b12 100644 (file)
@@ -707,8 +707,7 @@ static int ov2685_configure_regulators(struct ov2685 *ov2685)
                                       ov2685->supplies);
 }
 
-static int ov2685_probe(struct i2c_client *client,
-                       const struct i2c_device_id *id)
+static int ov2685_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
        struct ov2685 *ov2685;
@@ -830,7 +829,7 @@ static struct i2c_driver ov2685_i2c_driver = {
                .pm = &ov2685_pm_ops,
                .of_match_table = of_match_ptr(ov2685_of_match),
        },
-       .probe          = &ov2685_probe,
+       .probe_new      = &ov2685_probe,
        .remove         = &ov2685_remove,
 };
 
index 61906fc..b287c28 100644 (file)
@@ -1267,8 +1267,7 @@ static int ov5695_configure_regulators(struct ov5695 *ov5695)
                                       ov5695->supplies);
 }
 
-static int ov5695_probe(struct i2c_client *client,
-                       const struct i2c_device_id *id)
+static int ov5695_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
        struct ov5695 *ov5695;
@@ -1393,7 +1392,7 @@ static struct i2c_driver ov5695_i2c_driver = {
                .pm = &ov5695_pm_ops,
                .of_match_table = of_match_ptr(ov5695_of_match),
        },
-       .probe          = &ov5695_probe,
+       .probe_new      = &ov5695_probe,
        .remove         = &ov5695_remove,
 };
 
index 28ffb43..3856d5c 100644 (file)
@@ -50,9 +50,9 @@ static const struct ad_dpot_bus_ops bops = {
        .write_r8d16    = write_r8d16,
 };
 
-static int ad_dpot_i2c_probe(struct i2c_client *client,
-                                     const struct i2c_device_id *id)
+static int ad_dpot_i2c_probe(struct i2c_client *client)
 {
+       const struct i2c_device_id *id = i2c_client_get_device_id(client);
        struct ad_dpot_bus_data bdata = {
                .client = client,
                .bops = &bops,
@@ -106,7 +106,7 @@ static struct i2c_driver ad_dpot_i2c_driver = {
        .driver = {
                .name   = "ad_dpot",
        },
-       .probe          = ad_dpot_i2c_probe,
+       .probe_new      = ad_dpot_i2c_probe,
        .remove         = ad_dpot_i2c_remove,
        .id_table       = ad_dpot_id,
 };
index 5fcefcd..3e0fff3 100644 (file)
@@ -206,8 +206,7 @@ static void pismo_remove(struct i2c_client *client)
        kfree(pismo);
 }
 
-static int pismo_probe(struct i2c_client *client,
-                      const struct i2c_device_id *id)
+static int pismo_probe(struct i2c_client *client)
 {
        struct pismo_pdata *pdata = client->dev.platform_data;
        struct pismo_eeprom eeprom;
@@ -260,7 +259,7 @@ static struct i2c_driver pismo_driver = {
        .driver = {
                .name   = "pismo",
        },
-       .probe          = pismo_probe,
+       .probe_new      = pismo_probe,
        .remove         = pismo_remove,
        .id_table       = pismo_id,
 };
index 1de8706..3711d7f 100644 (file)
@@ -221,7 +221,10 @@ static blk_status_t ubiblock_read(struct request *req)
 
        rq_for_each_segment(bvec, req, iter)
                flush_dcache_page(bvec.bv_page);
-       return errno_to_blk_status(ret);
+
+       blk_mq_end_request(req, errno_to_blk_status(ret));
+
+       return BLK_STS_OK;
 }
 
 static int ubiblock_open(struct block_device *bdev, fmode_t mode)
index 3a15015..a508402 100644 (file)
@@ -393,6 +393,24 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
                mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
 }
 
+/* Set up switch core clock for MT7530 */
+static void mt7530_pll_setup(struct mt7530_priv *priv)
+{
+       /* Disable PLL */
+       core_write(priv, CORE_GSWPLL_GRP1, 0);
+
+       /* Set core clock into 500Mhz */
+       core_write(priv, CORE_GSWPLL_GRP2,
+                  RG_GSWPLL_POSDIV_500M(1) |
+                  RG_GSWPLL_FBKDIV_500M(25));
+
+       /* Enable PLL */
+       core_write(priv, CORE_GSWPLL_GRP1,
+                  RG_GSWPLL_EN_PRE |
+                  RG_GSWPLL_POSDIV_200M(2) |
+                  RG_GSWPLL_FBKDIV_200M(32));
+}
+
 /* Setup TX circuit including relevant PAD and driving */
 static int
 mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
@@ -453,21 +471,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
        core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
                   REG_GSWCK_EN | REG_TRGMIICK_EN);
 
-       /* Setup core clock for MT7530 */
-       /* Disable PLL */
-       core_write(priv, CORE_GSWPLL_GRP1, 0);
-
-       /* Set core clock into 500Mhz */
-       core_write(priv, CORE_GSWPLL_GRP2,
-                  RG_GSWPLL_POSDIV_500M(1) |
-                  RG_GSWPLL_FBKDIV_500M(25));
-
-       /* Enable PLL */
-       core_write(priv, CORE_GSWPLL_GRP1,
-                  RG_GSWPLL_EN_PRE |
-                  RG_GSWPLL_POSDIV_200M(2) |
-                  RG_GSWPLL_FBKDIV_200M(32));
-
        /* Setup the MT7530 TRGMII Tx Clock */
        core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
        core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
@@ -2196,6 +2199,8 @@ mt7530_setup(struct dsa_switch *ds)
                     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
                     SYS_CTRL_REG_RST);
 
+       mt7530_pll_setup(priv);
+
        /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
        val = mt7530_read(priv, MT7530_MHWTRAP);
        val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
index 323ec56..1917da7 100644 (file)
@@ -132,6 +132,16 @@ source "drivers/net/ethernet/mscc/Kconfig"
 source "drivers/net/ethernet/microsoft/Kconfig"
 source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
+
+config FEALNX
+       tristate "Myson MTD-8xx PCI Ethernet support"
+       depends on PCI
+       select CRC32
+       select MII
+       help
+         Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
+         cards. <http://www.myson.com.tw/>
+
 source "drivers/net/ethernet/ni/Kconfig"
 source "drivers/net/ethernet/natsemi/Kconfig"
 source "drivers/net/ethernet/neterion/Kconfig"
index 2fedbaa..0d872d4 100644 (file)
@@ -64,6 +64,7 @@ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
 obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
 obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
+obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
 obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
 obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
index 3038386..1761df8 100644 (file)
@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
 
                if (iost & BGMAC_BCMA_IOST_ATTACHED) {
                        flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
-                       if (!bgmac->has_robosw)
+                       if (bgmac->in_init || !bgmac->has_robosw)
                                flags |= BGMAC_BCMA_IOCTL_SW_RESET;
                }
                bgmac_clk_enable(bgmac, flags);
        }
 
-       if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+       if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
                bgmac_idm_write(bgmac, BCMA_IOCTL,
                                bgmac_idm_read(bgmac, BCMA_IOCTL) &
                                ~BGMAC_BCMA_IOCTL_SW_RESET);
@@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
        struct net_device *net_dev = bgmac->net_dev;
        int err;
 
+       bgmac->in_init = true;
+
        bgmac_chip_intrs_off(bgmac);
 
        net_dev->irq = bgmac->irq;
@@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
        /* Omit FCS from max MTU size */
        net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
 
+       bgmac->in_init = false;
+
        err = register_netdev(bgmac->net_dev);
        if (err) {
                dev_err(bgmac->dev, "Cannot register net device\n");
index e05ac92..d73ef26 100644 (file)
@@ -472,6 +472,8 @@ struct bgmac {
        int irq;
        u32 int_mask;
 
+       bool in_init;
+
        /* Current MAC state */
        int mac_speed;
        int mac_duplex;
index 5d4b1f2..808236d 100644 (file)
@@ -3145,7 +3145,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 
 static void bnxt_free_tpa_info(struct bnxt *bp)
 {
-       int i;
+       int i, j;
 
        for (i = 0; i < bp->rx_nr_rings; i++) {
                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -3153,8 +3153,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
                kfree(rxr->rx_tpa_idx_map);
                rxr->rx_tpa_idx_map = NULL;
                if (rxr->rx_tpa) {
-                       kfree(rxr->rx_tpa[0].agg_arr);
-                       rxr->rx_tpa[0].agg_arr = NULL;
+                       for (j = 0; j < bp->max_tpa; j++) {
+                               kfree(rxr->rx_tpa[j].agg_arr);
+                               rxr->rx_tpa[j].agg_arr = NULL;
+                       }
                }
                kfree(rxr->rx_tpa);
                rxr->rx_tpa = NULL;
@@ -3163,14 +3165,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
 
 static int bnxt_alloc_tpa_info(struct bnxt *bp)
 {
-       int i, j, total_aggs = 0;
+       int i, j;
 
        bp->max_tpa = MAX_TPA;
        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                if (!bp->max_tpa_v2)
                        return 0;
                bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
-               total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
        }
 
        for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -3184,12 +3185,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
 
                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
                        continue;
-               agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
-               rxr->rx_tpa[0].agg_arr = agg;
-               if (!agg)
-                       return -ENOMEM;
-               for (j = 1; j < bp->max_tpa; j++)
-                       rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+               for (j = 0; j < bp->max_tpa; j++) {
+                       agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+                       if (!agg)
+                               return -ENOMEM;
+                       rxr->rx_tpa[j].agg_arr = agg;
+               }
                rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
                                              GFP_KERNEL);
                if (!rxr->rx_tpa_idx_map)
@@ -13204,8 +13205,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        bnxt_free_hwrm_resources(bp);
        bnxt_ethtool_free(bp);
        bnxt_dcb_free(bp);
-       kfree(bp->edev);
-       bp->edev = NULL;
        kfree(bp->ptp_cfg);
        bp->ptp_cfg = NULL;
        kfree(bp->fw_health);
index d4cc9c3..e7b5e28 100644 (file)
@@ -317,9 +317,11 @@ static void bnxt_aux_dev_release(struct device *dev)
 {
        struct bnxt_aux_priv *aux_priv =
                container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+       struct bnxt *bp = netdev_priv(aux_priv->edev->net);
 
        ida_free(&bnxt_aux_dev_ids, aux_priv->id);
        kfree(aux_priv->edev->ulp_tbl);
+       bp->edev = NULL;
        kfree(aux_priv->edev);
        kfree(aux_priv);
 }
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
new file mode 100644 (file)
index 0000000..ed18450
--- /dev/null
@@ -0,0 +1,1953 @@
+/*
+       Written 1998-2000 by Donald Becker.
+
+       This software may be used and distributed according to the terms of
+       the GNU General Public License (GPL), incorporated herein by reference.
+       Drivers based on or derived from this code fall under the GPL and must
+       retain the authorship, copyright and license notice.  This file is not
+       a complete program and may only be used when the entire operating
+       system is licensed under the GPL.
+
+       The author may be reached as becker@scyld.com, or C/O
+       Scyld Computing Corporation
+       410 Severn Ave., Suite 210
+       Annapolis MD 21403
+
+       Support information and updates available at
+       http://www.scyld.com/network/pci-skeleton.html
+
+       Linux kernel updates:
+
+       Version 2.51, Nov 17, 2001 (jgarzik):
+       - Add ethtool support
+       - Replace some MII-related magic numbers with constants
+
+*/
+
+#define DRV_NAME       "fealnx"
+
+static int debug;              /* 1-> print debug message */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
+/* Setting to > 1518 effectively disables this feature.          */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc.                            */
+/* Both 'options[]' and 'full_duplex[]' should exist for driver */
+/* interoperability.                                            */
+/* The media type is usually passed in 'options[]'.             */
+#define MAX_UNITS 8            /* More are supported, limit only on options */
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+
+/* Operational parameters that are set at compile time.                 */
+/* Keep the ring sizes a power of two for compile efficiency.           */
+/* The compiler will convert <unsigned>'%'<2^N> into a bit mask.        */
+/* Making the Tx ring too large decreases the effectiveness of channel  */
+/* bonding and packet priority.                                         */
+/* There are no ill effects from too-large receive rings.               */
+// 88-12-9 modify,
+// #define TX_RING_SIZE    16
+// #define RX_RING_SIZE    32
+#define TX_RING_SIZE    6
+#define RX_RING_SIZE    12
+#define TX_TOTAL_SIZE  TX_RING_SIZE*sizeof(struct fealnx_desc)
+#define RX_TOTAL_SIZE  RX_RING_SIZE*sizeof(struct fealnx_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT      (2*HZ)
+
+#define PKT_BUF_SZ      1536   /* Size of each temporary Rx buffer. */
+
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>     /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+
+/* This driver was written to use PCI memory space, however some x86 systems
+   work only with I/O space accesses. */
+#ifndef __alpha__
+#define USE_IO_OPS
+#endif
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
+/* This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+MODULE_AUTHOR("Myson or whoever");
+MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
+MODULE_LICENSE("GPL");
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
+MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
+
+enum {
+       MIN_REGION_SIZE         = 136,
+};
+
+/* A chip capabilities table, matching the entries in pci_tbl[] above. */
+enum chip_capability_flags {
+       HAS_MII_XCVR,
+       HAS_CHIP_XCVR,
+};
+
+/* 89/6/13 add, */
+/* for different PHY */
+enum phy_type_flags {
+       MysonPHY = 1,
+       AhdocPHY = 2,
+       SeeqPHY = 3,
+       MarvellPHY = 4,
+       Myson981 = 5,
+       LevelOnePHY = 6,
+       OtherPHY = 10,
+};
+
+struct chip_info {
+       char *chip_name;
+       int flags;
+};
+
+static const struct chip_info skel_netdrv_tbl[] = {
+       { "100/10M Ethernet PCI Adapter",       HAS_MII_XCVR },
+       { "100/10M Ethernet PCI Adapter",       HAS_CHIP_XCVR },
+       { "1000/100/10M Ethernet PCI Adapter",  HAS_MII_XCVR },
+};
+
+/* Offsets to the Command and Status Registers. */
+enum fealnx_offsets {
+       PAR0 = 0x0,             /* physical address 0-3 */
+       PAR1 = 0x04,            /* physical address 4-5 */
+       MAR0 = 0x08,            /* multicast address 0-3 */
+       MAR1 = 0x0C,            /* multicast address 4-7 */
+       FAR0 = 0x10,            /* flow-control address 0-3 */
+       FAR1 = 0x14,            /* flow-control address 4-5 */
+       TCRRCR = 0x18,          /* receive & transmit configuration */
+       BCR = 0x1C,             /* bus command */
+       TXPDR = 0x20,           /* transmit polling demand */
+       RXPDR = 0x24,           /* receive polling demand */
+       RXCWP = 0x28,           /* receive current word pointer */
+       TXLBA = 0x2C,           /* transmit list base address */
+       RXLBA = 0x30,           /* receive list base address */
+       ISR = 0x34,             /* interrupt status */
+       IMR = 0x38,             /* interrupt mask */
+       FTH = 0x3C,             /* flow control high/low threshold */
+       MANAGEMENT = 0x40,      /* bootrom/eeprom and mii management */
+       TALLY = 0x44,           /* tally counters for crc and mpa */
+       TSR = 0x48,             /* tally counter for transmit status */
+       BMCRSR = 0x4c,          /* basic mode control and status */
+       PHYIDENTIFIER = 0x50,   /* phy identifier */
+       ANARANLPAR = 0x54,      /* auto-negotiation advertisement and link
+                                  partner ability */
+       ANEROCR = 0x58,         /* auto-negotiation expansion and pci conf. */
+       BPREMRPSR = 0x5c,       /* bypass & receive error mask and phy status */
+};
+
+/* Bits in the interrupt status/enable registers. */
+/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+       RFCON = 0x00020000,     /* receive flow control xon packet */
+       RFCOFF = 0x00010000,    /* receive flow control xoff packet */
+       LSCStatus = 0x00008000, /* link status change */
+       ANCStatus = 0x00004000, /* autonegotiation completed */
+       FBE = 0x00002000,       /* fatal bus error */
+       FBEMask = 0x00001800,   /* mask bit12-11 */
+       ParityErr = 0x00000000, /* parity error */
+       TargetErr = 0x00001000, /* target abort */
+       MasterErr = 0x00000800, /* master error */
+       TUNF = 0x00000400,      /* transmit underflow */
+       ROVF = 0x00000200,      /* receive overflow */
+       ETI = 0x00000100,       /* transmit early int */
+       ERI = 0x00000080,       /* receive early int */
+       CNTOVF = 0x00000040,    /* counter overflow */
+       RBU = 0x00000020,       /* receive buffer unavailable */
+       TBU = 0x00000010,       /* transmit buffer unavilable */
+       TI = 0x00000008,        /* transmit interrupt */
+       RI = 0x00000004,        /* receive interrupt */
+       RxErr = 0x00000002,     /* receive error */
+};
+
+/* Bits in the NetworkConfig register, W for writing, R for reading */
+/* FIXME: some names are invented by me. Marked with (name?) */
+/* If you have docs and know bit names, please fix 'em */
+enum rx_mode_bits {
+       CR_W_ENH        = 0x02000000,   /* enhanced mode (name?) */
+       CR_W_FD         = 0x00100000,   /* full duplex */
+       CR_W_PS10       = 0x00080000,   /* 10 mbit */
+       CR_W_TXEN       = 0x00040000,   /* tx enable (name?) */
+       CR_W_PS1000     = 0x00010000,   /* 1000 mbit */
+     /* CR_W_RXBURSTMASK= 0x00000e00, Im unsure about this */
+       CR_W_RXMODEMASK = 0x000000e0,
+       CR_W_PROM       = 0x00000080,   /* promiscuous mode */
+       CR_W_AB         = 0x00000040,   /* accept broadcast */
+       CR_W_AM         = 0x00000020,   /* accept mutlicast */
+       CR_W_ARP        = 0x00000008,   /* receive runt pkt */
+       CR_W_ALP        = 0x00000004,   /* receive long pkt */
+       CR_W_SEP        = 0x00000002,   /* receive error pkt */
+       CR_W_RXEN       = 0x00000001,   /* rx enable (unicast?) (name?) */
+
+       CR_R_TXSTOP     = 0x04000000,   /* tx stopped (name?) */
+       CR_R_FD         = 0x00100000,   /* full duplex detected */
+       CR_R_PS10       = 0x00080000,   /* 10 mbit detected */
+       CR_R_RXSTOP     = 0x00008000,   /* rx stopped (name?) */
+};
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct fealnx_desc {
+       s32 status;
+       s32 control;
+       u32 buffer;
+       u32 next_desc;
+       struct fealnx_desc *next_desc_logical;
+       struct sk_buff *skbuff;
+       u32 reserved1;
+       u32 reserved2;
+};
+
+/* Bits in network_desc.status (receive side) */
+enum rx_desc_status_bits {
+       RXOWN = 0x80000000,     /* own bit: set while the NIC owns the desc */
+       FLNGMASK = 0x0fff0000,  /* frame length */
+       FLNGShift = 16,
+       MARSTATUS = 0x00004000, /* multicast address received */
+       BARSTATUS = 0x00002000, /* broadcast address received */
+       PHYSTATUS = 0x00001000, /* physical address received */
+       RXFSD = 0x00000800,     /* first descriptor */
+       RXLSD = 0x00000400,     /* last descriptor */
+       ErrorSummary = 0x80,    /* error summary */
+       RUNTPKT = 0x40,         /* runt packet received */
+       LONGPKT = 0x20,         /* long packet received */
+       FAE = 0x10,             /* frame align error */
+       CRC = 0x08,             /* crc error */
+       RXER = 0x04,            /* receive error */
+};
+
+/* Bits in network_desc.control (receive side) */
+enum rx_desc_control_bits {
+       RXIC = 0x00800000,      /* interrupt control */
+       RBSShift = 0,           /* receive buffer size field shift */
+};
+
+/* Bits in network_desc.status (transmit side) */
+enum tx_desc_status_bits {
+       TXOWN = 0x80000000,     /* own bit: set while the NIC owns the desc */
+       JABTO = 0x00004000,     /* jabber timeout */
+       CSL = 0x00002000,       /* carrier sense lost */
+       LC = 0x00001000,        /* late collision */
+       EC = 0x00000800,        /* excessive collision */
+       UDF = 0x00000400,       /* fifo underflow */
+       DFR = 0x00000200,       /* deferred */
+       HF = 0x00000100,        /* heartbeat fail */
+       NCRMask = 0x000000ff,   /* collision retry count */
+       NCRShift = 0,
+};
+
+/* Bits in network_desc.control (transmit side) */
+enum tx_desc_control_bits {
+       TXIC = 0x80000000,      /* interrupt control */
+       ETIControl = 0x40000000,        /* early transmit interrupt */
+       TXLD = 0x20000000,      /* last descriptor */
+       TXFD = 0x10000000,      /* first descriptor */
+       CRCEnable = 0x08000000, /* crc control */
+       PADEnable = 0x04000000, /* padding control */
+       RetryTxLC = 0x02000000, /* retry late collision */
+       PKTSMask = 0x3ff800,    /* packet size bit21-11 */
+       PKTSShift = 11,
+       TBSMask = 0x000007ff,   /* transmit buffer bit 10-0 */
+       TBSShift = 0,
+};
+
+/* BootROM/EEPROM/MII Management Register */
+#define MASK_MIIR_MII_READ       0x00000000
+#define MASK_MIIR_MII_WRITE      0x00000008
+#define MASK_MIIR_MII_MDO        0x00000004
+#define MASK_MIIR_MII_MDI        0x00000002
+#define MASK_MIIR_MII_MDC        0x00000001
+
+/* MII frame headers: ST+OP+PHYAD+REGAD+TA, shifted in MSB-first by
+   m80x_send_cmd_to_phy(). */
+#define OP_READ             0x6000     /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
+#define OP_WRITE            0x5002     /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
+
+/* NOTE: dates in the comments below ("89/6/13" etc.) appear to use the
+   ROC calendar (year 89 = 2000) — historical vendor annotations. */
+
+/* ------------------------------------------------------------------------- */
+/*      Constants for Myson PHY                                              */
+/* ------------------------------------------------------------------------- */
+#define MysonPHYID      0xd0000302
+/* 89-7-27 add, (begin) */
+#define MysonPHYID0     0x0302
+#define StatusRegister  18
+#define SPEED100        0x0400 // bit10
+#define FULLMODE        0x0800 // bit11
+/* 89-7-27 add, (end) */
+
+/* ------------------------------------------------------------------------- */
+/*      Constants for Seeq 80225 PHY                                         */
+/* ------------------------------------------------------------------------- */
+#define SeeqPHYID0      0x0016
+
+#define MIIRegister18   18
+#define SPD_DET_100     0x80
+#define DPLX_DET_FULL   0x40
+
+/* ------------------------------------------------------------------------- */
+/*      Constants for Ahdoc 101 PHY                                          */
+/* ------------------------------------------------------------------------- */
+#define AhdocPHYID0     0x0022
+
+#define DiagnosticReg   18
+#define DPLX_FULL       0x0800
+#define Speed_100       0x0400
+
+/* 89/6/13 add, */
+/* -------------------------------------------------------------------------- */
+/*      Constants (Marvell and LevelOne PHYs)                                 */
+/* -------------------------------------------------------------------------- */
+#define MarvellPHYID0           0x0141
+#define LevelOnePHYID0         0x0013
+
+#define MII1000BaseTControlReg  9
+#define MII1000BaseTStatusReg   10
+#define SpecificReg            17
+
+/* for 1000BaseT Control Register */
+#define PHYAbletoPerform1000FullDuplex  0x0200
+#define PHYAbletoPerform1000HalfDuplex  0x0100
+#define PHY1000AbilityMask              0x300
+
+// for phy specific status register, marvell phy.
+#define SpeedMask       0x0c000
+#define Speed_1000M     0x08000
+#define Speed_100M      0x4000
+#define Speed_10M       0
+#define Full_Duplex     0x2000
+
+// 89/12/29 add, for phy specific status register, levelone phy, (begin)
+#define LXT1000_100M    0x08000
+#define LXT1000_1000M   0x0c000
+#define LXT1000_Full    0x200
+// 89/12/29 add, for phy specific status register, levelone phy, (end)
+
+/* for 3-in-1 case, BMCRSR register */
+#define LinkIsUp2      0x00040000
+
+/* for PHY */
+#define LinkIsUp        0x0004
+
+
+/* Per-adapter driver state, stored in netdev_priv(dev). */
+struct netdev_private {
+       /* Descriptor rings first for alignment. */
+       struct fealnx_desc *rx_ring;
+       struct fealnx_desc *tx_ring;
+
+       dma_addr_t rx_ring_dma;
+       dma_addr_t tx_ring_dma;
+
+       /* Protects ring state and register-sequencing (see netdev_timer). */
+       spinlock_t lock;
+
+       /* Media monitoring timer. */
+       struct timer_list timer;
+
+       /* Reset timer */
+       struct timer_list reset_timer;
+       int reset_timer_armed;          /* non-zero while reset_timer pending */
+       unsigned long crvalue_sv;       /* saved crvalue, restored by reset_timer */
+       unsigned long imrvalue_sv;      /* saved imrvalue, restored by reset_timer */
+
+       /* Frequently used values: keep some adjacent for cache effect. */
+       int flags;                      /* chip capabilities from skel_netdrv_tbl */
+       struct pci_dev *pci_dev;
+       unsigned long crvalue;          /* shadow of the TCRRCR command register */
+       unsigned long bcrvalue;         /* shadow of the BCR bus-config register */
+       unsigned long imrvalue;         /* shadow of the IMR interrupt mask */
+       struct fealnx_desc *cur_rx;
+       struct fealnx_desc *lack_rxbuf; /* first rx desc still missing an skb */
+       int really_rx_count;            /* rx descs currently holding an skb */
+       struct fealnx_desc *cur_tx;
+       struct fealnx_desc *cur_tx_copy;
+       int really_tx_count;
+       int free_tx_count;
+       unsigned int rx_buf_sz; /* Based on MTU+slack. */
+
+       /* These values are keep track of the transceiver/media in use. */
+       unsigned int linkok;            /* 1 if link detected by getlinkstatus() */
+       unsigned int line_speed;        /* 1=10M, 2=100M, 3=1000M */
+       unsigned int duplexmode;        /* 1=half, 2=full */
+       unsigned int default_port:4;    /* Last dev->if_port value. */
+       unsigned int PHYType;
+
+       /* MII transceiver section. */
+       int mii_cnt;            /* MII device addresses. */
+       unsigned char phys[2];  /* MII device addresses. */
+       struct mii_if_info mii;
+       void __iomem *mem;      /* mapped register base (pci_iomap) */
+};
+
+
+/* Forward declarations for the driver's internal helpers. */
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void getlinktype(struct net_device *dev);
+static void getlinkstatus(struct net_device *dev);
+static void netdev_timer(struct timer_list *t);
+static void reset_timer(struct timer_list *t);
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void __set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static int netdev_close(struct net_device *dev);
+static void reset_rx_descriptors(struct net_device *dev);
+static void reset_tx_descriptors(struct net_device *dev);
+
+/* Clear RX-enable in TCRRCR and busy-poll (up to 0x1000 reads) until the
+ * hardware reports that the receiver has stopped. May time out silently. */
+static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
+{
+       int delay = 0x1000;
+       iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
+       while (--delay) {
+               if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
+                       break;
+       }
+}
+
+
+/* Clear both RX- and TX-enable in TCRRCR and busy-poll (up to 0x1000 reads)
+ * until the hardware reports both engines stopped. May time out silently. */
+static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
+{
+       int delay = 0x1000;
+       iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
+       while (--delay) {
+               if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
+                                           == (CR_R_RXSTOP+CR_R_TXSTOP) )
+                       break;
+       }
+}
+
+/* net_device callbacks wired up by fealnx_init_one(). */
+static const struct net_device_ops netdev_ops = {
+       .ndo_open               = netdev_open,
+       .ndo_stop               = netdev_close,
+       .ndo_start_xmit         = start_tx,
+       .ndo_get_stats          = get_stats,
+       .ndo_set_rx_mode        = set_rx_mode,
+       .ndo_eth_ioctl          = mii_ioctl,
+       .ndo_tx_timeout         = fealnx_tx_timeout,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+/* Probe one PCI device: map its registers, read the MAC address from PAR0,
+ * allocate the rx/tx descriptor rings, detect the attached MII PHY (or the
+ * built-in Myson PHY), apply module options and register the net_device.
+ * Returns 0 on success or a negative errno, unwinding in reverse order. */
+static int fealnx_init_one(struct pci_dev *pdev,
+                          const struct pci_device_id *ent)
+{
+       struct netdev_private *np;
+       int i, option, err, irq;
+       static int card_idx = -1;       /* probe is serialized, so a static
+                                          counter is safe here */
+       char boardname[12];
+       void __iomem *ioaddr;
+       unsigned long len;
+       unsigned int chip_id = ent->driver_data;
+       struct net_device *dev;
+       void *ring_space;
+       dma_addr_t ring_dma;
+       u8 addr[ETH_ALEN];
+#ifdef USE_IO_OPS
+       int bar = 0;
+#else
+       int bar = 1;
+#endif
+
+       card_idx++;
+       sprintf(boardname, "fealnx%d", card_idx);
+
+       option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+       i = pci_enable_device(pdev);
+       if (i) return i;
+       pci_set_master(pdev);
+
+       len = pci_resource_len(pdev, bar);
+       if (len < MIN_REGION_SIZE) {
+               dev_err(&pdev->dev,
+                          "region size %ld too small, aborting\n", len);
+               return -ENODEV;
+       }
+
+       i = pci_request_regions(pdev, boardname);
+       if (i)
+               return i;
+
+       irq = pdev->irq;
+
+       ioaddr = pci_iomap(pdev, bar, len);
+       if (!ioaddr) {
+               err = -ENOMEM;
+               goto err_out_res;
+       }
+
+       dev = alloc_etherdev(sizeof(struct netdev_private));
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_out_unmap;
+       }
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       /* read ethernet id */
+       for (i = 0; i < 6; ++i)
+               addr[i] = ioread8(ioaddr + PAR0 + i);
+       eth_hw_addr_set(dev, addr);
+
+       /* Reset the chip to erase previous misconfiguration. */
+       iowrite32(0x00000001, ioaddr + BCR);
+
+       /* Make certain the descriptor lists are aligned. */
+       np = netdev_priv(dev);
+       np->mem = ioaddr;
+       spin_lock_init(&np->lock);
+       np->pci_dev = pdev;
+       np->flags = skel_netdrv_tbl[chip_id].flags;
+       pci_set_drvdata(pdev, dev);
+       np->mii.dev = dev;
+       np->mii.mdio_read = mdio_read;
+       np->mii.mdio_write = mdio_write;
+       np->mii.phy_id_mask = 0x1f;
+       np->mii.reg_num_mask = 0x1f;
+
+       ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
+       if (!ring_space) {
+               err = -ENOMEM;
+               goto err_out_free_dev;
+       }
+       np->rx_ring = ring_space;
+       np->rx_ring_dma = ring_dma;
+
+       ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
+       if (!ring_space) {
+               err = -ENOMEM;
+               goto err_out_free_rx;
+       }
+       np->tx_ring = ring_space;
+       np->tx_ring_dma = ring_dma;
+
+       /* find the connected MII xcvrs */
+       if (np->flags == HAS_MII_XCVR) {
+               int phy, phy_idx = 0;
+
+               for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
+                              phy++) {
+                       int mii_status = mdio_read(dev, phy, 1);
+
+                       if (mii_status != 0xffff && mii_status != 0x0000) {
+                               np->phys[phy_idx++] = phy;
+                               dev_info(&pdev->dev,
+                                      "MII PHY found at address %d, status "
+                                      "0x%4.4x.\n", phy, mii_status);
+                               /* get phy type */
+                               {
+                                       unsigned int data;
+
+                                       /* Identify the PHY from its ID register
+                                          (reg 2). Note this always probes
+                                          phys[0], i.e. the first PHY found. */
+                                       data = mdio_read(dev, np->phys[0], 2);
+                                       if (data == SeeqPHYID0)
+                                               np->PHYType = SeeqPHY;
+                                       else if (data == AhdocPHYID0)
+                                               np->PHYType = AhdocPHY;
+                                       else if (data == MarvellPHYID0)
+                                               np->PHYType = MarvellPHY;
+                                       else if (data == MysonPHYID0)
+                                               np->PHYType = Myson981;
+                                       else if (data == LevelOnePHYID0)
+                                               np->PHYType = LevelOnePHY;
+                                       else
+                                               np->PHYType = OtherPHY;
+                               }
+                       }
+               }
+
+               np->mii_cnt = phy_idx;
+               if (phy_idx == 0)
+                       dev_warn(&pdev->dev,
+                               "MII PHY not found -- this device may "
+                              "not operate correctly.\n");
+       } else {
+               np->phys[0] = 32;
+/* 89/6/23 add, (begin) */
+               /* get phy type */
+               if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
+                       np->PHYType = MysonPHY;
+               else
+                       np->PHYType = OtherPHY;
+       }
+       np->mii.phy_id = np->phys[0];
+
+       if (dev->mem_start)
+               option = dev->mem_start;
+
+       /* The lower four bits are the media type. */
+       if (option > 0) {
+               if (option & 0x200)
+                       np->mii.full_duplex = 1;
+               np->default_port = option & 15;
+       }
+
+       if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+               np->mii.full_duplex = full_duplex[card_idx];
+
+       if (np->mii.full_duplex) {
+               dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
+/* 89/6/13 add, (begin) */
+//      if (np->PHYType==MarvellPHY)
+               if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
+                       unsigned int data;
+
+                       data = mdio_read(dev, np->phys[0], 9);
+                       data = (data & 0xfcff) | 0x0200;
+                       mdio_write(dev, np->phys[0], 9, data);
+               }
+/* 89/6/13 add, (end) */
+               if (np->flags == HAS_MII_XCVR)
+                       mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+               else
+                       iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
+               np->mii.force_media = 1;
+       }
+
+       dev->netdev_ops = &netdev_ops;
+       dev->ethtool_ops = &netdev_ethtool_ops;
+       dev->watchdog_timeo = TX_TIMEOUT;
+
+       err = register_netdev(dev);
+       if (err)
+               goto err_out_free_tx;
+
+       printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+              dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
+              dev->dev_addr, irq);
+
+       return 0;
+
+/* NOTE(review): none of these error paths calls pci_disable_device() —
+   verify whether that is intentional. */
+err_out_free_tx:
+       dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                         np->tx_ring_dma);
+err_out_free_rx:
+       dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                         np->rx_ring_dma);
+err_out_free_dev:
+       free_netdev(dev);
+err_out_unmap:
+       pci_iounmap(pdev, ioaddr);
+err_out_res:
+       pci_release_regions(pdev);
+       return err;
+}
+
+
+/* PCI remove callback: tear down the device registered by fealnx_init_one().
+ * Unregister the net_device FIRST so the network stack can no longer reach
+ * the hardware, then release the DMA rings, register mapping and PCI
+ * regions (reverse order of probe). Freeing the descriptor rings while the
+ * netdev was still registered left a window where a concurrent ifdown/xmit
+ * could touch freed ring memory. */
+static void fealnx_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (dev) {
+               struct netdev_private *np = netdev_priv(dev);
+
+               unregister_netdev(dev);
+               dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                                 np->tx_ring_dma);
+               dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                                 np->rx_ring_dma);
+               pci_iounmap(pdev, np->mem);
+               free_netdev(dev);
+               pci_release_regions(pdev);
+       } else
+               printk(KERN_ERR "fealnx: remove for unknown device\n");
+}
+
+
+/* Bit-bang the MII management preamble plus the ST+OP+PHYAD+REGAD+TA frame
+ * onto the MDIO pins of the management register at 'miiport'. Returns the
+ * final shadow value of the register so the caller can continue clocking
+ * (mdio_read reads data bits, mdio_write shifts data bits out). For a read,
+ * the output driver is released just before the turnaround bits. */
+static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
+{
+       ulong miir;
+       int i;
+       unsigned int mask, data;
+
+       /* enable MII output */
+       miir = (ulong) ioread32(miiport);
+       miir &= 0xfffffff0;
+
+       miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
+
+       /* send 32 1's preamble */
+       for (i = 0; i < 32; i++) {
+               /* low MDC; MDO is already high (miir) */
+               miir &= ~MASK_MIIR_MII_MDC;
+               iowrite32(miir, miiport);
+
+               /* high MDC */
+               miir |= MASK_MIIR_MII_MDC;
+               iowrite32(miir, miiport);
+       }
+
+       /* calculate ST+OP+PHYAD+REGAD+TA */
+       data = opcode | (phyad << 7) | (regad << 2);
+
+       /* sent out */
+       mask = 0x8000;
+       while (mask) {
+               /* low MDC, prepare MDO */
+               miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+               if (mask & data)
+                       miir |= MASK_MIIR_MII_MDO;
+
+               iowrite32(miir, miiport);
+               /* high MDC */
+               miir |= MASK_MIIR_MII_MDC;
+               iowrite32(miir, miiport);
+               udelay(30);
+
+               /* next */
+               mask >>= 1;
+               /* for a read, tristate MDO before the turnaround bits */
+               if (mask == 0x2 && opcode == OP_READ)
+                       miir &= ~MASK_MIIR_MII_WRITE;
+       }
+       return miir;
+}
+
+
+/* Read a 16-bit MII register via the bit-banged management interface.
+ * Sends the read command, then clocks in 16 data bits MSB-first. */
+static int mdio_read(struct net_device *dev, int phyad, int regad)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *miiport = np->mem + MANAGEMENT;
+       ulong miir;
+       unsigned int mask, data;
+
+       miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
+
+       /* read data */
+       mask = 0x8000;
+       data = 0;
+       while (mask) {
+               /* low MDC */
+               miir &= ~MASK_MIIR_MII_MDC;
+               iowrite32(miir, miiport);
+
+               /* read MDI */
+               miir = ioread32(miiport);
+               if (miir & MASK_MIIR_MII_MDI)
+                       data |= mask;
+
+               /* high MDC, and wait */
+               miir |= MASK_MIIR_MII_MDC;
+               iowrite32(miir, miiport);
+               udelay(30);
+
+               /* next */
+               mask >>= 1;
+       }
+
+       /* low MDC */
+       miir &= ~MASK_MIIR_MII_MDC;
+       iowrite32(miir, miiport);
+
+       return data & 0xffff;
+}
+
+
+/* Write a 16-bit value to an MII register via the bit-banged management
+ * interface. Sends the write command, then clocks out 16 data bits
+ * MSB-first. */
+static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *miiport = np->mem + MANAGEMENT;
+       ulong miir;
+       unsigned int mask;
+
+       miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
+
+       /* write data */
+       mask = 0x8000;
+       while (mask) {
+               /* low MDC, prepare MDO */
+               miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+               if (mask & data)
+                       miir |= MASK_MIIR_MII_MDO;
+               iowrite32(miir, miiport);
+
+               /* high MDC */
+               miir |= MASK_MIIR_MII_MDC;
+               iowrite32(miir, miiport);
+
+               /* next */
+               mask >>= 1;
+       }
+
+       /* low MDC */
+       miir &= ~MASK_MIIR_MII_MDC;
+       iowrite32(miir, miiport);
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+       const int irq = np->pci_dev->irq;
+       int rc, i;
+
+       iowrite32(0x00000001, ioaddr + BCR);    /* Reset */
+
+       rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+       if (rc)
+               return -EAGAIN;
+
+       for (i = 0; i < 3; i++)
+               iowrite16(((const unsigned short *)dev->dev_addr)[i],
+                               ioaddr + PAR0 + i*2);
+
+       init_ring(dev);
+
+       iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
+       iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
+
+       /* Initialize other registers. */
+       /* Configure the PCI bus bursts and FIFO thresholds.
+          486: Set 8 longword burst.
+          586: no burst limit.
+          Burst length 5:3
+          0 0 0   1
+          0 0 1   4
+          0 1 0   8
+          0 1 1   16
+          1 0 0   32
+          1 0 1   64
+          1 1 0   128
+          1 1 1   256
+          Wait the specified 50 PCI cycles after a reset by initializing
+          Tx and Rx queues and the address filter list.
+          FIXME (Ueimor): optimistic for alpha + posted writes ? */
+
+       np->bcrvalue = 0x10;    /* little-endian, 8 burst length */
+#ifdef __BIG_ENDIAN
+       np->bcrvalue |= 0x04;   /* big-endian */
+#endif
+
+#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
+       if (boot_cpu_data.x86 <= 4)
+               np->crvalue = 0xa00;
+       else
+#endif
+               np->crvalue = 0xe00;    /* rx 128 burst length */
+
+
+// 89/12/29 add,
+// 90/1/16 modify,
+//   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
+       np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
+       if (np->pci_dev->device == 0x891) {
+               np->bcrvalue |= 0x200;  /* set PROG bit */
+               np->crvalue |= CR_W_ENH;        /* set enhanced bit */
+               np->imrvalue |= ETI;
+       }
+       iowrite32(np->bcrvalue, ioaddr + BCR);
+
+       if (dev->if_port == 0)
+               dev->if_port = np->default_port;
+
+       iowrite32(0, ioaddr + RXPDR);
+// 89/9/1 modify,
+//   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
+       np->crvalue |= 0x00e40001;      /* tx store and forward, tx/rx enable */
+       np->mii.full_duplex = np->mii.force_media;
+       getlinkstatus(dev);
+       if (np->linkok)
+               getlinktype(dev);
+       __set_rx_mode(dev);
+
+       netif_start_queue(dev);
+
+       /* Clear and Enable interrupts by setting the interrupt mask. */
+       iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+       iowrite32(np->imrvalue, ioaddr + IMR);
+
+       if (debug)
+               printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+       /* Set the timer to check for link beat. */
+       timer_setup(&np->timer, netdev_timer, 0);
+       np->timer.expires = RUN_AT(3 * HZ);
+
+       /* timer handler */
+       add_timer(&np->timer);
+
+       timer_setup(&np->reset_timer, reset_timer, 0);
+       np->reset_timer_armed = 0;
+       return rc;
+}
+
+
+static void getlinkstatus(struct net_device *dev)
+/* function: Routine will read MII Status Register to get link status.       */
+/* input   : dev... pointer to the adapter block.                            */
+/* output  : none.  Sets np->linkok to 1 if link is up, leaves it 0 on       */
+/*           timeout (polls up to 0x1000 times with 100us delays).           */
+{
+       struct netdev_private *np = netdev_priv(dev);
+       unsigned int i, DelayTime = 0x1000;
+
+       np->linkok = 0;
+
+       if (np->PHYType == MysonPHY) {
+               /* built-in PHY: poll the BMCRSR register directly */
+               for (i = 0; i < DelayTime; ++i) {
+                       if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
+                               np->linkok = 1;
+                               return;
+                       }
+                       udelay(100);
+               }
+       } else {
+               /* external PHY: poll the MII basic status register */
+               for (i = 0; i < DelayTime; ++i) {
+                       if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
+                               np->linkok = 1;
+                               return;
+                       }
+                       udelay(100);
+               }
+       }
+}
+
+
+/* Determine the negotiated speed and duplex from the attached PHY and fold
+ * the result into np->line_speed (1=10M, 2=100M, 3=1000M), np->duplexmode
+ * (1=half, 2=full) and the CR_W_PS10/PS1000/FD bits of np->crvalue.
+ * Each supported PHY type exposes this in a different vendor register. */
+static void getlinktype(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+
+       if (np->PHYType == MysonPHY) {  /* 3-in-1 case */
+               if (ioread32(np->mem + TCRRCR) & CR_R_FD)
+                       np->duplexmode = 2;     /* full duplex */
+               else
+                       np->duplexmode = 1;     /* half duplex */
+               if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
+                       np->line_speed = 1;     /* 10M */
+               else
+                       np->line_speed = 2;     /* 100M */
+       } else {
+               if (np->PHYType == SeeqPHY) {   /* this PHY is SEEQ 80225 */
+                       unsigned int data;
+
+                       data = mdio_read(dev, np->phys[0], MIIRegister18);
+                       if (data & SPD_DET_100)
+                               np->line_speed = 2;     /* 100M */
+                       else
+                               np->line_speed = 1;     /* 10M */
+                       if (data & DPLX_DET_FULL)
+                               np->duplexmode = 2;     /* full duplex mode */
+                       else
+                               np->duplexmode = 1;     /* half duplex mode */
+               } else if (np->PHYType == AhdocPHY) {
+                       unsigned int data;
+
+                       data = mdio_read(dev, np->phys[0], DiagnosticReg);
+                       if (data & Speed_100)
+                               np->line_speed = 2;     /* 100M */
+                       else
+                               np->line_speed = 1;     /* 10M */
+                       if (data & DPLX_FULL)
+                               np->duplexmode = 2;     /* full duplex mode */
+                       else
+                               np->duplexmode = 1;     /* half duplex mode */
+               }
+/* 89/6/13 add, (begin) */
+               else if (np->PHYType == MarvellPHY) {
+                       unsigned int data;
+
+                       data = mdio_read(dev, np->phys[0], SpecificReg);
+                       if (data & Full_Duplex)
+                               np->duplexmode = 2;     /* full duplex mode */
+                       else
+                               np->duplexmode = 1;     /* half duplex mode */
+                       data &= SpeedMask;
+                       if (data == Speed_1000M)
+                               np->line_speed = 3;     /* 1000M */
+                       else if (data == Speed_100M)
+                               np->line_speed = 2;     /* 100M */
+                       else
+                               np->line_speed = 1;     /* 10M */
+               }
+/* 89/6/13 add, (end) */
+/* 89/7/27 add, (begin) */
+               else if (np->PHYType == Myson981) {
+                       unsigned int data;
+
+                       data = mdio_read(dev, np->phys[0], StatusRegister);
+
+                       if (data & SPEED100)
+                               np->line_speed = 2;
+                       else
+                               np->line_speed = 1;
+
+                       if (data & FULLMODE)
+                               np->duplexmode = 2;
+                       else
+                               np->duplexmode = 1;
+               }
+/* 89/7/27 add, (end) */
+/* 89/12/29 add */
+               else if (np->PHYType == LevelOnePHY) {
+                       unsigned int data;
+
+                       data = mdio_read(dev, np->phys[0], SpecificReg);
+                       if (data & LXT1000_Full)
+                               np->duplexmode = 2;     /* full duplex mode */
+                       else
+                               np->duplexmode = 1;     /* half duplex mode */
+                       data &= SpeedMask;
+                       if (data == LXT1000_1000M)
+                               np->line_speed = 3;     /* 1000M */
+                       else if (data == LXT1000_100M)
+                               np->line_speed = 2;     /* 100M */
+                       else
+                               np->line_speed = 1;     /* 10M */
+               }
+               /* NOTE: OtherPHY falls through with line_speed/duplexmode
+                  unchanged; crvalue is still updated below. */
+               np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
+               if (np->line_speed == 1)
+                       np->crvalue |= CR_W_PS10;
+               else if (np->line_speed == 3)
+                       np->crvalue |= CR_W_PS1000;
+               if (np->duplexmode == 2)
+                       np->crvalue |= CR_W_FD;
+       }
+}
+
+
+/* Take lock before calling this */
+/* Refill the rx ring: allocate an skb for every descriptor that lacks one,
+ * map it for DMA and hand the descriptor back to the NIC (RXOWN). Stops
+ * early (best effort) if skb allocation fails; netdev_timer retries. */
+static void allocate_rx_buffers(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+
+       /*  allocate skb for rx buffers */
+       while (np->really_rx_count != RX_RING_SIZE) {
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+               if (skb == NULL)
+                       break;  /* Better luck next round. */
+
+               /* advance lack_rxbuf to the next descriptor without an skb */
+               while (np->lack_rxbuf->skbuff)
+                       np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
+
+               np->lack_rxbuf->skbuff = skb;
+               /* NOTE(review): dma_map_single() result is not checked with
+                  dma_mapping_error() — verify whether that is acceptable. */
+               np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
+                                                       skb->data,
+                                                       np->rx_buf_sz,
+                                                       DMA_FROM_DEVICE);
+               np->lack_rxbuf->status = RXOWN;
+               ++np->really_rx_count;
+       }
+}
+
+
+/* Periodic (10s) media-monitoring timer: re-detect the media type when the
+ * link transitions from down to up on MII-PHY boards (restarting rx/tx if
+ * the command register must change), and top up the rx ring. */
+static void netdev_timer(struct timer_list *t)
+{
+       struct netdev_private *np = from_timer(np, t, timer);
+       struct net_device *dev = np->mii.dev;
+       void __iomem *ioaddr = np->mem;
+       int old_crvalue = np->crvalue;
+       unsigned int old_linkok = np->linkok;
+       unsigned long flags;
+
+       if (debug)
+               printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+                      "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
+                      ioread32(ioaddr + TCRRCR));
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       if (np->flags == HAS_MII_XCVR) {
+               getlinkstatus(dev);
+               if ((old_linkok == 0) && (np->linkok == 1)) {   /* we need to detect the media type again */
+                       getlinktype(dev);
+                       if (np->crvalue != old_crvalue) {
+                               stop_nic_rxtx(ioaddr, np->crvalue);
+                               iowrite32(np->crvalue, ioaddr + TCRRCR);
+                       }
+               }
+       }
+
+       allocate_rx_buffers(dev);
+
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       /* re-arm for the next poll */
+       np->timer.expires = RUN_AT(10 * HZ);
+       add_timer(&np->timer);
+}
+
+
+/* Take lock before calling */
+/* Reset chip and disable rx, tx and interrupts */
+static void reset_and_disable_rxtx(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+       int delay=51;
+
+       /* Reset the chip's Tx and Rx processes. */
+       stop_nic_rxtx(ioaddr, 0);
+
+       /* Disable interrupts by clearing the interrupt mask. */
+       iowrite32(0, ioaddr + IMR);
+
+       /* Reset the chip to erase previous misconfiguration. */
+       iowrite32(0x00000001, ioaddr + BCR);
+
+       /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
+          We surely wait too long (address+data phase). Who cares? */
+       while (--delay) {
+               ioread32(ioaddr + BCR);
+               rmb();
+       }
+}
+
+
+/* Take lock before calling */
+/* Restore chip after reset */
+/* Re-point the hardware at the current ring positions, restore the
+ * bus-config register, rearm the rx mode / interrupt mask and kick both
+ * the rx (RXPDR) and tx (TXPDR) polling demands. */
+static void enable_rxtx(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+
+       reset_rx_descriptors(dev);
+
+       /* resume from where the rings were, not from their start */
+       iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
+               ioaddr + TXLBA);
+       iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+               ioaddr + RXLBA);
+
+       iowrite32(np->bcrvalue, ioaddr + BCR);
+
+       iowrite32(0, ioaddr + RXPDR);
+       __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
+
+       /* Clear and Enable interrupts by setting the interrupt mask. */
+       iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+       iowrite32(np->imrvalue, ioaddr + IMR);
+
+       iowrite32(0, ioaddr + TXPDR);
+}
+
+
+/* Deferred-reset timer, armed by intr_handler() when the interrupt work
+ * limit is exceeded.  Restores the control/interrupt-mask values that the
+ * ISR saved (crvalue_sv/imrvalue_sv) before it disabled tx/rx, then fully
+ * resets and re-enables the chip.
+ */
+static void reset_timer(struct timer_list *t)
+{
+       struct netdev_private *np = from_timer(np, t, reset_timer);
+       struct net_device *dev = np->mii.dev;
+       unsigned long flags;
+
+       printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
+
+       spin_lock_irqsave(&np->lock, flags);
+       np->crvalue = np->crvalue_sv;
+       np->imrvalue = np->imrvalue_sv;
+
+       reset_and_disable_rxtx(dev);
+       /* works for me without this:
+       reset_tx_descriptors(dev); */
+       enable_rxtx(dev);
+       netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
+
+       /* Allow intr_handler() to arm us again. */
+       np->reset_timer_armed = 0;
+
+       spin_unlock_irqrestore(&np->lock, flags);
+}
+
+
+/* ndo_tx_timeout hook: dump both descriptor rings for diagnosis, then
+ * reset the chip and rebuild the tx ring under np->lock, and restart the
+ * queue.
+ */
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+       unsigned long flags;
+       int i;
+
+       printk(KERN_WARNING
+              "%s: Transmit timed out, status %8.8x, resetting...\n",
+              dev->name, ioread32(ioaddr + ISR));
+
+       {
+               printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
+               for (i = 0; i < RX_RING_SIZE; i++)
+                       printk(KERN_CONT " %8.8x",
+                              (unsigned int) np->rx_ring[i].status);
+               printk(KERN_CONT "\n");
+               printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
+               for (i = 0; i < TX_RING_SIZE; i++)
+                       printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
+               printk(KERN_CONT "\n");
+       }
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       reset_and_disable_rxtx(dev);
+       /* Unlike reset_timer(), also drop any in-flight tx packets. */
+       reset_tx_descriptors(dev);
+       enable_rxtx(dev);
+
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       netif_trans_update(dev); /* prevent tx timeout */
+       dev->stats.tx_errors++;
+       netif_wake_queue(dev); /* or .._start_.. ?? */
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       int i;
+
+       /* initialize rx variables */
+       np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+       np->cur_rx = &np->rx_ring[0];
+       np->lack_rxbuf = np->rx_ring;
+       np->really_rx_count = 0;
+
+       /* initial rx descriptors. */
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               np->rx_ring[i].status = 0;
+               np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
+               np->rx_ring[i].next_desc = np->rx_ring_dma +
+                       (i + 1)*sizeof(struct fealnx_desc);
+               np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
+               np->rx_ring[i].skbuff = NULL;
+       }
+
+       /* for the last rx descriptor */
+       /* Close the ring: last descriptor links back to the first. */
+       np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
+       np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
+
+       /* allocate skb for rx buffers */
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+
+               if (skb == NULL) {
+                       /* Remember where refilling must resume; the
+                          allocate_rx_buffers() path takes over later. */
+                       np->lack_rxbuf = &np->rx_ring[i];
+                       break;
+               }
+
+               ++np->really_rx_count;
+               np->rx_ring[i].skbuff = skb;
+               /* NOTE(review): dma_map_single() result is not checked
+                  with dma_mapping_error(); a failed mapping would be
+                  handed to the chip as-is -- worth confirming/fixing. */
+               np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
+                                                      skb->data,
+                                                      np->rx_buf_sz,
+                                                      DMA_FROM_DEVICE);
+               np->rx_ring[i].status = RXOWN;
+               np->rx_ring[i].control |= RXIC;
+       }
+
+       /* initialize tx variables */
+       np->cur_tx = &np->tx_ring[0];
+       np->cur_tx_copy = &np->tx_ring[0];
+       np->really_tx_count = 0;
+       np->free_tx_count = TX_RING_SIZE;
+
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               np->tx_ring[i].status = 0;
+               /* do we need np->tx_ring[i].control = XXX; ?? */
+               np->tx_ring[i].next_desc = np->tx_ring_dma +
+                       (i + 1)*sizeof(struct fealnx_desc);
+               np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
+               np->tx_ring[i].skbuff = NULL;
+       }
+
+       /* for the last tx descriptor */
+       np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
+       np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+/* ndo_start_xmit hook: queue one skb on the tx ring under np->lock.
+ *
+ * With "one_buffer" defined (the default, see below) each packet occupies
+ * a single descriptor.  The alternative "two_buffer" variant splits
+ * packets larger than BPT bytes across two descriptors.  Fix: the
+ * two_buffer path mapped via the undeclared identifier 'ep'; it must use
+ * 'np' like every other mapping in this function (would not compile if
+ * two_buffer were ever selected).
+ */
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       np->cur_tx_copy->skbuff = skb;
+
+#define one_buffer
+#define BPT 1022
+#if defined(one_buffer)
+       np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
+                                                skb->len, DMA_TO_DEVICE);
+       np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+       np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
+       np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */
+       /* The MTD-891 additionally wants early-tx-interrupt and
+          retry-on-late-collision control bits. */
+       if (np->pci_dev->device == 0x891)
+               np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+       np->cur_tx_copy->status = TXOWN;
+       np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+       --np->free_tx_count;
+#elif defined(two_buffer)
+       if (skb->len > BPT) {
+               struct fealnx_desc *next;
+
+               /* for the first descriptor */
+               np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+                                                        skb->data, BPT,
+                                                        DMA_TO_DEVICE);
+               np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
+               np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
+               np->cur_tx_copy->control |= (BPT << TBSShift);  /* buffer size */
+
+               /* for the last descriptor */
+               next = np->cur_tx_copy->next_desc_logical;
+               next->skbuff = skb;
+               next->control = TXIC | TXLD | CRCEnable | PADEnable;
+               next->control |= (skb->len << PKTSShift);       /* pkt size */
+               next->control |= ((skb->len - BPT) << TBSShift);        /* buf size */
+               /* MTD-891 extra control bits (see one_buffer path). */
+               if (np->pci_dev->device == 0x891)
+                       np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+               /* Was "&ep->pci_dev->dev": 'ep' does not exist here. */
+               next->buffer = dma_map_single(&np->pci_dev->dev,
+                                             skb->data + BPT, skb->len - BPT,
+                                             DMA_TO_DEVICE);
+
+               next->status = TXOWN;
+               np->cur_tx_copy->status = TXOWN;
+
+               np->cur_tx_copy = next->next_desc_logical;
+               np->free_tx_count -= 2;
+       } else {
+               np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+                                                        skb->data, skb->len,
+                                                        DMA_TO_DEVICE);
+               np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+               np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
+               np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */
+               /* MTD-891 extra control bits (see one_buffer path). */
+               if (np->pci_dev->device == 0x891)
+                       np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+               np->cur_tx_copy->status = TXOWN;
+               np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+               --np->free_tx_count;
+       }
+#endif
+
+       /* Stop the queue while fewer than two free descriptors remain so
+          a (hypothetical) two-descriptor packet always fits. */
+       if (np->free_tx_count < 2)
+               netif_stop_queue(dev);
+       ++np->really_tx_count;
+       iowrite32(0, np->mem + TXPDR);
+
+       spin_unlock_irqrestore(&np->lock, flags);
+       return NETDEV_TX_OK;
+}
+
+
+/* Take lock before calling */
+/* Chip probably hosed tx ring. Clean up. */
+static void reset_tx_descriptors(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       struct fealnx_desc *cur;
+       int i;
+
+       /* initialize tx variables */
+       np->cur_tx = &np->tx_ring[0];
+       np->cur_tx_copy = &np->tx_ring[0];
+       np->really_tx_count = 0;
+       np->free_tx_count = TX_RING_SIZE;
+
+       /* Unmap and free any skb still attached to a descriptor, then
+          relink the whole ring from scratch. */
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               cur = &np->tx_ring[i];
+               if (cur->skbuff) {
+                       dma_unmap_single(&np->pci_dev->dev, cur->buffer,
+                                        cur->skbuff->len, DMA_TO_DEVICE);
+                       dev_kfree_skb_any(cur->skbuff);
+                       cur->skbuff = NULL;
+               }
+               cur->status = 0;
+               cur->control = 0;       /* needed? */
+               /* probably not needed. We do it for purely paranoid reasons */
+               cur->next_desc = np->tx_ring_dma +
+                       (i + 1)*sizeof(struct fealnx_desc);
+               cur->next_desc_logical = &np->tx_ring[i + 1];
+       }
+       /* for the last tx descriptor */
+       np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
+       np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+/* Take lock and stop rx before calling this */
+static void reset_rx_descriptors(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       struct fealnx_desc *cur = np->cur_rx;
+       int i;
+
+       /* Refill any descriptors that lost their skb. */
+       allocate_rx_buffers(dev);
+
+       /* Hand every descriptor that has a buffer back to the chip. */
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               if (cur->skbuff)
+                       cur->status = RXOWN;
+               cur = cur->next_desc_logical;
+       }
+
+       /* Re-point the chip's rx list base at the current descriptor. */
+       iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+               np->mem + RXLBA);
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+       struct net_device *dev = (struct net_device *) dev_instance;
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+       long boguscnt = max_interrupt_work;
+       unsigned int num_tx = 0;
+       int handled = 0;
+
+       spin_lock(&np->lock);
+
+       /* Mask all interrupts for the duration of the handler. */
+       iowrite32(0, ioaddr + IMR);
+
+       do {
+               u32 intr_status = ioread32(ioaddr + ISR);
+
+               /* Acknowledge all of the current interrupt sources ASAP. */
+               iowrite32(intr_status, ioaddr + ISR);
+
+               if (debug)
+                       printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
+                              intr_status);
+
+               if (!(intr_status & np->imrvalue))
+                       break;
+
+               handled = 1;
+
+// 90/1/16 delete,
+//
+//      if (intr_status & FBE)
+//      {   /* fatal error */
+//          stop_nic_tx(ioaddr, 0);
+//          stop_nic_rx(ioaddr, 0);
+//          break;
+//      };
+
+               /* Tx underflow: kick the tx poll-demand register. */
+               if (intr_status & TUNF)
+                       iowrite32(0, ioaddr + TXPDR);
+
+               /* Tally counter overflow: fold the counters into stats
+                  before the hardware wraps them. */
+               if (intr_status & CNTOVF) {
+                       /* missed pkts */
+                       dev->stats.rx_missed_errors +=
+                               ioread32(ioaddr + TALLY) & 0x7fff;
+
+                       /* crc error */
+                       dev->stats.rx_crc_errors +=
+                           (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+               }
+
+               if (intr_status & (RI | RBU)) {
+                       if (intr_status & RI)
+                               netdev_rx(dev);
+                       else {
+                               /* Rx buffer unavailable: restart rx with a
+                                  freshly refilled ring. */
+                               stop_nic_rx(ioaddr, np->crvalue);
+                               reset_rx_descriptors(dev);
+                               iowrite32(np->crvalue, ioaddr + TCRRCR);
+                       }
+               }
+
+               /* Reclaim descriptors for completed transmits. */
+               while (np->really_tx_count) {
+                       long tx_status = np->cur_tx->status;
+                       long tx_control = np->cur_tx->control;
+
+                       if (!(tx_control & TXLD)) {     /* this pkt is combined by two tx descriptors */
+                               struct fealnx_desc *next;
+
+                               next = np->cur_tx->next_desc_logical;
+                               tx_status = next->status;
+                               tx_control = next->control;
+                       }
+
+                       if (tx_status & TXOWN)
+                               break;
+
+                       if (!(np->crvalue & CR_W_ENH)) {
+                               if (tx_status & (CSL | LC | EC | UDF | HF)) {
+                                       dev->stats.tx_errors++;
+                                       if (tx_status & EC)
+                                               dev->stats.tx_aborted_errors++;
+                                       if (tx_status & CSL)
+                                               dev->stats.tx_carrier_errors++;
+                                       if (tx_status & LC)
+                                               dev->stats.tx_window_errors++;
+                                       if (tx_status & UDF)
+                                               dev->stats.tx_fifo_errors++;
+                                       if ((tx_status & HF) && np->mii.full_duplex == 0)
+                                               dev->stats.tx_heartbeat_errors++;
+
+                               } else {
+                                       dev->stats.tx_bytes +=
+                                           ((tx_control & PKTSMask) >> PKTSShift);
+
+                                       dev->stats.collisions +=
+                                           ((tx_status & NCRMask) >> NCRShift);
+                                       dev->stats.tx_packets++;
+                               }
+                       } else {
+                               /* Enhanced mode: per-packet error detail is
+                                  read from TSR below instead. */
+                               dev->stats.tx_bytes +=
+                                   ((tx_control & PKTSMask) >> PKTSShift);
+                               dev->stats.tx_packets++;
+                       }
+
+                       /* Free the original skb. */
+                       dma_unmap_single(&np->pci_dev->dev,
+                                        np->cur_tx->buffer,
+                                        np->cur_tx->skbuff->len,
+                                        DMA_TO_DEVICE);
+                       dev_consume_skb_irq(np->cur_tx->skbuff);
+                       np->cur_tx->skbuff = NULL;
+                       --np->really_tx_count;
+                       if (np->cur_tx->control & TXLD) {
+                               np->cur_tx = np->cur_tx->next_desc_logical;
+                               ++np->free_tx_count;
+                       } else {
+                               /* Two-descriptor packet: skip both. */
+                               np->cur_tx = np->cur_tx->next_desc_logical;
+                               np->cur_tx = np->cur_tx->next_desc_logical;
+                               np->free_tx_count += 2;
+                       }
+                       num_tx++;
+               }               /* end of for loop */
+
+               if (num_tx && np->free_tx_count >= 2)
+                       netif_wake_queue(dev);
+
+               /* read transmit status for enhanced mode only */
+               if (np->crvalue & CR_W_ENH) {
+                       long data;
+
+                       data = ioread32(ioaddr + TSR);
+                       dev->stats.tx_errors += (data & 0xff000000) >> 24;
+                       dev->stats.tx_aborted_errors +=
+                               (data & 0xff000000) >> 24;
+                       dev->stats.tx_window_errors +=
+                               (data & 0x00ff0000) >> 16;
+                       dev->stats.collisions += (data & 0x0000ffff);
+               }
+
+               /* Work limit reached: disable tx/rx and defer a full chip
+                  reset to reset_timer(), saving the register values it
+                  must restore. */
+               if (--boguscnt < 0) {
+                       printk(KERN_WARNING "%s: Too much work at interrupt, "
+                              "status=0x%4.4x.\n", dev->name, intr_status);
+                       if (!np->reset_timer_armed) {
+                               np->reset_timer_armed = 1;
+                               np->reset_timer.expires = RUN_AT(HZ/2);
+                               add_timer(&np->reset_timer);
+                               stop_nic_rxtx(ioaddr, 0);
+                               netif_stop_queue(dev);
+                               /* or netif_tx_disable(dev); ?? */
+                               /* Prevent other paths from enabling tx,rx,intrs */
+                               np->crvalue_sv = np->crvalue;
+                               np->imrvalue_sv = np->imrvalue;
+                               np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
+                               np->imrvalue = 0;
+                       }
+
+                       break;
+               }
+       } while (1);
+
+       /* read the tally counters */
+       /* missed pkts */
+       dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+
+       /* crc error */
+       dev->stats.rx_crc_errors +=
+               (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+
+       if (debug)
+               printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+                      dev->name, ioread32(ioaddr + ISR));
+
+       /* Re-enable interrupts (imrvalue is 0 if the reset timer was
+          armed above, keeping them off until reset_timer() runs). */
+       iowrite32(np->imrvalue, ioaddr + IMR);
+
+       spin_unlock(&np->lock);
+
+       return IRQ_RETVAL(handled);
+}
+
+
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+
+       /* If EOP is set on the next entry, it's a new packet. Send it up. */
+       while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
+               s32 rx_status = np->cur_rx->status;
+
+               if (np->really_rx_count == 0)
+                       break;
+
+               if (debug)
+                       printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);
+
+               if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
+                   (rx_status & ErrorSummary)) {
+                       if (rx_status & ErrorSummary) { /* there was a fatal error */
+                               if (debug)
+                                       printk(KERN_DEBUG
+                                              "%s: Receive error, Rx status %8.8x.\n",
+                                              dev->name, rx_status);
+
+                               dev->stats.rx_errors++; /* end of a packet. */
+                               if (rx_status & (LONGPKT | RUNTPKT))
+                                       dev->stats.rx_length_errors++;
+                               if (rx_status & RXER)
+                                       dev->stats.rx_frame_errors++;
+                               if (rx_status & CRC)
+                                       dev->stats.rx_crc_errors++;
+                       } else {
+                               /* Packet spans multiple descriptors (no
+                                  FSD+LSD on a single one). */
+                               int need_to_reset = 0;
+                               int desno = 0;
+
+                               if (rx_status & RXFSD) {        /* this pkt is too long, over one rx buffer */
+                                       struct fealnx_desc *cur;
+
+                                       /* check this packet is received completely? */
+                                       cur = np->cur_rx;
+                                       while (desno <= np->really_rx_count) {
+                                               ++desno;
+                                               if ((!(cur->status & RXOWN)) &&
+                                                   (cur->status & RXLSD))
+                                                       break;
+                                               /* goto next rx descriptor */
+                                               cur = cur->next_desc_logical;
+                                       }
+                                       if (desno > np->really_rx_count)
+                                               need_to_reset = 1;
+                               } else  /* RXLSD did not find, something error */
+                                       need_to_reset = 1;
+
+                               if (need_to_reset == 0) {
+                                       int i;
+
+                                       dev->stats.rx_length_errors++;
+
+                                       /* free all rx descriptors related this long pkt */
+                                       for (i = 0; i < desno; ++i) {
+                                               if (!np->cur_rx->skbuff) {
+                                                       printk(KERN_DEBUG
+                                                               "%s: I'm scared\n", dev->name);
+                                                       break;
+                                               }
+                                               np->cur_rx->status = RXOWN;
+                                               np->cur_rx = np->cur_rx->next_desc_logical;
+                                       }
+                                       continue;
+                               } else {        /* rx error, need to reset this chip */
+                                       stop_nic_rx(ioaddr, np->crvalue);
+                                       reset_rx_descriptors(dev);
+                                       iowrite32(np->crvalue, ioaddr + TCRRCR);
+                               }
+                               break;  /* exit the while loop */
+                       }
+               } else {        /* this received pkt is ok */
+
+                       struct sk_buff *skb;
+                       /* Omit the four octet CRC from the length. */
+                       /* NOTE(review): pkt_len comes straight from the
+                          status word; runts are presumably caught by the
+                          RUNTPKT error path above -- confirm it cannot
+                          go negative here. */
+                       short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
+
+#ifndef final_version
+                       if (debug)
+                               printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
+                                      " status %x.\n", pkt_len, rx_status);
+#endif
+
+                       /* Check if the packet is long enough to accept without copying
+                          to a minimally-sized skbuff. */
+                       if (pkt_len < rx_copybreak &&
+                           (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
+                               skb_reserve(skb, 2);    /* 16 byte align the IP header */
+                               dma_sync_single_for_cpu(&np->pci_dev->dev,
+                                                       np->cur_rx->buffer,
+                                                       np->rx_buf_sz,
+                                                       DMA_FROM_DEVICE);
+                               /* Call copy + cksum if available. */
+
+#if ! defined(__alpha__)
+                               skb_copy_to_linear_data(skb,
+                                       np->cur_rx->skbuff->data, pkt_len);
+                               skb_put(skb, pkt_len);
+#else
+                               skb_put_data(skb, np->cur_rx->skbuff->data,
+                                            pkt_len);
+#endif
+                               dma_sync_single_for_device(&np->pci_dev->dev,
+                                                          np->cur_rx->buffer,
+                                                          np->rx_buf_sz,
+                                                          DMA_FROM_DEVICE);
+                       } else {
+                               /* Hand the ring skb itself to the stack;
+                                  the descriptor is refilled later by
+                                  allocate_rx_buffers(). */
+                               dma_unmap_single(&np->pci_dev->dev,
+                                                np->cur_rx->buffer,
+                                                np->rx_buf_sz,
+                                                DMA_FROM_DEVICE);
+                               skb_put(skb = np->cur_rx->skbuff, pkt_len);
+                               np->cur_rx->skbuff = NULL;
+                               --np->really_rx_count;
+                       }
+                       skb->protocol = eth_type_trans(skb, dev);
+                       netif_rx(skb);
+                       dev->stats.rx_packets++;
+                       dev->stats.rx_bytes += pkt_len;
+               }
+
+               np->cur_rx = np->cur_rx->next_desc_logical;
+       }                       /* end of while loop */
+
+       /*  allocate skb for rx buffers */
+       allocate_rx_buffers(dev);
+
+       return 0;
+}
+
+
+/* ndo_get_stats hook: fold the chip's silent-drop tally counters into
+ * dev->stats (only while the interface is up, i.e. the registers are
+ * live), then return the accumulated statistics.
+ */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+
+       /* The chip only need report frame silently dropped. */
+       if (netif_running(dev)) {
+               /* NOTE(review): TALLY is read twice; if the counter is
+                  clear-on-read, the second read would lose the CRC-error
+                  field -- confirm against the MTD-8xx datasheet. */
+               dev->stats.rx_missed_errors +=
+                       ioread32(ioaddr + TALLY) & 0x7fff;
+               dev->stats.rx_crc_errors +=
+                       (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+       }
+
+       return &dev->stats;
+}
+
+
+/* ndo_set_rx_mode hook: apply the current rx filter configuration with
+ * the device lock held; __set_rx_mode() does the actual work.
+ */
+static void set_rx_mode(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&np->lock, flags);
+       __set_rx_mode(dev);
+       spin_unlock_irqrestore(&np->lock, flags);
+}
+
+
+/* Take lock before calling */
+/* Program the rx filter: promiscuous, all-multicast, or a 64-bit CRC
+ * hash over the multicast list.  Stops rx/tx, loads the hash registers
+ * (MAR0/MAR1), updates the rx-mode bits in np->crvalue and writes it to
+ * TCRRCR.
+ */
+static void __set_rx_mode(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+       u32 mc_filter[2];       /* Multicast hash filter */
+       u32 rx_mode;
+
+       if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+               memset(mc_filter, 0xff, sizeof(mc_filter));
+               rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
+       } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+                  (dev->flags & IFF_ALLMULTI)) {
+               /* Too many to match, or accept all multicasts. */
+               memset(mc_filter, 0xff, sizeof(mc_filter));
+               rx_mode = CR_W_AB | CR_W_AM;
+       } else {
+               struct netdev_hw_addr *ha;
+
+               memset(mc_filter, 0, sizeof(mc_filter));
+               netdev_for_each_mc_addr(ha, dev) {
+                       unsigned int bit;
+                       bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+                       /* bit ranges 0..63: select the register with
+                          bit>>5 and mask the shift count to 0..31.  The
+                          old "1 << bit" was undefined for bit >= 32 and
+                          only worked because x86 masks shift counts. */
+                       mc_filter[bit >> 5] |= 1U << (bit & 31);
+               }
+               rx_mode = CR_W_AB | CR_W_AM;
+       }
+
+       stop_nic_rxtx(ioaddr, np->crvalue);
+
+       iowrite32(mc_filter[0], ioaddr + MAR0);
+       iowrite32(mc_filter[1], ioaddr + MAR1);
+       np->crvalue &= ~CR_W_RXMODEMASK;
+       np->crvalue |= rx_mode;
+       iowrite32(np->crvalue, ioaddr + TCRRCR);
+}
+
+/* ethtool get_drvinfo: report driver name and PCI bus address. */
+static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       struct netdev_private *np = netdev_priv(dev);
+
+       strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+}
+
+/* ethtool get_link_ksettings: read MII link settings under the lock. */
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
+{
+       struct netdev_private *np = netdev_priv(dev);
+
+       spin_lock_irq(&np->lock);
+       mii_ethtool_get_link_ksettings(&np->mii, cmd);
+       spin_unlock_irq(&np->lock);
+
+       return 0;
+}
+
+/* ethtool set_link_ksettings: apply MII link settings under the lock. */
+static int netdev_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       int rc;
+
+       spin_lock_irq(&np->lock);
+       rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
+       spin_unlock_irq(&np->lock);
+
+       return rc;
+}
+
+/* ethtool nway_reset: restart MII autonegotiation. */
+static int netdev_nway_reset(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       return mii_nway_restart(&np->mii);
+}
+
+/* ethtool get_link: report MII link-up status. */
+static u32 netdev_get_link(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       return mii_link_ok(&np->mii);
+}
+
+/* ethtool get_msglevel: return the module-wide debug level. */
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+       return debug;
+}
+
+/* ethtool set_msglevel: set the module-wide debug level. */
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+       debug = value;
+}
+
+/* ethtool operations wired into the net_device at probe time. */
+static const struct ethtool_ops netdev_ethtool_ops = {
+       .get_drvinfo            = netdev_get_drvinfo,
+       .nway_reset             = netdev_nway_reset,
+       .get_link               = netdev_get_link,
+       .get_msglevel           = netdev_get_msglevel,
+       .set_msglevel           = netdev_set_msglevel,
+       .get_link_ksettings     = netdev_get_link_ksettings,
+       .set_link_ksettings     = netdev_set_link_ksettings,
+};
+
+/* ndo_eth_ioctl hook: forward MII ioctls to the generic MII layer under
+ * the device lock; rejected with -EINVAL while the interface is down.
+ */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       int err = -EINVAL;
+
+       if (netif_running(dev)) {
+               spin_lock_irq(&np->lock);
+               err = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
+               spin_unlock_irq(&np->lock);
+       }
+
+       return err;
+}
+
+
+/* ndo_stop hook: quiesce the hardware (mask interrupts, stop tx/rx),
+ * cancel both driver timers, release the IRQ, and free every skb still
+ * attached to the rx and tx rings.
+ */
+static int netdev_close(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->mem;
+       int i;
+
+       netif_stop_queue(dev);
+
+       /* Disable interrupts by clearing the interrupt mask. */
+       iowrite32(0x0000, ioaddr + IMR);
+
+       /* Stop the chip's Tx and Rx processes. */
+       stop_nic_rxtx(ioaddr, 0);
+
+       del_timer_sync(&np->timer);
+       del_timer_sync(&np->reset_timer);
+
+       free_irq(np->pci_dev->irq, dev);
+
+       /* Free all the skbuffs in the Rx queue. */
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               struct sk_buff *skb = np->rx_ring[i].skbuff;
+
+               np->rx_ring[i].status = 0;
+               if (skb) {
+                       dma_unmap_single(&np->pci_dev->dev,
+                                        np->rx_ring[i].buffer, np->rx_buf_sz,
+                                        DMA_FROM_DEVICE);
+                       dev_kfree_skb(skb);
+                       np->rx_ring[i].skbuff = NULL;
+               }
+       }
+
+       /* Free any skbs still pending on the Tx ring. */
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               struct sk_buff *skb = np->tx_ring[i].skbuff;
+
+               if (skb) {
+                       dma_unmap_single(&np->pci_dev->dev,
+                                        np->tx_ring[i].buffer, skb->len,
+                                        DMA_TO_DEVICE);
+                       dev_kfree_skb(skb);
+                       np->tx_ring[i].skbuff = NULL;
+               }
+       }
+
+       return 0;
+}
+
+/* Supported devices: Myson (vendor 0x1516) MTD-800/803/891; driver_data
+   is an index into the driver's chip-info table. */
+static const struct pci_device_id fealnx_pci_tbl[] = {
+       {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+       {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+       {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+       {} /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
+
+
+/* PCI driver glue; module_pci_driver() generates init/exit. */
+static struct pci_driver fealnx_driver = {
+       .name           = "fealnx",
+       .id_table       = fealnx_pci_tbl,
+       .probe          = fealnx_init_one,
+       .remove         = fealnx_remove_one,
+};
+
+module_pci_driver(fealnx_driver);
index c557dfc..396e555 100644 (file)
@@ -1411,7 +1411,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
        tlv->ouisubtype = htonl(ouisubtype);
 
        buf[0] = dcbcfg->pfc.pfccap & 0xF;
-       buf[1] = dcbcfg->pfc.pfcena & 0xF;
+       buf[1] = dcbcfg->pfc.pfcena;
 }
 
 /**
index b360bd8..f86e814 100644 (file)
@@ -4331,6 +4331,8 @@ ice_get_module_eeprom(struct net_device *netdev,
                 * SFP modules only ever use page 0.
                 */
                if (page == 0 || !(data[0x2] & 0x4)) {
+                       u32 copy_len;
+
                        /* If i2c bus is busy due to slow page change or
                         * link management access, call can fail. This is normal.
                         * So we retry this a few times.
@@ -4354,8 +4356,8 @@ ice_get_module_eeprom(struct net_device *netdev,
                        }
 
                        /* Make sure we have enough room for the new block */
-                       if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
-                               memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
+                       copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
+                       memcpy(data + i, value, copy_len);
                }
        }
        return 0;
index 7814754..0f52ea3 100644 (file)
@@ -2126,7 +2126,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
        ice_for_each_rxq(vsi, i)
                ice_tx_xsk_pool(vsi, i);
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -2693,12 +2693,14 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                return ret;
 
        /* allocate memory for Tx/Rx ring stat pointers */
-       if (ice_vsi_alloc_stat_arrays(vsi))
+       ret = ice_vsi_alloc_stat_arrays(vsi);
+       if (ret)
                goto unroll_vsi_alloc;
 
        ice_alloc_fd_res(vsi);
 
-       if (ice_vsi_get_qs(vsi)) {
+       ret = ice_vsi_get_qs(vsi);
+       if (ret) {
                dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
                        vsi->idx);
                goto unroll_vsi_alloc_stat;
@@ -2811,6 +2813,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                break;
        default:
                /* clean up the resources and exit */
+               ret = -EINVAL;
                goto unroll_vsi_init;
        }
 
@@ -3508,10 +3511,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
                if (vsi_flags & ICE_VSI_FLAG_INIT) {
                        ret = -EIO;
                        goto err_vsi_cfg_tc_lan;
-               } else {
-                       kfree(coalesce);
-                       return ice_schedule_reset(pf, ICE_RESET_PFR);
                }
+
+               kfree(coalesce);
+               return ice_schedule_reset(pf, ICE_RESET_PFR);
        }
 
        ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
@@ -3759,7 +3762,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
        dev = ice_pf_to_dev(pf);
        if (vsi->tc_cfg.ena_tc == ena_tc &&
            vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
-               return ret;
+               return 0;
 
        ice_for_each_traffic_class(i) {
                /* build bitmap of enabled TCs */
index 6b48cbc..76f29a5 100644 (file)
@@ -1455,8 +1455,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
                if (match.mask->vlan_priority) {
                        fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
                        headers->vlan_hdr.vlan_prio =
-                               cpu_to_be16((match.key->vlan_priority <<
-                                            VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+                               be16_encode_bits(match.key->vlan_priority,
+                                                VLAN_PRIO_MASK);
                }
 
                if (match.mask->vlan_tpid)
@@ -1489,8 +1489,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
                if (match.mask->vlan_priority) {
                        fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
                        headers->cvlan_hdr.vlan_prio =
-                               cpu_to_be16((match.key->vlan_priority <<
-                                            VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+                               be16_encode_bits(match.key->vlan_priority,
+                                                VLAN_PRIO_MASK);
                }
        }
 
index 389663a..ef721ca 100644 (file)
@@ -884,6 +884,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
 int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
 int rvu_cpt_init(struct rvu *rvu);
 
+#define NDC_AF_BANK_MASK       GENMASK_ULL(7, 0)
+#define NDC_AF_BANK_LINE_MASK  GENMASK_ULL(31, 16)
+
 /* CN10K RVU */
 int rvu_set_channels_base(struct rvu *rvu);
 void rvu_program_channels(struct rvu *rvu);
@@ -902,6 +905,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {}
 static inline void rvu_dbg_exit(struct rvu *rvu) {}
 #endif
 
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
+
 /* RVU Switch */
 void rvu_switch_enable(struct rvu *rvu);
 void rvu_switch_disable(struct rvu *rvu);
index fa280eb..26cfa50 100644 (file)
@@ -198,9 +198,6 @@ enum cpt_eng_type {
        CPT_IE_TYPE = 3,
 };
 
-#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
-                                               blk_addr, NDC_AF_CONST) & 0xFF)
-
 #define rvu_dbg_NULL NULL
 #define rvu_dbg_open_NULL NULL
 
@@ -1448,6 +1445,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
        struct nix_hw *nix_hw;
        struct rvu *rvu;
        int bank, max_bank;
+       u64 ndc_af_const;
 
        if (blk_addr == BLKADDR_NDC_NPA0) {
                rvu = s->private;
@@ -1456,7 +1454,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
                rvu = nix_hw->rvu;
        }
 
-       max_bank = NDC_MAX_BANK(rvu, blk_addr);
+       ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
+       max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
        for (bank = 0; bank < max_bank; bank++) {
                seq_printf(s, "BANK:%d\n", bank);
                seq_printf(s, "\tHits:\t%lld\n",
index 26e639e..4ad707e 100644 (file)
@@ -790,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
        struct nix_aq_res_s *result;
        int timeout = 1000;
        u64 reg, head;
+       int ret;
 
        result = (struct nix_aq_res_s *)aq->res->base;
 
@@ -813,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
                        return -EBUSY;
        }
 
-       if (result->compcode != NIX_AQ_COMP_GOOD)
+       if (result->compcode != NIX_AQ_COMP_GOOD) {
                /* TODO: Replace this with some error code */
+               if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
+                   result->compcode == NIX_AQ_COMP_LOCKERR ||
+                   result->compcode == NIX_AQ_COMP_CTX_POISON) {
+                       ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
+                       ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
+                       ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
+                       ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
+                       if (ret)
+                               dev_err(rvu->dev,
+                                       "%s: Not able to unlock cachelines\n", __func__);
+               }
+
                return -EBUSY;
+       }
 
        return 0;
 }
index 70bd036..4f5ca5a 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2018 Marvell.
  *
  */
-
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 
@@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
                        return -EBUSY;
        }
 
-       if (result->compcode != NPA_AQ_COMP_GOOD)
+       if (result->compcode != NPA_AQ_COMP_GOOD) {
                /* TODO: Replace this with some error code */
+               if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
+                   result->compcode == NPA_AQ_COMP_LOCKERR ||
+                   result->compcode == NPA_AQ_COMP_CTX_POISON) {
+                       if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
+                               dev_err(rvu->dev,
+                                       "%s: Not able to unlock cachelines\n", __func__);
+               }
+
                return -EBUSY;
+       }
 
        return 0;
 }
@@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
 
        npa_ctx_free(rvu, pfvf);
 }
+
+/* Due to an Hardware errata, in some corner cases, AQ context lock
+ * operations can result in a NDC way getting into an illegal state
+ * of not valid but locked.
+ *
+ * This API solves the problem by clearing the lock bit of the NDC block.
+ * The operation needs to be done for each line of all the NDC banks.
+ */
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
+{
+       int bank, max_bank, line, max_line, err;
+       u64 reg, ndc_af_const;
+
+       /* Set the ENABLE bit(63) to '0' */
+       reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+       rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
+
+       /* Poll until the BUSY bits(47:32) are set to '0' */
+       err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
+       if (err) {
+               dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
+               return err;
+       }
+
+       ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
+       max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+       max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
+       for (bank = 0; bank < max_bank; bank++) {
+               for (line = 0; line < max_line; line++) {
+                       /* Check if 'cache line valid bit(63)' is not set
+                        * but 'cache line lock bit(60)' is set and on
+                        * success, reset the lock bit(60).
+                        */
+                       reg = rvu_read64(rvu, blkaddr,
+                                        NDC_AF_BANKX_LINEX_METADATA(bank, line));
+                       if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
+                               rvu_write64(rvu, blkaddr,
+                                           NDC_AF_BANKX_LINEX_METADATA(bank, line),
+                                           reg & ~BIT_ULL(60));
+                       }
+               }
+       }
+
+       return 0;
+}
index 1729b22..7007f0b 100644 (file)
 #define NDC_AF_INTR_ENA_W1S            (0x00068)
 #define NDC_AF_INTR_ENA_W1C            (0x00070)
 #define NDC_AF_ACTIVE_PC               (0x00078)
+#define NDC_AF_CAMS_RD_INTERVAL                (0x00080)
 #define NDC_AF_BP_TEST_ENABLE          (0x001F8)
 #define NDC_AF_BP_TEST(a)              (0x00200 | (a) << 3)
 #define NDC_AF_BLK_RST                 (0x002F0)
                (0x00F00 | (a) << 5 | (b) << 4)
 #define NDC_AF_BANKX_HIT_PC(a)         (0x01000 | (a) << 3)
 #define NDC_AF_BANKX_MISS_PC(a)                (0x01100 | (a) << 3)
+#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
+               (0x10000 | (a) << 12 | (b) << 3)
 
 /* LBK */
 #define LBK_CONST                      (0x10ull)
index 14be6ea..3cb4362 100644 (file)
@@ -616,7 +616,8 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
        mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
        mcr_new = mcr_cur;
        mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
-                  MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+                  MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
+                  MAC_MCR_RX_FIFO_CLR_DIS;
 
        /* Only update control register when needed! */
        if (mcr_new != mcr_cur)
index afc9d52..b65de17 100644 (file)
 #define MAC_MCR_FORCE_MODE     BIT(15)
 #define MAC_MCR_TX_EN          BIT(14)
 #define MAC_MCR_RX_EN          BIT(13)
+#define MAC_MCR_RX_FIFO_CLR_DIS        BIT(12)
 #define MAC_MCR_BACKOFF_EN     BIT(9)
 #define MAC_MCR_BACKPR_EN      BIT(8)
 #define MAC_MCR_FORCE_RX_FC    BIT(5)
index a9aec90..7d66fe7 100644 (file)
@@ -194,7 +194,7 @@ int lan966x_police_port_del(struct lan966x_port *port,
                return -EINVAL;
        }
 
-       err = lan966x_police_del(port, port->tc.police_id);
+       err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Failed to add policer to port");
index 871a3e6..2d76366 100644 (file)
@@ -249,6 +249,21 @@ static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
        return 0;
 }
 
+static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+       int err;
+
+       if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+               err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
+       else
+               err = dcb_ieee_delapp(dev, app);
+
+       if (err < 0)
+               return err;
+
+       return sparx5_dcb_app_update(dev);
+}
+
 static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 {
        struct dcb_app app_itr;
@@ -264,7 +279,7 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
        if (prio) {
                app_itr = *app;
                app_itr.priority = prio;
-               dcb_ieee_delapp(dev, &app_itr);
+               sparx5_dcb_ieee_delapp(dev, &app_itr);
        }
 
        if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
@@ -281,21 +296,6 @@ out:
        return err;
 }
 
-static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
-{
-       int err;
-
-       if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
-               err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
-       else
-               err = dcb_ieee_delapp(dev, app);
-
-       if (err < 0)
-               return err;
-
-       return sparx5_dcb_app_update(dev);
-}
-
 static int sparx5_dcb_setapptrust(struct net_device *dev, u8 *selectors,
                                  int nselectors)
 {
index 59fb058..0cc026b 100644 (file)
@@ -324,14 +324,15 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
 
        /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
        nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
-       nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+       if (ipsec)
+               nfp_nfd3_ipsec_tx(txd, skb);
+       else
+               nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
        if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
                txd->flags |= NFD3_DESC_TX_VLAN;
                txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
        }
 
-       if (ipsec)
-               nfp_nfd3_ipsec_tx(txd, skb);
        /* Gather DMA */
        if (nr_frags > 0) {
                __le64 second_half;
index e90f8c9..5108769 100644 (file)
 void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
 {
        struct xfrm_state *x = xfrm_input_state(skb);
+       struct xfrm_offload *xo = xfrm_offload(skb);
+       struct iphdr *iph = ip_hdr(skb);
+       int l4_proto;
 
        if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
-               txd->flags |= NFD3_DESC_TX_CSUM | NFD3_DESC_TX_IP4_CSUM |
-                             NFD3_DESC_TX_TCP_CSUM | NFD3_DESC_TX_UDP_CSUM;
+               txd->flags |= NFD3_DESC_TX_CSUM;
+
+               if (iph->version == 4)
+                       txd->flags |= NFD3_DESC_TX_IP4_CSUM;
+
+               if (x->props.mode == XFRM_MODE_TRANSPORT)
+                       l4_proto = xo->proto;
+               else if (x->props.mode == XFRM_MODE_TUNNEL)
+                       l4_proto = xo->inner_ipproto;
+               else
+                       return;
+
+               switch (l4_proto) {
+               case IPPROTO_UDP:
+                       txd->flags |= NFD3_DESC_TX_UDP_CSUM;
+                       return;
+               case IPPROTO_TCP:
+                       txd->flags |= NFD3_DESC_TX_TCP_CSUM;
+                       return;
+               }
        }
 }
index d60c0e9..33b6d74 100644 (file)
@@ -387,7 +387,8 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
        if (!skb_is_gso(skb)) {
                real_len = skb->len;
                /* Metadata desc */
-               metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
+               if (!ipsec)
+                       metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
                txd->raw = cpu_to_le64(metadata);
                txd++;
        } else {
@@ -395,7 +396,8 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
                (txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
                real_len = txbuf->real_len;
                /* Metadata desc */
-               metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
+               if (!ipsec)
+                       metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
                txd->raw = cpu_to_le64(metadata);
                txd += 2;
                txbuf++;
index 58d8f59..cec199f 100644 (file)
@@ -9,9 +9,13 @@
 u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
 {
        struct xfrm_state *x = xfrm_input_state(skb);
+       struct iphdr *iph = ip_hdr(skb);
 
-       if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM))
-               flags |= NFDK_DESC_TX_L3_CSUM | NFDK_DESC_TX_L4_CSUM;
+       if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
+               if (iph->version == 4)
+                       flags |= NFDK_DESC_TX_L3_CSUM;
+               flags |= NFDK_DESC_TX_L4_CSUM;
+       }
 
        return flags;
 }
index 81b7ca0..62f0bf9 100644 (file)
@@ -38,6 +38,7 @@
 #include <net/tls.h>
 #include <net/vxlan.h>
 #include <net/xdp_sock_drv.h>
+#include <net/xfrm.h>
 
 #include "nfpcore/nfp_dev.h"
 #include "nfpcore/nfp_nsp.h"
@@ -1897,6 +1898,9 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
                        features &= ~NETIF_F_GSO_MASK;
        }
 
+       if (xfrm_offload(skb))
+               return features;
+
        /* VXLAN/GRE check */
        switch (vlan_get_protocol(skb)) {
        case htons(ETH_P_IP):
index e4902a7..8f543c3 100644 (file)
@@ -1170,6 +1170,7 @@ static int stmmac_init_phy(struct net_device *dev)
 
                phylink_ethtool_get_wol(priv->phylink, &wol);
                device_set_wakeup_capable(priv->device, !!wol.supported);
+               device_set_wakeup_enable(priv->device, !!wol.wolopts);
        }
 
        return ret;
index e1a569b..0b0c6c0 100644 (file)
@@ -1913,6 +1913,8 @@ static int ca8210_skb_tx(
         * packet
         */
        mac_len = ieee802154_hdr_peek_addrs(skb, &header);
+       if (mac_len < 0)
+               return mac_len;
 
        secspec.security_level = header.sec.level;
        secspec.key_id_mode = header.sec.key_id_mode;
index ccecee2..0b88635 100644 (file)
@@ -342,6 +342,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
        return genphy_config_aneg(phydev);
 }
 
+static void lan88xx_link_change_notify(struct phy_device *phydev)
+{
+       int temp;
+
+       /* At forced 100 F/H mode, chip may fail to set mode correctly
+        * when cable is switched between long(~50+m) and short one.
+        * As workaround, set to 10 before setting to 100
+        * at forced 100 F/H mode.
+        */
+       if (!phydev->autoneg && phydev->speed == 100) {
+               /* disable phy interrupt */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+               phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+               temp = phy_read(phydev, MII_BMCR);
+               temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+               phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+               temp |= BMCR_SPEED100;
+               phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+               /* clear pending interrupt generated while workaround */
+               temp = phy_read(phydev, LAN88XX_INT_STS);
+
+               /* enable phy interrupt back */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+               phy_write(phydev, LAN88XX_INT_MASK, temp);
+       }
+}
+
 static struct phy_driver microchip_phy_driver[] = {
 {
        .phy_id         = 0x0007c132,
@@ -359,6 +390,7 @@ static struct phy_driver microchip_phy_driver[] = {
 
        .config_init    = lan88xx_config_init,
        .config_aneg    = lan88xx_config_aneg,
+       .link_change_notify = lan88xx_link_change_notify,
 
        .config_intr    = lan88xx_phy_config_intr,
        .handle_interrupt = lan88xx_handle_interrupt,
index 3f8a64f..1785f1c 100644 (file)
@@ -3098,8 +3098,6 @@ static int phy_probe(struct device *dev)
        if (phydrv->flags & PHY_IS_INTERNAL)
                phydev->is_internal = true;
 
-       mutex_lock(&phydev->lock);
-
        /* Deassert the reset signal */
        phy_device_reset(phydev, 0);
 
@@ -3146,7 +3144,7 @@ static int phy_probe(struct device *dev)
         */
        err = genphy_c45_read_eee_adv(phydev, phydev->advertising_eee);
        if (err)
-               return err;
+               goto out;
 
        /* There is no "enabled" flag. If PHY is advertising, assume it is
         * kind of enabled.
@@ -3188,12 +3186,10 @@ static int phy_probe(struct device *dev)
        phydev->state = PHY_READY;
 
 out:
-       /* Assert the reset signal */
+       /* Re-assert the reset signal on error */
        if (err)
                phy_device_reset(phydev, 1);
 
-       mutex_unlock(&phydev->lock);
-
        return err;
 }
 
@@ -3203,9 +3199,7 @@ static int phy_remove(struct device *dev)
 
        cancel_delayed_work_sync(&phydev->state_queue);
 
-       mutex_lock(&phydev->lock);
        phydev->state = PHY_DOWN;
-       mutex_unlock(&phydev->lock);
 
        sfp_bus_del_upstream(phydev->sfp_bus);
        phydev->sfp_bus = NULL;
index ac7481c..00d9eff 100644 (file)
@@ -44,7 +44,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
 };
 
 struct smsc_phy_priv {
-       u16 intmask;
        bool energy_enable;
 };
 
@@ -57,7 +56,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 
 static int smsc_phy_config_intr(struct phy_device *phydev)
 {
-       struct smsc_phy_priv *priv = phydev->priv;
        int rc;
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -65,14 +63,9 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
                if (rc)
                        return rc;
 
-               priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
-               if (priv->energy_enable)
-                       priv->intmask |= MII_LAN83C185_ISF_INT7;
-
-               rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask);
+               rc = phy_write(phydev, MII_LAN83C185_IM,
+                              MII_LAN83C185_ISF_INT_PHYLIB_EVENTS);
        } else {
-               priv->intmask = 0;
-
                rc = phy_write(phydev, MII_LAN83C185_IM, 0);
                if (rc)
                        return rc;
@@ -85,7 +78,6 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
 
 static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
 {
-       struct smsc_phy_priv *priv = phydev->priv;
        int irq_status;
 
        irq_status = phy_read(phydev, MII_LAN83C185_ISF);
@@ -96,7 +88,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
                return IRQ_NONE;
        }
 
-       if (!(irq_status & priv->intmask))
+       if (!(irq_status & MII_LAN83C185_ISF_INT_PHYLIB_EVENTS))
                return IRQ_NONE;
 
        phy_trigger_machine(phydev);
index c896393..cd4083e 100644 (file)
@@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = {
          .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
        },
 
+       /* Telit FE990 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+       },
+
        /* default entry */
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info_zlp,
index f18ab8e..0684888 100644 (file)
@@ -2115,33 +2115,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
 static void lan78xx_link_status_change(struct net_device *net)
 {
        struct phy_device *phydev = net->phydev;
-       int temp;
-
-       /* At forced 100 F/H mode, chip may fail to set mode correctly
-        * when cable is switched between long(~50+m) and short one.
-        * As workaround, set to 10 before setting to 100
-        * at forced 100 F/H mode.
-        */
-       if (!phydev->autoneg && (phydev->speed == 100)) {
-               /* disable phy interrupt */
-               temp = phy_read(phydev, LAN88XX_INT_MASK);
-               temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
-               phy_write(phydev, LAN88XX_INT_MASK, temp);
 
-               temp = phy_read(phydev, MII_BMCR);
-               temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
-               phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
-               temp |= BMCR_SPEED100;
-               phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
-
-               /* clear pending interrupt generated while workaround */
-               temp = phy_read(phydev, LAN88XX_INT_STS);
-
-               /* enable phy interrupt back */
-               temp = phy_read(phydev, LAN88XX_INT_MASK);
-               temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
-               phy_write(phydev, LAN88XX_INT_MASK, temp);
-       }
+       phy_print_status(phydev);
 }
 
 static int irq_map(struct irq_domain *d, unsigned int irq,
index a808d71..571e37e 100644 (file)
@@ -1364,6 +1364,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
index 583adb3..125284b 100644 (file)
@@ -106,7 +106,7 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
 {
        unsigned int cpu = *stored_cpu, cpu_index, i;
 
-       if (unlikely(cpu == nr_cpumask_bits ||
+       if (unlikely(cpu >= nr_cpu_ids ||
                     !cpumask_test_cpu(cpu, cpu_online_mask))) {
                cpu_index = id % cpumask_weight(cpu_online_mask);
                cpu = cpumask_first(cpu_online_mask);
index 2d53e0f..1e0f229 100644 (file)
@@ -247,6 +247,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
                                           len, sizeof(**fw_vsc_cfg),
                                           GFP_KERNEL);
 
+               if (!*fw_vsc_cfg)
+                       goto alloc_err;
+
                r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
                                                  *fw_vsc_cfg, len);
 
@@ -260,6 +263,7 @@ vsc_read_err:
                *fw_vsc_cfg = NULL;
        }
 
+alloc_err:
        dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
                *clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
 }
index 09c7829..382793e 100644 (file)
@@ -16,17 +16,17 @@ if MELLANOX_PLATFORM
 
 config MLXREG_HOTPLUG
        tristate "Mellanox platform hotplug driver support"
-       depends on REGMAP
        depends on HWMON
        depends on I2C
+       select REGMAP
        help
          This driver handles hot-plug events for the power suppliers, power
          cables and fans on the wide range Mellanox IB and Ethernet systems.
 
 config MLXREG_IO
        tristate "Mellanox platform register access driver support"
-       depends on REGMAP
        depends on HWMON
+       select REGMAP
        help
          This driver allows access to Mellanox programmable device register
          space through sysfs interface. The sets of registers for sysfs access
@@ -36,9 +36,9 @@ config MLXREG_IO
 
 config MLXREG_LC
        tristate "Mellanox line card platform driver support"
-       depends on REGMAP
        depends on HWMON
        depends on I2C
+       select REGMAP
        help
          This driver provides support for the Mellanox MSN4800-XX line cards,
          which are the part of MSN4800 Ethernet modular switch systems
@@ -80,10 +80,9 @@ config MLXBF_PMC
 
 config NVSW_SN2201
        tristate "Nvidia SN2201 platform driver support"
-       depends on REGMAP
        depends on HWMON
        depends on I2C
-       depends on REGMAP_I2C
+       select REGMAP_I2C
        help
          This driver provides support for the Nvidia SN2201 platform.
          The SN2201 is a highly integrated for one rack unit system with
index ec7c2b4..4a01b31 100644 (file)
@@ -955,7 +955,8 @@ config SERIAL_MULTI_INSTANTIATE
 
 config MLX_PLATFORM
        tristate "Mellanox Technologies platform support"
-       depends on I2C && REGMAP
+       depends on I2C
+       select REGMAP
        help
          This option enables system support for the Mellanox Technologies
          platform. The Mellanox systems provide data center networking
index ab05b9e..2edaae0 100644 (file)
@@ -171,9 +171,7 @@ MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
 static struct amd_pmc_dev pmc;
 static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
-#ifdef CONFIG_SUSPEND
 static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
-#endif
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -386,7 +384,6 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
        return 0;
 }
 
-#ifdef CONFIG_SUSPEND
 static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
 {
        struct smu_metrics table;
@@ -400,7 +397,6 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
                dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
                         table.timein_s0i3_lastcapture);
 }
-#endif
 
 static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
 {
@@ -673,7 +669,6 @@ out_unlock:
        return rc;
 }
 
-#ifdef CONFIG_SUSPEND
 static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
 {
        switch (dev->cpu_id) {
@@ -861,9 +856,7 @@ static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
-
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
 
 static const struct pci_device_id pmc_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
@@ -905,7 +898,6 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
        return 0;
 }
 
-#ifdef CONFIG_SUSPEND
 static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 {
        int err;
@@ -926,7 +918,6 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 
        return 0;
 }
-#endif
 
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
 {
@@ -1017,11 +1008,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
        }
 
        platform_set_drvdata(pdev, dev);
-#ifdef CONFIG_SUSPEND
-       err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
-       if (err)
-               dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
-#endif
+       if (IS_ENABLED(CONFIG_SUSPEND)) {
+               err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
+               if (err)
+                       dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+       }
 
        amd_pmc_dbgfs_register(dev);
        return 0;
@@ -1035,9 +1026,8 @@ static int amd_pmc_remove(struct platform_device *pdev)
 {
        struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
-#ifdef CONFIG_SUSPEND
-       acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
-#endif
+       if (IS_ENABLED(CONFIG_SUSPEND))
+               acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
        amd_pmc_dbgfs_unregister(dev);
        pci_dev_put(dev->rdev);
        mutex_destroy(&dev->lock);
@@ -1061,9 +1051,7 @@ static struct platform_driver amd_pmc_driver = {
                .name = "amd_pmc",
                .acpi_match_table = amd_pmc_acpi_ids,
                .dev_groups = pmc_groups,
-#ifdef CONFIG_SUSPEND
-               .pm = &amd_pmc_pm,
-#endif
+               .pm = pm_sleep_ptr(&amd_pmc_pm),
        },
        .probe = amd_pmc_probe,
        .remove = amd_pmc_remove,
index d547c9d..2750dee 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/kernel.h>
 #include <linux/hwmon.h>
 #include <linux/kstrtox.h>
-#include <linux/math.h>
 #include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -96,6 +95,7 @@ struct combined_chip_info {
 };
 
 struct dell_wmi_ddv_sensors {
+       bool active;
        struct mutex lock;      /* protect caching */
        unsigned long timestamp;
        union acpi_object *obj;
@@ -520,6 +520,9 @@ static struct hwmon_channel_info *dell_wmi_ddv_channel_create(struct device *dev
 
 static void dell_wmi_ddv_hwmon_cache_invalidate(struct dell_wmi_ddv_sensors *sensors)
 {
+       if (!sensors->active)
+               return;
+
        mutex_lock(&sensors->lock);
        kfree(sensors->obj);
        sensors->obj = NULL;
@@ -530,6 +533,7 @@ static void dell_wmi_ddv_hwmon_cache_destroy(void *data)
 {
        struct dell_wmi_ddv_sensors *sensors = data;
 
+       sensors->active = false;
        mutex_destroy(&sensors->lock);
        kfree(sensors->obj);
 }
@@ -549,6 +553,7 @@ static struct hwmon_channel_info *dell_wmi_ddv_channel_init(struct wmi_device *w
                return ERR_PTR(ret);
 
        mutex_init(&sensors->lock);
+       sensors->active = true;
 
        ret = devm_add_action_or_reset(&wdev->dev, dell_wmi_ddv_hwmon_cache_destroy, sensors);
        if (ret < 0)
@@ -659,7 +664,8 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char
        if (ret < 0)
                return ret;
 
-       return sysfs_emit(buf, "%d\n", DIV_ROUND_CLOSEST(value, 10));
+       /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
+       return sysfs_emit(buf, "%d\n", value - 2731);
 }
 
 static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -852,7 +858,7 @@ static int dell_wmi_ddv_resume(struct device *dev)
 {
        struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
 
-       /* Force re-reading of all sensors */
+       /* Force re-reading of all active sensors */
        dell_wmi_ddv_hwmon_cache_invalidate(&data->fans);
        dell_wmi_ddv_hwmon_cache_invalidate(&data->temps);
 
index 309eab9..322237e 100644 (file)
@@ -159,9 +159,10 @@ static const struct int3472_tps68470_board_data surface_go_tps68470_board_data =
 static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
        .dev_name = "i2c-INT3472:01",
        .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
-       .n_gpiod_lookups = 1,
+       .n_gpiod_lookups = 2,
        .tps68470_gpio_lookup_tables = {
-               &surface_go_int347a_gpios
+               &surface_go_int347a_gpios,
+               &surface_go_int347e_gpios,
        },
 };
 
index a7e02b2..0954a04 100644 (file)
@@ -47,7 +47,7 @@ struct isst_cmd_set_req_type {
 
 static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
        {0xD0, 0x00, 0x03},
-       {0x7F, 0x00, 0x0B},
+       {0x7F, 0x00, 0x0C},
        {0x7F, 0x10, 0x12},
        {0x7F, 0x20, 0x23},
        {0x94, 0x03, 0x03},
@@ -112,6 +112,7 @@ static void isst_delete_hash(void)
  * isst_store_cmd() - Store command to a hash table
  * @cmd: Mailbox command.
  * @sub_cmd: Mailbox sub-command or MSR id.
+ * @cpu: Target CPU for the command
  * @mbox_cmd_type: Mailbox or MSR command.
  * @param: Mailbox parameter.
  * @data: Mailbox request data or MSR data.
@@ -363,7 +364,7 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
 /**
  * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
  * @cpu: Logical CPU number.
- * @bus_number: The bus number assigned by the hardware.
+ * @bus_no: The bus number assigned by the hardware.
  * @dev: The device number assigned by the hardware.
  * @fn: The function number assigned by the hardware.
  *
index fdecdae..35ff506 100644 (file)
@@ -40,6 +40,7 @@
  * @offset:    Offset to the first valid member in command structure.
  *             This will be the offset of the start of the command
  *             after command count field
+ * @owner:     Registered module owner
  * @cmd_callback: Callback function to handle IOCTL. The callback has the
  *             command pointer with data for command. There is a pointer
  *             called write_only, which when set, will not copy the
index c607332..c999732 100644 (file)
@@ -209,14 +209,14 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
        if (!name)
                return -EOPNOTSUPP;
 
-       feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
-       if (!feature_vsec_dev)
+       res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
+       if (!res)
                return -ENOMEM;
 
-       res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
-       if (!res) {
+       feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
+       if (!feature_vsec_dev) {
                ret = -ENOMEM;
-               goto free_vsec;
+               goto free_res;
        }
 
        snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);
@@ -239,6 +239,8 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
        /*
         * intel_vsec_add_aux() is resource managed, no explicit
         * delete is required on error or on module unload.
+        * feature_vsec_dev memory is also freed as part of device
+        * delete.
         */
        ret = intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
                                 feature_vsec_dev, feature_id_name);
@@ -249,8 +251,6 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
 
 free_res:
        kfree(res);
-free_vsec:
-       kfree(feature_vsec_dev);
 
        return ret;
 }
index 7b6779c..67367f0 100644 (file)
@@ -5980,7 +5980,7 @@ MODULE_DEVICE_TABLE(dmi, mlxplat_dmi_table);
 static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
 {
        struct i2c_adapter *search_adap;
-       int shift, i;
+       int i, shift = 0;
 
        /* Scan adapters from expected id to verify it is free. */
        *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
index 0c3fcb8..a63279f 100644 (file)
@@ -2495,8 +2495,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
        hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
        shost->nr_hw_queues = hisi_hba->cq_nvecs;
 
-       devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
-       return 0;
+       return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
 }
 
 static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
index 61958a2..4f74859 100644 (file)
@@ -7291,6 +7291,8 @@ lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
        /* Find out if the FW has a new set of congestion parameters. */
        len = sizeof(struct lpfc_cgn_param);
        pdata = kzalloc(len, GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
        ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
                               pdata, len);
 
@@ -12563,7 +12565,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
                                        goto found_same;
                                new_cpu = cpumask_next(
                                        new_cpu, cpu_present_mask);
-                               if (new_cpu == nr_cpumask_bits)
+                               if (new_cpu >= nr_cpu_ids)
                                        new_cpu = first_cpu;
                        }
                        /* At this point, we leave the CPU as unassigned */
@@ -12577,7 +12579,7 @@ found_same:
                         * selecting the same IRQ.
                         */
                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (start_cpu == nr_cpumask_bits)
+                       if (start_cpu >= nr_cpu_ids)
                                start_cpu = first_cpu;
 
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12613,7 +12615,7 @@ found_same:
                                        goto found_any;
                                new_cpu = cpumask_next(
                                        new_cpu, cpu_present_mask);
-                               if (new_cpu == nr_cpumask_bits)
+                               if (new_cpu >= nr_cpu_ids)
                                        new_cpu = first_cpu;
                        }
                        /* We should never leave an entry unassigned */
@@ -12631,7 +12633,7 @@ found_any:
                         * selecting the same IRQ.
                         */
                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (start_cpu == nr_cpumask_bits)
+                       if (start_cpu >= nr_cpu_ids)
                                start_cpu = first_cpu;
 
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12704,7 +12706,7 @@ found_any:
                                goto found_hdwq;
                        }
                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (new_cpu == nr_cpumask_bits)
+                       if (new_cpu >= nr_cpu_ids)
                                new_cpu = first_cpu;
                }
 
@@ -12719,7 +12721,7 @@ found_any:
                                goto found_hdwq;
 
                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (new_cpu == nr_cpumask_bits)
+                       if (new_cpu >= nr_cpu_ids)
                                new_cpu = first_cpu;
                }
 
@@ -12730,7 +12732,7 @@ found_any:
  found_hdwq:
                /* We found an available entry, copy the IRQ info */
                start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-               if (start_cpu == nr_cpumask_bits)
+               if (start_cpu >= nr_cpu_ids)
                        start_cpu = first_cpu;
                cpup->hdwq = new_cpup->hdwq;
  logit:
index c5b69f3..cf630aa 100644 (file)
@@ -21899,20 +21899,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
 static struct lpfc_io_buf *
 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
 {
-       struct lpfc_io_buf *lpfc_ncmd;
+       struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;
        struct lpfc_epd_pool *epd_pool;
 
        epd_pool = &phba->epd_pool;
-       lpfc_ncmd = NULL;
 
        spin_lock_irqsave(&epd_pool->lock, iflag);
        if (epd_pool->count > 0) {
-               list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+               list_for_each_entry_safe(iter, lpfc_ncmd_next,
                                         &epd_pool->list, list) {
-                       list_del(&lpfc_ncmd->list);
+                       list_del(&iter->list);
                        epd_pool->count--;
+                       lpfc_ncmd = iter;
                        break;
                }
        }
@@ -22109,10 +22109,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
        struct lpfc_dmabuf *pcmd;
        u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
 
-       /* sanity check on queue memory */
-       if (!datap)
-               return -ENODEV;
-
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;
index 4919ea5..63bac36 100644 (file)
@@ -23,8 +23,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "07.719.03.00-rc1"
-#define MEGASAS_RELDATE                                "Sep 29, 2021"
+#define MEGASAS_VERSION                                "07.725.01.00-rc1"
+#define MEGASAS_RELDATE                                "Mar 2, 2023"
 
 #define MEGASAS_MSIX_NAME_LEN                  32
 
@@ -1519,6 +1519,8 @@ struct megasas_ctrl_info {
 #define MEGASAS_MAX_LD_IDS                     (MEGASAS_MAX_LD_CHANNELS * \
                                                MEGASAS_MAX_DEV_PER_CHANNEL)
 
+#define MEGASAS_MAX_SUPPORTED_LD_IDS           240
+
 #define MEGASAS_MAX_SECTORS                    (2*1024)
 #define MEGASAS_MAX_SECTORS_IEEE               (2*128)
 #define MEGASAS_DBG_LVL                                1
@@ -1758,7 +1760,8 @@ union megasas_sgl_frame {
 typedef union _MFI_CAPABILITIES {
        struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-       u32     reserved:16;
+       u32     reserved:15;
+       u32     support_memdump:1;
        u32     support_fw_exposed_dev_list:1;
        u32     support_nvme_passthru:1;
        u32     support_64bit_mode:1;
@@ -1792,7 +1795,8 @@ typedef union _MFI_CAPABILITIES {
        u32     support_64bit_mode:1;
        u32     support_nvme_passthru:1;
        u32     support_fw_exposed_dev_list:1;
-       u32     reserved:16;
+       u32     support_memdump:1;
+       u32     reserved:15;
 #endif
        } mfi_capabilities;
        __le32          reg;
index da1cad1..4463a53 100644 (file)
@@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
                ld = MR_TargetIdToLdGet(i, drv_map);
 
                /* For non existing VDs, iterate to next VD*/
-               if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+               if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
                        continue;
 
                raid = MR_LdRaidGet(ld, drv_map);
index 6597e11..84c9a55 100644 (file)
@@ -1201,6 +1201,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
        drv_ops->mfi_capabilities.support_nvme_passthru = 1;
        drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
 
+       if (reset_devices)
+               drv_ops->mfi_capabilities.support_memdump = 1;
+
        if (instance->consistent_mask_64bit)
                drv_ops->mfi_capabilities.support_64bit_mode = 1;
 
index 23de260..40f238f 100644 (file)
@@ -902,6 +902,7 @@ struct scmd_priv {
  * @admin_reply_ephase:Admin reply queue expected phase
  * @admin_reply_base: Admin reply queue base virtual address
  * @admin_reply_dma: Admin reply queue base dma address
+ * @admin_reply_q_in_use: Queue is handled by poll/ISR
  * @ready_timeout: Controller ready timeout
  * @intr_info: Interrupt cookie pointer
  * @intr_info_count: Number of interrupt cookies
@@ -1055,6 +1056,7 @@ struct mpi3mr_ioc {
        u8 admin_reply_ephase;
        void *admin_reply_base;
        dma_addr_t admin_reply_dma;
+       atomic_t admin_reply_q_in_use;
 
        u32 ready_timeout;
 
@@ -1390,4 +1392,5 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
 void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
 void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
 #endif /*MPI3MR_H_INCLUDED*/
index bff6377..d10c6af 100644 (file)
@@ -886,7 +886,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
                         * each time through the loop.
                         */
                        *prp_entry = cpu_to_le64(dma_addr);
-                       if (*prp1_entry & sgemod_mask) {
+                       if (*prp_entry & sgemod_mask) {
                                dprint_bsg_err(mrioc,
                                    "%s: PRP address collides with SGE modifier\n",
                                    __func__);
@@ -895,7 +895,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
                        *prp_entry &= ~sgemod_mask;
                        *prp_entry |= sgemod_val;
                        prp_entry++;
-                       prp_entry_dma++;
+                       prp_entry_dma += prp_size;
                }
 
                /*
index 758f7ca..29acf61 100644 (file)
@@ -415,7 +415,7 @@ out:
                    le64_to_cpu(scsi_reply->sense_data_buffer_address));
 }
 
-static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
 {
        u32 exp_phase = mrioc->admin_reply_ephase;
        u32 admin_reply_ci = mrioc->admin_reply_ci;
@@ -423,12 +423,17 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
        u64 reply_dma = 0;
        struct mpi3_default_reply_descriptor *reply_desc;
 
+       if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+               return 0;
+
        reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
            admin_reply_ci;
 
        if ((le16_to_cpu(reply_desc->reply_flags) &
-           MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+           MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+               atomic_dec(&mrioc->admin_reply_q_in_use);
                return 0;
+       }
 
        do {
                if (mrioc->unrecoverable)
@@ -454,6 +459,7 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
        writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
        mrioc->admin_reply_ci = admin_reply_ci;
        mrioc->admin_reply_ephase = exp_phase;
+       atomic_dec(&mrioc->admin_reply_q_in_use);
 
        return num_admin_replies;
 }
@@ -1192,7 +1198,7 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
  */
 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
 {
-       u32 ioc_config, ioc_status, timeout;
+       u32 ioc_config, ioc_status, timeout, host_diagnostic;
        int retval = 0;
        enum mpi3mr_iocstate ioc_state;
        u64 base_info;
@@ -1246,6 +1252,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
                            retval, mpi3mr_iocstate_name(ioc_state));
        }
        if (ioc_state != MRIOC_STATE_RESET) {
+               if (ioc_state == MRIOC_STATE_FAULT) {
+                       timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+                       mpi3mr_print_fault_info(mrioc);
+                       do {
+                               host_diagnostic =
+                                       readl(&mrioc->sysif_regs->host_diagnostic);
+                               if (!(host_diagnostic &
+                                     MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+                                       break;
+                               if (!pci_device_is_present(mrioc->pdev)) {
+                                       mrioc->unrecoverable = 1;
+                                       ioc_err(mrioc, "controller is not present at the bringup\n");
+                                       goto out_device_not_present;
+                               }
+                               msleep(100);
+                       } while (--timeout);
+               }
                mpi3mr_print_fault_info(mrioc);
                ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
                retval = mpi3mr_issue_reset(mrioc,
@@ -2605,6 +2628,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
        mrioc->admin_reply_ci = 0;
        mrioc->admin_reply_ephase = 1;
        mrioc->admin_reply_base = NULL;
+       atomic_set(&mrioc->admin_reply_q_in_use, 0);
 
        if (!mrioc->admin_req_base) {
                mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
@@ -3816,8 +3840,10 @@ retry_init:
        dprint_init(mrioc, "allocating config page buffers\n");
        mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
            MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
-       if (!mrioc->cfg_page)
+       if (!mrioc->cfg_page) {
+               retval = -1;
                goto out_failed_noretry;
+       }
 
        mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
 
@@ -3879,8 +3905,10 @@ retry_init:
                dprint_init(mrioc, "allocating memory for throttle groups\n");
                sz = sizeof(struct mpi3mr_throttle_group_info);
                mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
-               if (!mrioc->throttle_groups)
+               if (!mrioc->throttle_groups) {
+                       retval = -1;
                        goto out_failed_noretry;
+               }
        }
 
        retval = mpi3mr_enable_events(mrioc);
@@ -3900,6 +3928,7 @@ out_failed:
                mpi3mr_memset_buffers(mrioc);
                goto retry_init;
        }
+       retval = -1;
 out_failed_noretry:
        ioc_err(mrioc, "controller initialization failed\n");
        mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
@@ -4012,6 +4041,7 @@ retry_init:
                ioc_err(mrioc,
                    "cannot create minimum number of operational queues expected:%d created:%d\n",
                    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
+               retval = -1;
                goto out_failed_noretry;
        }
 
@@ -4078,6 +4108,7 @@ out_failed:
                mpi3mr_memset_buffers(mrioc);
                goto retry_init;
        }
+       retval = -1;
 out_failed_noretry:
        ioc_err(mrioc, "controller %s is failed\n",
            (is_resume)?"resume":"re-initialization");
@@ -4155,6 +4186,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
                memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
        if (mrioc->admin_reply_base)
                memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+       atomic_set(&mrioc->admin_reply_q_in_use, 0);
 
        if (mrioc->init_cmds.reply) {
                memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
index 6eaeba4..a794cc8 100644 (file)
@@ -3720,6 +3720,7 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
                mpi3mr_poll_pend_io_completions(mrioc);
                mpi3mr_ioc_enable_intr(mrioc);
                mpi3mr_poll_pend_io_completions(mrioc);
+               mpi3mr_process_admin_reply_q(mrioc);
        }
        switch (tm_type) {
        case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
index 3b61815..be25f24 100644 (file)
@@ -1552,7 +1552,8 @@ static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
 
        list_for_each_entry_safe(mr_sas_phy, next_phy,
            &mr_sas_port->phy_list, port_siblings) {
-               if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+               if ((!mrioc->stop_drv_processing) &&
+                   (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
                        dev_info(&mr_sas_port->port->dev,
                            "remove: sas_address(0x%016llx), phy(%d)\n",
                            (unsigned long long)
@@ -2357,15 +2358,16 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
        tgtdev->host_exposed = 1;
        if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
            sas_address_parent, hba_port)) {
-               tgtdev->host_exposed = 0;
                retval = -1;
-       } else if ((!tgtdev->starget)) {
-               if (!mrioc->is_driver_loading)
+               } else if ((!tgtdev->starget) && (!mrioc->is_driver_loading)) {
                        mpi3mr_sas_port_remove(mrioc, sas_address,
                            sas_address_parent, hba_port);
-               tgtdev->host_exposed = 0;
                retval = -1;
        }
+       if (retval) {
+               tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
+               tgtdev->host_exposed = 0;
+       }
        return retval;
 }
 
@@ -2394,6 +2396,7 @@ void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
        mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
            hba_port);
        tgtdev->host_exposed = 0;
+       tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
 }
 
 /**
@@ -2450,7 +2453,7 @@ static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *
 
                tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
                            rphy->identify.sas_address, rphy);
-               if (tgtdev) {
+               if (tgtdev && tgtdev->dev_spec.sas_sata_inf.hba_port) {
                        port_id =
                                tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
                        mpi3mr_tgtdev_put(tgtdev);
index 9142df8..9aba07c 100644 (file)
@@ -192,6 +192,7 @@ extern int ql2xsecenable;
 extern int ql2xenforce_iocb_limit;
 extern int ql2xabts_wait_nvme;
 extern u32 ql2xnvme_queues;
+extern int ql2xfc2target;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
index 1dbc149..ec0423e 100644 (file)
@@ -1840,7 +1840,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
        case RSCN_PORT_ADDR:
                fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
                if (fcport) {
-                       if (fcport->flags & FCF_FCP2_DEVICE &&
+                       if (ql2xfc2target &&
+                           fcport->flags & FCF_FCP2_DEVICE &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                ql_dbg(ql_dbg_disc, vha, 0x2115,
                                       "Delaying session delete for FCP2 portid=%06x %8phC ",
index 5451676..80c4ee9 100644 (file)
@@ -360,6 +360,13 @@ MODULE_PARM_DESC(ql2xnvme_queues,
        "1 - Minimum number of queues supported\n"
        "8 - Default value");
 
+int ql2xfc2target = 1;
+module_param(ql2xfc2target, int, 0444);
+MODULE_PARM_DESC(qla2xfc2target,
+                 "Enables FC2 Target support. "
+                 "0 - FC2 Target support is disabled. "
+                 "1 - FC2 Target support is enabled (default).");
+
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
 
@@ -4085,7 +4092,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
            "Mark all dev lost\n");
 
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
-               if (fcport->loop_id != FC_NO_LOOP_ID &&
+               if (ql2xfc2target &&
+                   fcport->loop_id != FC_NO_LOOP_ID &&
                    (fcport->flags & FCF_FCP2_DEVICE) &&
                    fcport->port_type == FCT_TARGET &&
                    !qla2x00_reset_active(vha)) {
index 4f28dd6..4bb8704 100644 (file)
@@ -2988,8 +2988,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
        }
 
        if (sdkp->device->type == TYPE_ZBC) {
-               /* Host-managed */
+               /*
+                * Host-managed: Per ZBC and ZAC specifications, writes in
+                * sequential write required zones of host-managed devices must
+                * be aligned to the device physical block size.
+                */
                disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
+               blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
        } else {
                sdkp->zoned = zoned;
                if (sdkp->zoned == 1) {
index 6b3a02d..22801c2 100644 (file)
@@ -965,14 +965,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
        disk_set_max_active_zones(disk, 0);
        nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
 
-       /*
-        * Per ZBC and ZAC specifications, writes in sequential write required
-        * zones of host-managed devices must be aligned to the device physical
-        * block size.
-        */
-       if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
-               blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
-
        sdkp->early_zone_info.nr_zones = nr_zones;
        sdkp->early_zone_info.zone_blocks = zone_blocks;
 
index 33f568b..d9ce379 100644 (file)
@@ -988,6 +988,22 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
                        }
 
                        /*
+                        * Check for "Operating parameters have changed"
+                        * due to Hyper-V changing the VHD/VHDX BlockSize
+                        * when adding/removing a differencing disk. This
+                        * causes discard_granularity to change, so do a
+                        * rescan to pick up the new granularity. We don't
+                        * want scsi_report_sense() to output a message
+                        * that a sysadmin wouldn't know what to do with.
+                        */
+                       if ((asc == 0x3f) && (ascq != 0x03) &&
+                                       (ascq != 0x0e)) {
+                               process_err_fn = storvsc_device_scan;
+                               set_host_byte(scmnd, DID_REQUEUE);
+                               goto do_work;
+                       }
+
+                       /*
                         * Otherwise, let upper layer deal with the
                         * error when sense message is present
                         */
index 2317fb0..557516c 100644 (file)
@@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key(
                return param;
 
        if (!(param->phase & phase)) {
-               pr_err("Key \"%s\" may not be negotiated during ",
-                               param->name);
+               char *phase_name;
+
                switch (phase) {
                case PHASE_SECURITY:
-                       pr_debug("Security phase.\n");
+                       phase_name = "Security";
                        break;
                case PHASE_OPERATIONAL:
-                       pr_debug("Operational phase.\n");
+                       phase_name = "Operational";
                        break;
                default:
-                       pr_debug("Unknown phase.\n");
+                       phase_name = "Unknown";
                }
+               pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+                               param->name, phase_name);
                return NULL;
        }
 
index 40725cb..90526f4 100644 (file)
@@ -166,7 +166,6 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp
        proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp);
        proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
 
-       thermal_zone_device_enable(tzd);
        pci_info->stored_thres = temp;
 
        return 0;
@@ -268,6 +267,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
                goto err_free_vectors;
        }
 
+       ret = thermal_zone_device_enable(pci_info->tzone);
+       if (ret)
+               goto err_free_vectors;
+
        return 0;
 
 err_free_vectors:
index 29c94be..abad091 100644 (file)
@@ -1666,9 +1666,9 @@ MODULE_ALIAS("spi:sc16is7xx");
 #endif
 
 #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
-static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
-                              const struct i2c_device_id *id)
+static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
 {
+       const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
        const struct sc16is7xx_devtype *devtype;
        struct regmap *regmap;
 
@@ -1709,7 +1709,7 @@ static struct i2c_driver sc16is7xx_i2c_uart_driver = {
                .name           = SC16IS7XX_NAME,
                .of_match_table = sc16is7xx_dt_ids,
        },
-       .probe          = sc16is7xx_i2c_probe,
+       .probe_new      = sc16is7xx_i2c_probe,
        .remove         = sc16is7xx_i2c_remove,
        .id_table       = sc16is7xx_i2c_id_table,
 };
index 172d25f..05eac96 100644 (file)
@@ -10512,4 +10512,5 @@ module_exit(ufshcd_core_exit);
 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
 MODULE_LICENSE("GPL");
index 34fc453..a02cd86 100644 (file)
@@ -1177,7 +1177,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                        err = ufs_qcom_clk_scale_down_post_change(hba);
 
 
-               if (err || !dev_req_params) {
+               if (err) {
                        ufshcd_uic_hibern8_exit(hba);
                        return err;
                }
@@ -1451,8 +1451,8 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
                if (IS_ERR(res->base)) {
                        dev_err(hba->dev, "Failed to map res %s, err=%d\n",
                                         res->name, (int)PTR_ERR(res->base));
-                       res->base = NULL;
                        ret = PTR_ERR(res->base);
+                       res->base = NULL;
                        return ret;
                }
        }
@@ -1466,7 +1466,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
        /* Explicitly allocate MCQ resource from ufs_mem */
        res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
        if (!res_mcq)
-               return ret;
+               return -ENOMEM;
 
        res_mcq->start = res_mem->start +
                         MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
@@ -1478,7 +1478,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
        if (ret) {
                dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
                        ret);
-               goto insert_res_err;
+               return ret;
        }
 
        res->base = devm_ioremap_resource(hba->dev, res_mcq);
@@ -1495,8 +1495,6 @@ out:
 ioremap_err:
        res->base = NULL;
        remove_resource(res_mcq);
-insert_res_err:
-       devm_kfree(hba->dev, res_mcq);
        return ret;
 }
 
index 62c4461..3d8b513 100644 (file)
@@ -442,8 +442,7 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
 }
 
 
-static int ds2482_probe(struct i2c_client *client,
-                       const struct i2c_device_id *id)
+static int ds2482_probe(struct i2c_client *client)
 {
        struct ds2482_data *data;
        int err = -ENODEV;
@@ -553,7 +552,7 @@ static struct i2c_driver ds2482_driver = {
        .driver = {
                .name   = "ds2482",
        },
-       .probe          = ds2482_probe,
+       .probe_new      = ds2482_probe,
        .remove         = ds2482_remove,
        .id_table       = ds2482_id,
 };
index d8b90f9..7265928 100644 (file)
@@ -287,7 +287,7 @@ static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
 
        if (btrfs_op(bio) == BTRFS_MAP_WRITE)
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
-       if (!(bio->bi_opf & REQ_RAHEAD))
+       else if (!(bio->bi_opf & REQ_RAHEAD))
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
        if (bio->bi_opf & REQ_PREFLUSH)
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
index 5b10401..0ef8b89 100644 (file)
@@ -558,14 +558,15 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
 static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
                                          struct btrfs_block_group *block_group,
                                          int index, int max_index,
-                                         struct btrfs_key *key)
+                                         struct btrfs_key *found_key)
 {
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_root *extent_root;
-       int ret = 0;
        u64 search_offset;
        u64 search_end = block_group->start + block_group->length;
        struct btrfs_path *path;
+       struct btrfs_key search_key;
+       int ret = 0;
 
        ASSERT(index >= 0);
        ASSERT(index <= max_index);
@@ -585,37 +586,24 @@ static int sample_block_group_extent_item(struct btrfs_caching_control *caching_
        path->reada = READA_FORWARD;
 
        search_offset = index * div_u64(block_group->length, max_index);
-       key->objectid = block_group->start + search_offset;
-       key->type = BTRFS_EXTENT_ITEM_KEY;
-       key->offset = 0;
+       search_key.objectid = block_group->start + search_offset;
+       search_key.type = BTRFS_EXTENT_ITEM_KEY;
+       search_key.offset = 0;
 
-       while (1) {
-               ret = btrfs_search_forward(extent_root, key, path, 0);
-               if (ret != 0)
-                       goto out;
+       btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
                /* Success; sampled an extent item in the block group */
-               if (key->type == BTRFS_EXTENT_ITEM_KEY &&
-                   key->objectid >= block_group->start &&
-                   key->objectid + key->offset <= search_end)
-                       goto out;
+               if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
+                   found_key->objectid >= block_group->start &&
+                   found_key->objectid + found_key->offset <= search_end)
+                       break;
 
                /* We can't possibly find a valid extent item anymore */
-               if (key->objectid >= search_end) {
+               if (found_key->objectid >= search_end) {
                        ret = 1;
                        break;
                }
-               if (key->type < BTRFS_EXTENT_ITEM_KEY)
-                       key->type = BTRFS_EXTENT_ITEM_KEY;
-               else
-                       key->objectid++;
-               btrfs_release_path(path);
-               up_read(&fs_info->commit_root_sem);
-               mutex_unlock(&caching_ctl->mutex);
-               cond_resched();
-               mutex_lock(&caching_ctl->mutex);
-               down_read(&fs_info->commit_root_sem);
        }
-out:
+
        lockdep_assert_held(&caching_ctl->mutex);
        lockdep_assert_held_read(&fs_info->commit_root_sem);
        btrfs_free_path(path);
@@ -659,6 +647,7 @@ out:
 static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
                                       struct btrfs_block_group *block_group)
 {
+       struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_key key;
        int i;
        u64 min_size = block_group->length;
@@ -668,6 +657,8 @@ static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl
        if (!btrfs_block_group_should_use_size_class(block_group))
                return 0;
 
+       lockdep_assert_held(&caching_ctl->mutex);
+       lockdep_assert_held_read(&fs_info->commit_root_sem);
        for (i = 0; i < 5; ++i) {
                ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
                if (ret < 0)
@@ -682,7 +673,6 @@ static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl
                block_group->size_class = size_class;
                spin_unlock(&block_group->lock);
        }
-
 out:
        return ret;
 }
@@ -1836,7 +1826,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 
                btrfs_info(fs_info,
                        "reclaiming chunk %llu with %llu%% used %llu%% unusable",
-                               bg->start, div_u64(bg->used * 100, bg->length),
+                               bg->start,
+                               div64_u64(bg->used * 100, bg->length),
                                div64_u64(zone_unusable * 100, bg->length));
                trace_btrfs_reclaim_block_group(bg);
                ret = btrfs_relocate_chunk(fs_info, bg->start);
@@ -2493,18 +2484,29 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
        struct btrfs_block_group_item bgi;
        struct btrfs_root *root = btrfs_block_group_root(fs_info);
        struct btrfs_key key;
+       u64 old_commit_used;
+       int ret;
 
        spin_lock(&block_group->lock);
        btrfs_set_stack_block_group_used(&bgi, block_group->used);
        btrfs_set_stack_block_group_chunk_objectid(&bgi,
                                                   block_group->global_root_id);
        btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
+       old_commit_used = block_group->commit_used;
+       block_group->commit_used = block_group->used;
        key.objectid = block_group->start;
        key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        key.offset = block_group->length;
        spin_unlock(&block_group->lock);
 
-       return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
+       ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
+       if (ret < 0) {
+               spin_lock(&block_group->lock);
+               block_group->commit_used = old_commit_used;
+               spin_unlock(&block_group->lock);
+       }
+
+       return ret;
 }
 
 static int insert_dev_extent(struct btrfs_trans_handle *trans,
index 0095c6e..6b457b0 100644 (file)
@@ -1048,7 +1048,7 @@ again:
         * so there is only one iref. The case that several irefs are
         * in the same item doesn't exist.
         */
-       btrfs_del_item(trans, root, path);
+       ret = btrfs_del_item(trans, root, path);
 out:
        btrfs_release_delayed_iref(node);
        btrfs_release_path(path);
index be94030..138afa9 100644 (file)
@@ -763,7 +763,13 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
                        goto next;
                }
 
+               flags = em->flags;
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+               /*
+                * In case we split the extent map, we want to preserve the
+                * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
+                * it on the new extent maps.
+                */
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
 
@@ -774,7 +780,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
                if (em->start >= start && em_end <= end)
                        goto remove_em;
 
-               flags = em->flags;
                gen = em->generation;
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 
index 84626c8..a0ef1a1 100644 (file)
@@ -2859,6 +2859,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
        di_args->bytes_used = btrfs_device_get_bytes_used(dev);
        di_args->total_bytes = btrfs_device_get_total_bytes(dev);
        memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
+       memcpy(di_args->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
        if (dev->name)
                strscpy(di_args->path, btrfs_dev_name(dev), sizeof(di_args->path));
        else
index 8c5efa5..37fc58a 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/spinlock.h>
 #include <linux/completion.h>
 #include <linux/bug.h>
+#include <linux/list.h>
 #include <crypto/hash.h>
 #include "messages.h"
 #include "ctree.h"
@@ -778,6 +779,45 @@ static ssize_t btrfs_chunk_size_store(struct kobject *kobj,
        return len;
 }
 
+static ssize_t btrfs_size_classes_show(struct kobject *kobj,
+                                      struct kobj_attribute *a, char *buf)
+{
+       struct btrfs_space_info *sinfo = to_space_info(kobj);
+       struct btrfs_block_group *bg;
+       u32 none = 0;
+       u32 small = 0;
+       u32 medium = 0;
+       u32 large = 0;
+
+       for (int i = 0; i < BTRFS_NR_RAID_TYPES; ++i) {
+               down_read(&sinfo->groups_sem);
+               list_for_each_entry(bg, &sinfo->block_groups[i], list) {
+                       if (!btrfs_block_group_should_use_size_class(bg))
+                               continue;
+                       switch (bg->size_class) {
+                       case BTRFS_BG_SZ_NONE:
+                               none++;
+                               break;
+                       case BTRFS_BG_SZ_SMALL:
+                               small++;
+                               break;
+                       case BTRFS_BG_SZ_MEDIUM:
+                               medium++;
+                               break;
+                       case BTRFS_BG_SZ_LARGE:
+                               large++;
+                               break;
+                       }
+               }
+               up_read(&sinfo->groups_sem);
+       }
+       return sysfs_emit(buf, "none %u\n"
+                              "small %u\n"
+                              "medium %u\n"
+                              "large %u\n",
+                              none, small, medium, large);
+}
+
 #ifdef CONFIG_BTRFS_DEBUG
 /*
  * Request chunk allocation with current chunk size.
@@ -835,6 +875,7 @@ SPACE_INFO_ATTR(bytes_zone_unusable);
 SPACE_INFO_ATTR(disk_used);
 SPACE_INFO_ATTR(disk_total);
 BTRFS_ATTR_RW(space_info, chunk_size, btrfs_chunk_size_show, btrfs_chunk_size_store);
+BTRFS_ATTR(space_info, size_classes, btrfs_size_classes_show);
 
 static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj,
                                                     struct kobj_attribute *a,
@@ -887,6 +928,7 @@ static struct attribute *space_info_attrs[] = {
        BTRFS_ATTR_PTR(space_info, disk_total),
        BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold),
        BTRFS_ATTR_PTR(space_info, chunk_size),
+       BTRFS_ATTR_PTR(space_info, size_classes),
 #ifdef CONFIG_BTRFS_DEBUG
        BTRFS_ATTR_PTR(space_info, force_chunk_alloc),
 #endif
index e165458..c08c0f5 100644 (file)
@@ -376,7 +376,7 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                if (bdev)
                        blksize_mask = bdev_logical_block_size(bdev) - 1;
                else
-                       blksize_mask = (1 << inode->i_blkbits) - 1;
+                       blksize_mask = i_blocksize(inode) - 1;
 
                if ((iocb->ki_pos | iov_iter_count(to) |
                     iov_iter_alignment(to)) & blksize_mask)
index 091fd5a..d38e19c 100644 (file)
@@ -47,7 +47,7 @@ void z_erofs_lzma_exit(void)
        }
 }
 
-int z_erofs_lzma_init(void)
+int __init z_erofs_lzma_init(void)
 {
        unsigned int i;
 
@@ -278,7 +278,7 @@ again:
                }
        }
        if (no < nrpages_out && strm->buf.out)
-               kunmap(rq->in[no]);
+               kunmap(rq->out[no]);
        if (ni < nrpages_in)
                kunmap(rq->in[ni]);
        /* 4. push back LZMA stream context to the global list */
index 3f3561d..1db018f 100644 (file)
@@ -486,7 +486,7 @@ static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
 void *erofs_get_pcpubuf(unsigned int requiredpages);
 void erofs_put_pcpubuf(void *ptr);
 int erofs_pcpubuf_growsize(unsigned int nrpages);
-void erofs_pcpubuf_init(void);
+void __init erofs_pcpubuf_init(void);
 void erofs_pcpubuf_exit(void);
 
 int erofs_register_sysfs(struct super_block *sb);
@@ -545,7 +545,7 @@ static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP;
 #endif /* !CONFIG_EROFS_FS_ZIP */
 
 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
-int z_erofs_lzma_init(void);
+int __init z_erofs_lzma_init(void);
 void z_erofs_lzma_exit(void);
 int z_erofs_load_lzma_config(struct super_block *sb,
                             struct erofs_super_block *dsb,
index a2efd83..c7a4b1d 100644 (file)
@@ -114,7 +114,7 @@ out:
        return ret;
 }
 
-void erofs_pcpubuf_init(void)
+void __init erofs_pcpubuf_init(void)
 {
        int cpu;
 
index 3247d24..f1708c7 100644 (file)
@@ -1312,12 +1312,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 
        if (!be->decompressed_pages)
                be->decompressed_pages =
-                       kcalloc(be->nr_pages, sizeof(struct page *),
-                               GFP_KERNEL | __GFP_NOFAIL);
+                       kvcalloc(be->nr_pages, sizeof(struct page *),
+                                GFP_KERNEL | __GFP_NOFAIL);
        if (!be->compressed_pages)
                be->compressed_pages =
-                       kcalloc(pclusterpages, sizeof(struct page *),
-                               GFP_KERNEL | __GFP_NOFAIL);
+                       kvcalloc(pclusterpages, sizeof(struct page *),
+                                GFP_KERNEL | __GFP_NOFAIL);
 
        z_erofs_parse_out_bvecs(be);
        err2 = z_erofs_parse_in_bvecs(be, &overlapped);
@@ -1365,7 +1365,7 @@ out:
        }
        if (be->compressed_pages < be->onstack_pages ||
            be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
-               kfree(be->compressed_pages);
+               kvfree(be->compressed_pages);
        z_erofs_fill_other_copies(be, err);
 
        for (i = 0; i < be->nr_pages; ++i) {
@@ -1384,7 +1384,7 @@ out:
        }
 
        if (be->decompressed_pages != be->onstack_pages)
-               kfree(be->decompressed_pages);
+               kvfree(be->decompressed_pages);
 
        pcl->length = 0;
        pcl->partial = true;
index 8bf6d30..655da4d 100644 (file)
@@ -757,9 +757,6 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
        err = z_erofs_do_map_blocks(inode, map, flags);
 out:
        trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
-
-       /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
-       DBG_BUGON(err < 0 && err != -ENOMEM);
        return err;
 }
 
index 4eeb02d..08b29c2 100644 (file)
@@ -1387,7 +1387,7 @@ struct ext4_super_block {
        __le32  s_first_meta_bg;        /* First metablock block group */
        __le32  s_mkfs_time;            /* When the filesystem was created */
        __le32  s_jnl_blocks[17];       /* Backup of the journal inode */
-       /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
+       /* 64bit support valid if EXT4_FEATURE_INCOMPAT_64BIT */
 /*150*/        __le32  s_blocks_count_hi;      /* Blocks count */
        __le32  s_r_blocks_count_hi;    /* Reserved blocks count */
        __le32  s_free_blocks_count_hi; /* Free blocks count */
index 4493ef0..cdf9bfe 100644 (file)
@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
                keys[0].fmr_physical = bofs;
        if (keys[1].fmr_physical >= eofs)
                keys[1].fmr_physical = eofs - 1;
+       if (keys[1].fmr_physical < keys[0].fmr_physical)
+               return 0;
        start_fsb = keys[0].fmr_physical;
        end_fsb = keys[1].fmr_physical;
 
index 2b42ece..1602d74 100644 (file)
@@ -159,7 +159,6 @@ int ext4_find_inline_data_nolock(struct inode *inode)
                                        (void *)ext4_raw_inode(&is.iloc));
                EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
                                le32_to_cpu(is.s.here->e_value_size);
-               ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        }
 out:
        brelse(is.iloc.bh);
index d251d70..bf0b7de 100644 (file)
@@ -4797,8 +4797,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
 
        if (EXT4_INODE_HAS_XATTR_SPACE(inode)  &&
            *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+               int err;
+
                ext4_set_inode_state(inode, EXT4_STATE_XATTR);
-               return ext4_find_inline_data_nolock(inode);
+               err = ext4_find_inline_data_nolock(inode);
+               if (!err && ext4_has_inline_data(inode))
+                       ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+               return err;
        } else
                EXT4_I(inode)->i_inline_off = 0;
        return 0;
index 12435d6..f9a4301 100644 (file)
@@ -431,6 +431,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
                ei_bl->i_flags = 0;
                inode_set_iversion(inode_bl, 1);
                i_size_write(inode_bl, 0);
+               EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
                inode_bl->i_mode = S_IFREG;
                if (ext4_has_feature_extents(sb)) {
                        ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
index 94608b7..31e21de 100644 (file)
@@ -1595,11 +1595,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
                int has_inline_data = 1;
                ret = ext4_find_inline_entry(dir, fname, res_dir,
                                             &has_inline_data);
-               if (has_inline_data) {
-                       if (inlined)
-                               *inlined = 1;
+               if (inlined)
+                       *inlined = has_inline_data;
+               if (has_inline_data)
                        goto cleanup_and_exit;
-               }
        }
 
        if ((namelen <= 2) && (name[0] == '.') &&
@@ -3646,7 +3645,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
         * so the old->de may no longer valid and need to find it again
         * before reset old inode info.
         */
-       old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+       old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
+                                &old.inlined);
        if (IS_ERR(old.bh))
                retval = PTR_ERR(old.bh);
        if (!old.bh)
@@ -3813,9 +3813,20 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
                        return retval;
        }
 
-       old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
-       if (IS_ERR(old.bh))
-               return PTR_ERR(old.bh);
+       /*
+        * We need to protect against old.inode directory getting converted
+        * from inline directory format into a normal one.
+        */
+       if (S_ISDIR(old.inode->i_mode))
+               inode_lock_nested(old.inode, I_MUTEX_NONDIR2);
+
+       old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
+                                &old.inlined);
+       if (IS_ERR(old.bh)) {
+               retval = PTR_ERR(old.bh);
+               goto unlock_moved_dir;
+       }
+
        /*
         *  Check for inode number is _not_ due to possible IO errors.
         *  We might rmdir the source, keep it as pwd of some process
@@ -3872,11 +3883,6 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
                        if (new.dir != old.dir && EXT4_DIR_LINK_MAX(new.dir))
                                goto end_rename;
                }
-               /*
-                * We need to protect against old.inode directory getting
-                * converted from inline directory format into a normal one.
-                */
-               inode_lock_nested(old.inode, I_MUTEX_NONDIR2);
                retval = ext4_rename_dir_prepare(handle, &old);
                if (retval) {
                        inode_unlock(old.inode);
@@ -4013,12 +4019,15 @@ end_rename:
        } else {
                ext4_journal_stop(handle);
        }
-       if (old.dir_bh)
-               inode_unlock(old.inode);
 release_bh:
        brelse(old.dir_bh);
        brelse(old.bh);
        brelse(new.bh);
+
+unlock_moved_dir:
+       if (S_ISDIR(old.inode->i_mode))
+               inode_unlock(old.inode);
+
        return retval;
 }
 
index beaec6d..1e4db96 100644 (file)
@@ -409,7 +409,8 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
 
 static void io_submit_add_bh(struct ext4_io_submit *io,
                             struct inode *inode,
-                            struct page *page,
+                            struct page *pagecache_page,
+                            struct page *bounce_page,
                             struct buffer_head *bh)
 {
        int ret;
@@ -421,10 +422,11 @@ submit_and_retry:
        }
        if (io->io_bio == NULL)
                io_submit_init_bio(io, bh);
-       ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
+       ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
+                          bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
-       wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
+       wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
        io->io_next_block++;
 }
 
@@ -561,8 +563,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        do {
                if (!buffer_async_write(bh))
                        continue;
-               io_submit_add_bh(io, inode,
-                                bounce_page ? bounce_page : page, bh);
+               io_submit_add_bh(io, inode, page, bounce_page, bh);
        } while ((bh = bh->b_this_page) != head);
 unlock:
        unlock_page(page);
index 88f7b8a..f43e526 100644 (file)
@@ -5726,6 +5726,28 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
        return journal_inode;
 }
 
+static int ext4_journal_bmap(journal_t *journal, sector_t *block)
+{
+       struct ext4_map_blocks map;
+       int ret;
+
+       if (journal->j_inode == NULL)
+               return 0;
+
+       map.m_lblk = *block;
+       map.m_len = 1;
+       ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
+       if (ret <= 0) {
+               ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
+                        "journal bmap failed: block %llu ret %d\n",
+                        *block, ret);
+               jbd2_journal_abort(journal, ret ? ret : -EIO);
+               return ret;
+       }
+       *block = map.m_pblk;
+       return 0;
+}
+
 static journal_t *ext4_get_journal(struct super_block *sb,
                                   unsigned int journal_inum)
 {
@@ -5746,6 +5768,7 @@ static journal_t *ext4_get_journal(struct super_block *sb,
                return NULL;
        }
        journal->j_private = sb;
+       journal->j_bmap = ext4_journal_bmap;
        ext4_init_journal_params(sb, journal);
        return journal;
 }
@@ -5920,6 +5943,7 @@ static int ext4_load_journal(struct super_block *sb,
                err = jbd2_journal_wipe(journal, !really_read_only);
        if (!err) {
                char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
+
                if (save)
                        memcpy(save, ((char *) es) +
                               EXT4_S_ERR_START, EXT4_S_ERR_LEN);
@@ -5928,6 +5952,14 @@ static int ext4_load_journal(struct super_block *sb,
                        memcpy(((char *) es) + EXT4_S_ERR_START,
                               save, EXT4_S_ERR_LEN);
                kfree(save);
+               es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
+                                          EXT4_ERROR_FS);
+               /* Write out restored error information to the superblock */
+               if (!bdev_read_only(sb->s_bdev)) {
+                       int err2;
+                       err2 = ext4_commit_super(sb);
+                       err = err ? : err2;
+               }
        }
 
        if (err) {
@@ -6157,11 +6189,13 @@ static int ext4_clear_journal_err(struct super_block *sb,
                errstr = ext4_decode_error(sb, j_errno, nbuf);
                ext4_warning(sb, "Filesystem error recorded "
                             "from previous mount: %s", errstr);
-               ext4_warning(sb, "Marking fs in need of filesystem check.");
 
                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
                es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
-               ext4_commit_super(sb);
+               j_errno = ext4_commit_super(sb);
+               if (j_errno)
+                       return j_errno;
+               ext4_warning(sb, "Marked fs in need of filesystem check.");
 
                jbd2_journal_clear_err(journal);
                jbd2_journal_update_sb_errno(journal);
index e2b8b34..12d6252 100644 (file)
@@ -501,13 +501,13 @@ static const struct sysfs_ops ext4_attr_ops = {
        .store  = ext4_attr_store,
 };
 
-static struct kobj_type ext4_sb_ktype = {
+static const struct kobj_type ext4_sb_ktype = {
        .default_groups = ext4_groups,
        .sysfs_ops      = &ext4_attr_ops,
        .release        = ext4_sb_release,
 };
 
-static struct kobj_type ext4_feat_ktype = {
+static const struct kobj_type ext4_feat_ktype = {
        .default_groups = ext4_feat_groups,
        .sysfs_ops      = &ext4_attr_ops,
        .release        = ext4_feat_release,
index 62f2ec5..767454d 100644 (file)
@@ -2852,6 +2852,9 @@ shift:
                        (void *)header, total_ino);
        EXT4_I(inode)->i_extra_isize = new_extra_isize;
 
+       if (ext4_has_inline_data(inode))
+               error = ext4_find_inline_data_nolock(inode);
+
 cleanup:
        if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
                ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
index c942c89..7893ea1 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -642,6 +642,7 @@ static struct file *pick_file(struct files_struct *files, unsigned fd)
        if (fd >= fdt->max_fds)
                return NULL;
 
+       fd = array_index_nospec(fd, fdt->max_fds);
        file = fdt->fd[fd];
        if (file) {
                rcu_assign_pointer(fdt->fd[fd], NULL);
index e80c781..8ae4191 100644 (file)
@@ -969,10 +969,13 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
 {
        int err = 0;
        unsigned long long ret;
-       sector_t block = 0;
+       sector_t block = blocknr;
 
-       if (journal->j_inode) {
-               block = blocknr;
+       if (journal->j_bmap) {
+               err = journal->j_bmap(journal, &block);
+               if (err == 0)
+                       *retp = block;
+       } else if (journal->j_inode) {
                ret = bmap(journal->j_inode, &block);
 
                if (ret || !block) {
index e7462b5..502e1b7 100644 (file)
@@ -1104,7 +1104,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
        since = READ_ONCE(file->f_wb_err);
        if (verf)
                nfsd_copy_write_verifier(verf, nn);
+       file_start_write(file);
        host_err = vfs_iter_write(file, &iter, &pos, flags);
+       file_end_write(file);
        if (host_err < 0) {
                nfsd_reset_write_verifier(nn);
                trace_nfsd_writeverf_reset(nn, rqstp, host_err);
index f7a9607..2210e5e 100644 (file)
@@ -193,7 +193,7 @@ static int udf_adinicb_writepage(struct folio *folio,
        struct udf_inode_info *iinfo = UDF_I(inode);
 
        BUG_ON(!PageLocked(page));
-       memcpy_to_page(page, 0, iinfo->i_data + iinfo->i_lenEAttr,
+       memcpy_from_page(iinfo->i_data + iinfo->i_lenEAttr, page, 0,
                       i_size_read(inode));
        unlock_page(page);
        mark_inode_dirty(inode);
@@ -241,6 +241,15 @@ static int udf_read_folio(struct file *file, struct folio *folio)
 
 static void udf_readahead(struct readahead_control *rac)
 {
+       struct udf_inode_info *iinfo = UDF_I(rac->mapping->host);
+
+       /*
+        * No readahead needed for in-ICB files and udf_get_block() would get
+        * confused for such file anyway.
+        */
+       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+               return;
+
        mpage_readahead(rac, udf_get_block);
 }
 
@@ -407,6 +416,9 @@ static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
        int err;
        struct udf_inode_info *iinfo = UDF_I(inode);
 
+       if (WARN_ON_ONCE(iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB))
+               return -EFSCORRUPTED;
+
        map->oflags = 0;
        if (!(map->iflags & UDF_MAP_CREATE)) {
                struct kernel_lb_addr eloc;
index 8fbe766..d4901ca 100644 (file)
@@ -66,7 +66,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
  *
  * Finally, some operations just want the exact limit, either because
  * they set bits or just don't have any faster fixed-sized versions. We
- * call this just 'nr_cpumask_size'.
+ * call this just 'nr_cpumask_bits'.
  *
  * Note that these optional constants are always guaranteed to be at
  * least as big as 'nr_cpu_ids' itself is, and all our cpumask
@@ -147,7 +147,7 @@ static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bit
 /* verify cpu argument to cpumask_* operators */
 static __always_inline unsigned int cpumask_check(unsigned int cpu)
 {
-       cpu_max_bits_warn(cpu, nr_cpumask_bits);
+       cpu_max_bits_warn(cpu, small_cpumask_bits);
        return cpu;
 }
 
@@ -518,14 +518,14 @@ static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *
 /**
  * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
  * @dstp: the cpumask pointer
- *
- * Note: since we set bits, we should use the tighter 'bitmap_set()' with
- * the eact number of bits, not 'bitmap_fill()' that will fill past the
- * end.
  */
 static inline void cpumask_setall(struct cpumask *dstp)
 {
-       bitmap_set(cpumask_bits(dstp), 0, nr_cpumask_bits);
+       if (small_const_nbits(small_cpumask_bits)) {
+               cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
+               return;
+       }
+       bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
index eaf8ab1..1ea8c7a 100644 (file)
@@ -834,6 +834,7 @@ struct hid_driver {
  * @output_report: send output report to device
  * @idle: send idle request to device
  * @may_wakeup: return if device may act as a wakeup source during system-suspend
+ * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
  */
 struct hid_ll_driver {
        int (*start)(struct hid_device *hdev);
@@ -859,6 +860,8 @@ struct hid_ll_driver {
 
        int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
        bool (*may_wakeup)(struct hid_device *hdev);
+
+       unsigned int max_buffer_size;
 };
 
 extern bool hid_is_usb(const struct hid_device *hdev);
index b06254e..8fc1008 100644 (file)
@@ -481,4 +481,10 @@ static inline void folio_zero_range(struct folio *folio,
        zero_user_segments(&folio->page, start, start + length, 0, 0);
 }
 
+static inline void put_and_unmap_page(struct page *page, void *addr)
+{
+       kunmap_local(addr);
+       put_page(page);
+}
+
 #endif /* _LINUX_HIGHMEM_H */
index 500404d..5ba8966 100644 (file)
@@ -236,8 +236,8 @@ enum i2c_driver_flags {
 /**
  * struct i2c_driver - represent an I2C device driver
  * @class: What kind of i2c device we instantiate (for detect)
- * @probe: Callback for device binding - soon to be deprecated
- * @probe_new: New callback for device binding
+ * @probe: Callback for device binding
+ * @probe_new: Transitional callback for device binding - do not use
  * @remove: Callback for device unbinding
  * @shutdown: Callback for device shutdown
  * @alert: Alert callback, for example for the SMBus alert protocol
@@ -272,14 +272,18 @@ enum i2c_driver_flags {
 struct i2c_driver {
        unsigned int class;
 
+       union {
        /* Standard driver model interfaces */
-       int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
+               int (*probe)(struct i2c_client *client);
+               /*
+                * Legacy callback that was part of a conversion of .probe().
+                * Today it has the same semantic as .probe(). Don't use for new
+                * code.
+                */
+               int (*probe_new)(struct i2c_client *client);
+       };
        void (*remove)(struct i2c_client *client);
 
-       /* New driver model interface to aid the seamless removal of the
-        * current probe()'s, more commonly unused than used second parameter.
-        */
-       int (*probe_new)(struct i2c_client *client);
 
        /* driver model interfaces that don't relate to enumeration  */
        void (*shutdown)(struct i2c_client *client);
index 5962072..f619bae 100644 (file)
@@ -1308,6 +1308,14 @@ struct journal_s
                                    struct buffer_head *bh,
                                    enum passtype pass, int off,
                                    tid_t expected_commit_id);
+
+       /**
+        * @j_bmap:
+        *
+        * Bmap function that should be used instead of the generic
+        * VFS bmap function.
+        */
+       int (*j_bmap)(struct journal_s *journal, sector_t *block);
 };
 
 #define jbd2_might_wait_for_commit(j) \
index 82d0e41..faa108b 100644 (file)
@@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
        return false;
 }
 
+static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
+{
+       local_bh_disable();
+       inet_twsk_deschedule_put(tw);
+       local_bh_enable();
+}
+
 /* assign a socket to the skb -- consumes sk */
 static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
index b4f0f95..ada0a48 100644 (file)
@@ -245,7 +245,17 @@ struct btrfs_ioctl_dev_info_args {
        __u8 uuid[BTRFS_UUID_SIZE];             /* in/out */
        __u64 bytes_used;                       /* out */
        __u64 total_bytes;                      /* out */
-       __u64 unused[379];                      /* pad to 4k */
+       /*
+        * Optional, out.
+        *
+        * Showing the fsid of the device, allowing user space to check if this
+        * device is a seeding one.
+        *
+        * Introduced in v6.3, thus user space still needs to check if kernel
+        * changed this value.  Older kernel will not touch the values here.
+        */
+       __u8 fsid[BTRFS_UUID_SIZE];
+       __u64 unused[377];                      /* pad to 4k */
        __u8 path[BTRFS_DEVICE_PATH_NAME_MAX];  /* out */
 };
 
index 19ebbef..5041c35 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/fou.yaml */
 /* YNL-GEN uapi header */
index 5883914..8c4e3e5 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN uapi header */
index 411bb2d..f81c0a7 100644 (file)
@@ -616,7 +616,7 @@ static int io_wqe_worker(void *data)
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;
-       bool last_timeout = false;
+       bool exit_mask = false, last_timeout = false;
        char buf[TASK_COMM_LEN];
 
        worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
@@ -632,8 +632,11 @@ static int io_wqe_worker(void *data)
                        io_worker_handle_work(worker);
 
                raw_spin_lock(&wqe->lock);
-               /* timed out, exit unless we're the last worker */
-               if (last_timeout && acct->nr_workers > 1) {
+               /*
+                * Last sleep timed out. Exit if we're not the last worker,
+                * or if someone modified our affinity.
+                */
+               if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
                        acct->nr_workers--;
                        raw_spin_unlock(&wqe->lock);
                        __set_current_state(TASK_RUNNING);
@@ -652,7 +655,11 @@ static int io_wqe_worker(void *data)
                                continue;
                        break;
                }
-               last_timeout = !ret;
+               if (!ret) {
+                       last_timeout = true;
+                       exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
+                                                       wqe->cpu_mask);
+               }
        }
 
        if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
@@ -704,7 +711,6 @@ static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
        tsk->worker_private = worker;
        worker->task = tsk;
        set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
-       tsk->flags |= PF_NO_SETAFFINITY;
 
        raw_spin_lock(&wqe->lock);
        hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
index fd1cc35..722624b 100644 (file)
@@ -1499,14 +1499,14 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
        __must_hold(&ctx->uring_lock)
 {
-       struct io_wq_work_node *node, *prev;
        struct io_submit_state *state = &ctx->submit_state;
+       struct io_wq_work_node *node;
 
        __io_cq_lock(ctx);
        /* must come first to preserve CQE ordering in failure cases */
        if (state->cqes_count)
                __io_flush_post_cqes(ctx);
-       wq_list_for_each(node, prev, &state->compl_reqs) {
+       __wq_list_for_each(node, &state->compl_reqs) {
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                            comp_list);
 
index 7c198a4..0eb1948 100644 (file)
@@ -3,6 +3,9 @@
 
 #include <linux/io_uring_types.h>
 
+#define __wq_list_for_each(pos, head)                          \
+       for (pos = (head)->first; pos; pos = (pos)->next)
+
 #define wq_list_for_each(pos, prv, head)                       \
        for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
 
@@ -113,4 +116,4 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
        return container_of(work->list.next, struct io_wq_work, list);
 }
 
-#endif // INTERNAL_IO_SLIST_H
\ No newline at end of file
+#endif // INTERNAL_IO_SLIST_H
index 446a189..2e4c483 100644 (file)
@@ -108,7 +108,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
        struct file *file = req->file;
        int ret;
 
-       if (!req->file->f_op->uring_cmd)
+       if (!file->f_op->uring_cmd)
                return -EOPNOTSUPP;
 
        ret = security_uring_cmd(ioucmd);
@@ -120,6 +120,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
        if (ctx->flags & IORING_SETUP_CQE32)
                issue_flags |= IO_URING_F_CQE32;
        if (ctx->flags & IORING_SETUP_IOPOLL) {
+               if (!file->f_op->uring_cmd_iopoll)
+                       return -EOPNOTSUPP;
                issue_flags |= IO_URING_F_IOPOLL;
                req->iopoll_completed = 0;
                WRITE_ONCE(ioucmd->cookie, NULL);
index fa22ec7..7378074 100644 (file)
@@ -4569,6 +4569,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env,
        struct btf *btf = env->btf;
        u16 i;
 
+       env->resolve_mode = RESOLVE_TBD;
        for_each_vsi_from(i, v->next_member, v->t, vsi) {
                u32 var_type_id = vsi->type, type_id, type_size = 0;
                const struct btf_type *var_type = btf_type_by_id(env->btf,
index 6f3d654..f81b243 100644 (file)
@@ -97,8 +97,11 @@ reset:
 struct xdp_page_head {
        struct xdp_buff orig_ctx;
        struct xdp_buff ctx;
-       struct xdp_frame frm;
-       u8 data[];
+       union {
+               /* ::data_hard_start starts here */
+               DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
+               DECLARE_FLEX_ARRAY(u8, data);
+       };
 };
 
 struct xdp_test_data {
@@ -113,6 +116,10 @@ struct xdp_test_data {
        u32 frame_cnt;
 };
 
+/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
+ * must be updated accordingly this gets changed, otherwise BPF selftests
+ * will fail.
+ */
 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
 #define TEST_XDP_MAX_BATCH 256
 
@@ -132,8 +139,8 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
        headroom -= meta_len;
 
        new_ctx = &head->ctx;
-       frm = &head->frm;
-       data = &head->data;
+       frm = head->frame;
+       data = head->data;
        memcpy(data + headroom, orig_ctx->data_meta, frm_len);
 
        xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
@@ -223,7 +230,7 @@ static void reset_ctx(struct xdp_page_head *head)
        head->ctx.data = head->orig_ctx.data;
        head->ctx.data_meta = head->orig_ctx.data_meta;
        head->ctx.data_end = head->orig_ctx.data_end;
-       xdp_update_frame_from_buff(&head->ctx, &head->frm);
+       xdp_update_frame_from_buff(&head->ctx, head->frame);
 }
 
 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
@@ -285,7 +292,7 @@ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
                head = phys_to_virt(page_to_phys(page));
                reset_ctx(head);
                ctx = &head->ctx;
-               frm = &head->frm;
+               frm = head->frame;
                xdp->frame_cnt++;
 
                act = bpf_prog_run_xdp(prog, ctx);
index ebc202f..bf61ea4 100644 (file)
@@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        struct usb_device *usbdev;
        int res;
 
+       if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED)
+               return 0;
+
        /* Check whether we have a NCM device, and find its VID/PID. */
        if (!(dev->dev.parent && dev->dev.parent->driver &&
              strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0))
index 48812ec..9e10802 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN kernel source */
index b16dc7e..2c5fc7d 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN kernel header */
index eb7d33b..1a31815 100644 (file)
@@ -517,18 +517,16 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 #ifdef HAVE_SKB_SMALL_HEAD_CACHE
        if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
            !(flags & KMALLOC_NOT_NORMAL_BITS)) {
-
-               /* skb_small_head_cache has non power of two size,
-                * likely forcing SLUB to use order-3 pages.
-                * We deliberately attempt a NOMEMALLOC allocation only.
-                */
                obj = kmem_cache_alloc_node(skb_small_head_cache,
                                flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                node);
-               if (obj) {
-                       *size = SKB_SMALL_HEAD_CACHE_SIZE;
+               *size = SKB_SMALL_HEAD_CACHE_SIZE;
+               if (obj || !(gfp_pfmemalloc_allowed(flags)))
                        goto out;
-               }
+               /* Try again but now we are using pfmemalloc reserves */
+               ret_pfmemalloc = true;
+               obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+               goto out;
        }
 #endif
        *size = obj_size = kmalloc_size_roundup(obj_size);
@@ -2082,6 +2080,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 }
 EXPORT_SYMBOL(skb_realloc_headroom);
 
+/* Note: We plan to rework this in linux-6.4 */
 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
 {
        unsigned int saved_end_offset, saved_truesize;
@@ -2100,6 +2099,22 @@ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
        if (likely(skb_end_offset(skb) == saved_end_offset))
                return 0;
 
+#ifdef HAVE_SKB_SMALL_HEAD_CACHE
+       /* We can not change skb->end if the original or new value
+        * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
+        */
+       if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
+           skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
+               /* We think this path should not be taken.
+                * Add a temporary trace to warn us just in case.
+                */
+               pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
+                           saved_end_offset, skb_end_offset(skb));
+               WARN_ON_ONCE(1);
+               return 0;
+       }
+#endif
+
        shinfo = skb_shinfo(skb);
 
        /* We are about to change back skb->end,
index 341c565..c258887 100644 (file)
@@ -2818,7 +2818,8 @@ static void sk_enter_memory_pressure(struct sock *sk)
 static void sk_leave_memory_pressure(struct sock *sk)
 {
        if (sk->sk_prot->leave_memory_pressure) {
-               sk->sk_prot->leave_memory_pressure(sk);
+               INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
+                                    tcp_leave_memory_pressure, sk);
        } else {
                unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
 
index 2215f57..d8f4379 100644 (file)
@@ -1412,7 +1412,7 @@ static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
        }
 
-       if (!nla_get_u8(info->attrs[NL802154_ATTR_SCAN_TYPE])) {
+       if (!info->attrs[NL802154_ATTR_SCAN_TYPE]) {
                NL_SET_ERR_MSG(info->extack, "Malformed request, missing scan type");
                return -EINVAL;
        }
index 6c3820f..5c14fe0 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/fou.yaml */
 /* YNL-GEN kernel source */
index b7a6812..58b1e1e 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/fou.yaml */
 /* YNL-GEN kernel header */
index b22b2c7..69e3317 100644 (file)
@@ -38,7 +38,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
                                            hp->source, lport ? lport : hp->dest,
                                            skb->dev, NF_TPROXY_LOOKUP_LISTENER);
                if (sk2) {
-                       inet_twsk_deschedule_put(inet_twsk(sk));
+                       nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                }
        }
index cf26d65..ebf9175 100644 (file)
@@ -186,6 +186,9 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
 
+       if (!len)
+               return 0;
+
        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, flags, addr_len);
@@ -244,6 +247,9 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
 
+       if (!len)
+               return 0;
+
        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, flags, addr_len);
index e5dc91d..0735d82 100644 (file)
@@ -68,6 +68,9 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
 
+       if (!len)
+               return 0;
+
        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return sk_udp_recvmsg(sk, msg, len, flags, addr_len);
index 47447f0..bee45df 100644 (file)
@@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
 
        rcu_read_lock();
 
+       ret = -ESRCH;
        ila = ila_lookup_by_params(&xp, ilan);
        if (ila) {
                ret = ila_dump_info(ila,
index 929502e..52f828b 100644 (file)
@@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                                            lport ? lport : hp->dest,
                                            skb->dev, NF_TPROXY_LOOKUP_LISTENER);
                if (sk2) {
-                       inet_twsk_deschedule_put(inet_twsk(sk));
+                       nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                }
        }
index 7250082..c6a6a60 100644 (file)
@@ -96,8 +96,8 @@ static DEFINE_MUTEX(nf_conntrack_mutex);
 #define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
 #define GC_SCAN_EXPIRED_MAX    (64000u / HZ)
 
-#define MIN_CHAINLEN   8u
-#define MAX_CHAINLEN   (32u - MIN_CHAINLEN)
+#define MIN_CHAINLEN   50u
+#define MAX_CHAINLEN   (80u - MIN_CHAINLEN)
 
 static struct conntrack_gc_work conntrack_gc_work;
 
index c11dff9..bfc3aaa 100644 (file)
@@ -328,11 +328,12 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
+                              bool dump)
 {
        u32 mark = READ_ONCE(ct->mark);
 
-       if (!mark)
+       if (!mark && !dump)
                return 0;
 
        if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
@@ -343,7 +344,7 @@ nla_put_failure:
        return -1;
 }
 #else
-#define ctnetlink_dump_mark(a, b) (0)
+#define ctnetlink_dump_mark(a, b, c) (0)
 #endif
 
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
@@ -548,7 +549,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
 static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
 {
        if (ctnetlink_dump_status(skb, ct) < 0 ||
-           ctnetlink_dump_mark(skb, ct) < 0 ||
+           ctnetlink_dump_mark(skb, ct, true) < 0 ||
            ctnetlink_dump_secctx(skb, ct) < 0 ||
            ctnetlink_dump_id(skb, ct) < 0 ||
            ctnetlink_dump_use(skb, ct) < 0 ||
@@ -831,8 +832,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
        }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       if (events & (1 << IPCT_MARK) &&
-           ctnetlink_dump_mark(skb, ct) < 0)
+       if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
                goto nla_put_failure;
 #endif
        nlmsg_end(skb, nlh);
@@ -2735,7 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
                goto nla_put_failure;
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       if (ctnetlink_dump_mark(skb, ct) < 0)
+       if (ctnetlink_dump_mark(skb, ct, true) < 0)
                goto nla_put_failure;
 #endif
        if (ctnetlink_dump_labels(skb, ct) < 0)
index 7f2bda6..8e6d7ea 100644 (file)
@@ -105,11 +105,15 @@ static void nft_last_destroy(const struct nft_ctx *ctx,
 static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
 {
        struct nft_last_priv *priv_dst = nft_expr_priv(dst);
+       struct nft_last_priv *priv_src = nft_expr_priv(src);
 
        priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
        if (!priv_dst->last)
                return -ENOMEM;
 
+       priv_dst->last->set = priv_src->last->set;
+       priv_dst->last->jiffies = priv_src->last->jiffies;
+
        return 0;
 }
 
index 123578e..3ba12a7 100644 (file)
@@ -236,12 +236,16 @@ static void nft_quota_destroy(const struct nft_ctx *ctx,
 static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
 {
        struct nft_quota *priv_dst = nft_expr_priv(dst);
+       struct nft_quota *priv_src = nft_expr_priv(src);
+
+       priv_dst->quota = priv_src->quota;
+       priv_dst->flags = priv_src->flags;
 
        priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
        if (!priv_dst->consumed)
                return -ENOMEM;
 
-       atomic64_set(priv_dst->consumed, 0);
+       *priv_dst->consumed = *priv_src->consumed;
 
        return 0;
 }
index 348bf56..b9264e7 100644 (file)
@@ -1446,8 +1446,8 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
        return rc;
 
 error:
-       kfree(cb_context);
        device_unlock(&dev->dev);
+       kfree(cb_context);
        return rc;
 }
 
index 8dabfb5..0d7aee8 100644 (file)
@@ -158,6 +158,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                nparms->zone = parm->zone;
 
                ret = 0;
+       } else {
+               err = ret;
+               goto out_free;
        }
 
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
index e960a46..475fe22 100644 (file)
@@ -2200,8 +2200,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 
                if (!tc_flags_valid(fnew->flags)) {
+                       kfree(fnew);
                        err = -EINVAL;
-                       goto errout;
+                       goto errout_tb;
                }
        }
 
@@ -2226,8 +2227,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                }
                spin_unlock(&tp->lock);
 
-               if (err)
-                       goto errout;
+               if (err) {
+                       kfree(fnew);
+                       goto errout_tb;
+               }
        }
        fnew->handle = handle;
 
@@ -2337,7 +2340,6 @@ errout_mask:
        fl_mask_put(head, fnew->mask);
 errout_idr:
        idr_remove(&head->handle_idr, fnew->handle);
-errout:
        __fl_put(fnew);
 errout_tb:
        kfree(tb);
index a4cccdf..ff6dd86 100644 (file)
@@ -2657,16 +2657,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct smc_sock *smc;
-       int rc = -EPIPE;
+       int rc;
 
        smc = smc_sk(sk);
        lock_sock(sk);
-       if ((sk->sk_state != SMC_ACTIVE) &&
-           (sk->sk_state != SMC_APPCLOSEWAIT1) &&
-           (sk->sk_state != SMC_INIT))
-               goto out;
 
+       /* SMC does not support connect with fastopen */
        if (msg->msg_flags & MSG_FASTOPEN) {
+               /* not connected yet, fallback */
                if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
                        rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
                        if (rc)
@@ -2675,6 +2673,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                        rc = -EINVAL;
                        goto out;
                }
+       } else if ((sk->sk_state != SMC_ACTIVE) &&
+                  (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+                  (sk->sk_state != SMC_INIT)) {
+               rc = -EPIPE;
+               goto out;
        }
 
        if (smc->use_fallback) {
index 6bae8ce..9c92c0e 100644 (file)
@@ -450,7 +450,9 @@ static struct file_system_type sock_fs_type = {
  *
  *     Returns the &file bound with @sock, implicitly storing it
  *     in sock->file. If dname is %NULL, sets to "".
- *     On failure the return is a ERR pointer (see linux/err.h).
+ *
+ *     On failure @sock is released, and an ERR pointer is returned.
+ *
  *     This function uses GFP_KERNEL internally.
  */
 
@@ -1638,7 +1640,6 @@ static struct socket *__sys_socket_create(int family, int type, int protocol)
 struct file *__sys_socket_file(int family, int type, int protocol)
 {
        struct socket *sock;
-       struct file *file;
        int flags;
 
        sock = __sys_socket_create(family, type, protocol);
@@ -1649,11 +1650,7 @@ struct file *__sys_socket_file(int family, int type, int protocol)
        if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
                flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
 
-       file = sock_alloc_file(sock, flags, NULL);
-       if (IS_ERR(file))
-               sock_release(sock);
-
-       return file;
+       return sock_alloc_file(sock, flags, NULL);
 }
 
 int __sys_socket(int family, int type, int protocol)
index 1fd3f5e..fea7ce8 100644 (file)
@@ -798,6 +798,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 static int
 svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
+       struct svc_rqst *rqstp;
        struct task_struct *task;
        unsigned int state = serv->sv_nrthreads-1;
 
@@ -806,7 +807,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
                task = choose_victim(serv, pool, &state);
                if (task == NULL)
                        break;
-               kthread_stop(task);
+               rqstp = kthread_data(task);
+               /* Did we lose a race to svo_function threadfn? */
+               if (kthread_stop(task) == -EINTR)
+                       svc_exit_thread(rqstp);
                nrservs++;
        } while (nrservs < 0);
        return 0;
index 6c59378..a7cc4f9 100644 (file)
@@ -508,6 +508,8 @@ handle_error:
                        zc_pfrag.offset = iter_offset.offset;
                        zc_pfrag.size = copy;
                        tls_append_frag(record, &zc_pfrag, copy);
+
+                       iter_offset.offset += copy;
                } else if (copy) {
                        copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
 
index 3735cb0..b32c112 100644 (file)
@@ -405,13 +405,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(crypto_info_aes_gcm_128->iv,
                       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                       TLS_CIPHER_AES_GCM_128_IV_SIZE);
                memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aes_gcm_128,
                                 sizeof(*crypto_info_aes_gcm_128)))
@@ -429,13 +427,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(crypto_info_aes_gcm_256->iv,
                       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
                       TLS_CIPHER_AES_GCM_256_IV_SIZE);
                memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aes_gcm_256,
                                 sizeof(*crypto_info_aes_gcm_256)))
@@ -451,13 +447,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(aes_ccm_128->iv,
                       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
                       TLS_CIPHER_AES_CCM_128_IV_SIZE);
                memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
                        rc = -EFAULT;
                break;
@@ -472,13 +466,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(chacha20_poly1305->iv,
                       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
                       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
                memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval, chacha20_poly1305,
                                sizeof(*chacha20_poly1305)))
                        rc = -EFAULT;
@@ -493,13 +485,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(sm4_gcm_info->iv,
                       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
                       TLS_CIPHER_SM4_GCM_IV_SIZE);
                memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
                        rc = -EFAULT;
                break;
@@ -513,13 +503,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(sm4_ccm_info->iv,
                       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
                       TLS_CIPHER_SM4_CCM_IV_SIZE);
                memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
                        rc = -EFAULT;
                break;
@@ -535,13 +523,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(crypto_info_aria_gcm_128->iv,
                       cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
                       TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
                memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aria_gcm_128,
                                 sizeof(*crypto_info_aria_gcm_128)))
@@ -559,13 +545,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                        rc = -EINVAL;
                        goto out;
                }
-               lock_sock(sk);
                memcpy(crypto_info_aria_gcm_256->iv,
                       cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
                       TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
                memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
-               release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aria_gcm_256,
                                 sizeof(*crypto_info_aria_gcm_256)))
@@ -614,11 +598,9 @@ static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
        if (len < sizeof(value))
                return -EINVAL;
 
-       lock_sock(sk);
        value = -EINVAL;
        if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
                value = ctx->rx_no_pad;
-       release_sock(sk);
        if (value < 0)
                return value;
 
@@ -635,6 +617,8 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
 {
        int rc = 0;
 
+       lock_sock(sk);
+
        switch (optname) {
        case TLS_TX:
        case TLS_RX:
@@ -651,6 +635,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
                rc = -ENOPROTOOPT;
                break;
        }
+
+       release_sock(sk);
+
        return rc;
 }
 
index 782d370..635b8bf 100644 (file)
@@ -956,7 +956,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                               MSG_CMSG_COMPAT))
                return -EOPNOTSUPP;
 
-       mutex_lock(&tls_ctx->tx_lock);
+       ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+       if (ret)
+               return ret;
        lock_sock(sk);
 
        if (unlikely(msg->msg_controllen)) {
@@ -1290,7 +1292,9 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
                      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
                return -EOPNOTSUPP;
 
-       mutex_lock(&tls_ctx->tx_lock);
+       ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+       if (ret)
+               return ret;
        lock_sock(sk);
        ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
        release_sock(sk);
@@ -2127,7 +2131,7 @@ recv_end:
                else
                        err = process_rx_list(ctx, msg, &control, 0,
                                              async_copy_bytes, is_peek);
-               decrypted = max(err, 0);
+               decrypted += max(err, 0);
        }
 
        copied += decrypted;
@@ -2435,11 +2439,19 @@ static void tx_work_handler(struct work_struct *work)
 
        if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                return;
-       mutex_lock(&tls_ctx->tx_lock);
-       lock_sock(sk);
-       tls_tx_records(sk, -1);
-       release_sock(sk);
-       mutex_unlock(&tls_ctx->tx_lock);
+
+       if (mutex_trylock(&tls_ctx->tx_lock)) {
+               lock_sock(sk);
+               tls_tx_records(sk, -1);
+               release_sock(sk);
+               mutex_unlock(&tls_ctx->tx_lock);
+       } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+               /* Someone is holding the tx_lock, they will likely run Tx
+                * and cancel the work on their way out of the lock section.
+                * Schedule a long delay just in case.
+                */
+               schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
+       }
 }
 
 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
index 347122c..0b0f18e 100644 (file)
@@ -2105,7 +2105,8 @@ out:
 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
 
 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
-static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
+static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
+                    struct scm_cookie *scm, bool fds_sent)
 {
        struct unix_sock *ousk = unix_sk(other);
        struct sk_buff *skb;
@@ -2116,6 +2117,11 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
        if (!skb)
                return err;
 
+       err = unix_scm_to_skb(scm, skb, !fds_sent);
+       if (err < 0) {
+               kfree_skb(skb);
+               return err;
+       }
        skb_put(skb, 1);
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
 
@@ -2243,7 +2249,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 
 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
        if (msg->msg_flags & MSG_OOB) {
-               err = queue_oob(sock, msg, other);
+               err = queue_oob(sock, msg, other, &scm, fds_sent);
                if (err)
                        goto out_err;
                sent++;
index e9bf155..2f9d827 100644 (file)
@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
        struct sk_psock *psock;
        int copied;
 
+       if (!len)
+               return 0;
+
        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return __unix_recvmsg(sk, msg, len, flags);
index a7a857f..f8129c6 100644 (file)
@@ -109,6 +109,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_SVE               4 /* enable SVE for this CPU */
 #define KVM_ARM_VCPU_PTRAUTH_ADDRESS   5 /* VCPU uses address authentication */
 #define KVM_ARM_VCPU_PTRAUTH_GENERIC   6 /* VCPU uses generic authentication */
+#define KVM_ARM_VCPU_HAS_EL2           7 /* Support nested virtualization */
 
 struct kvm_vcpu_init {
        __u32 target;
index b70111a..b890058 100644 (file)
@@ -13,7 +13,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS                       20         /* N 32-bit words worth of info */
+#define NCAPINTS                       21         /* N 32-bit words worth of info */
 #define NBUGINTS                       1          /* N 32-bit bug flags */
 
 /*
index c44b56f..5dfa4fb 100644 (file)
 #define DISABLED_MASK17        0
 #define DISABLED_MASK18        0
 #define DISABLED_MASK19        0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+#define DISABLED_MASK20        0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
index 37ff475..ad35355 100644 (file)
@@ -25,6 +25,7 @@
 #define _EFER_SVME             12 /* Enable virtualization */
 #define _EFER_LMSLE            13 /* Long Mode Segment Limit Enable */
 #define _EFER_FFXSR            14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_AUTOIBRS         21 /* Enable Automatic IBRS */
 
 #define EFER_SCE               (1<<_EFER_SCE)
 #define EFER_LME               (1<<_EFER_LME)
@@ -33,6 +34,7 @@
 #define EFER_SVME              (1<<_EFER_SVME)
 #define EFER_LMSLE             (1<<_EFER_LMSLE)
 #define EFER_FFXSR             (1<<_EFER_FFXSR)
+#define EFER_AUTOIBRS          (1<<_EFER_AUTOIBRS)
 
 /* Intel MSRs. Some also available on other CPUs */
 
 #define SPEC_CTRL_RRSBA_DIS_S_SHIFT    6          /* Disable RRSBA behavior */
 #define SPEC_CTRL_RRSBA_DIS_S          BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
 
+/* A mask for bits which the kernel toggles when controlling mitigations */
+#define SPEC_CTRL_MITIGATIONS_MASK     (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
+                                                       | SPEC_CTRL_RRSBA_DIS_S)
+
 #define MSR_IA32_PRED_CMD              0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB                  BIT(0)     /* Indirect Branch Prediction Barrier */
 
 #define MSR_TURBO_RATIO_LIMIT1         0x000001ae
 #define MSR_TURBO_RATIO_LIMIT2         0x000001af
 
+#define MSR_SNOOP_RSP_0                        0x00001328
+#define MSR_SNOOP_RSP_1                        0x00001329
+
 #define MSR_LBR_SELECT                 0x000001c8
 #define MSR_LBR_TOS                    0x000001c9
 
 #define MSR_AMD64_SEV_ES_ENABLED       BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
 #define MSR_AMD64_SEV_SNP_ENABLED      BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
 
+/* SNP feature bits enabled by the hypervisor */
+#define MSR_AMD64_SNP_VTOM                     BIT_ULL(3)
+#define MSR_AMD64_SNP_REFLECT_VC               BIT_ULL(4)
+#define MSR_AMD64_SNP_RESTRICTED_INJ           BIT_ULL(5)
+#define MSR_AMD64_SNP_ALT_INJ                  BIT_ULL(6)
+#define MSR_AMD64_SNP_DEBUG_SWAP               BIT_ULL(7)
+#define MSR_AMD64_SNP_PREVENT_HOST_IBS         BIT_ULL(8)
+#define MSR_AMD64_SNP_BTB_ISOLATION            BIT_ULL(9)
+#define MSR_AMD64_SNP_VMPL_SSS                 BIT_ULL(10)
+#define MSR_AMD64_SNP_SECURE_TSC               BIT_ULL(11)
+#define MSR_AMD64_SNP_VMGEXIT_PARAM            BIT_ULL(12)
+#define MSR_AMD64_SNP_IBS_VIRT                 BIT_ULL(14)
+#define MSR_AMD64_SNP_VMSA_REG_PROTECTION      BIT_ULL(16)
+#define MSR_AMD64_SNP_SMT_PROTECTION           BIT_ULL(17)
+
+/* SNP feature bits reserved for future use. */
+#define MSR_AMD64_SNP_RESERVED_BIT13           BIT_ULL(13)
+#define MSR_AMD64_SNP_RESERVED_BIT15           BIT_ULL(15)
+#define MSR_AMD64_SNP_RESERVED_MASK            GENMASK_ULL(63, 18)
+
 #define MSR_AMD64_VIRT_SPEC_CTRL       0xc001011f
 
 /* AMD Collaborative Processor Performance Control MSRs */
 
 /* - AMD: */
 #define MSR_IA32_MBA_BW_BASE           0xc0000200
+#define MSR_IA32_SMBA_BW_BASE          0xc0000280
+#define MSR_IA32_EVT_CFG_BASE          0xc0000400
 
 /* MSR_IA32_VMX_MISC bits */
 #define MSR_IA32_VMX_MISC_INTEL_PT                 (1ULL << 14)
index aff7747..7ba1726 100644 (file)
@@ -98,6 +98,7 @@
 #define REQUIRED_MASK17        0
 #define REQUIRED_MASK18        0
 #define REQUIRED_MASK19        0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+#define REQUIRED_MASK20        0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
index e48deab..7f467fe 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/types.h>
 #include <linux/ioctl.h>
+#include <linux/stddef.h>
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
@@ -507,8 +508,8 @@ struct kvm_nested_state {
         * KVM_{GET,PUT}_NESTED_STATE ioctl values.
         */
        union {
-               struct kvm_vmx_nested_state_data vmx[0];
-               struct kvm_svm_nested_state_data svm[0];
+               __DECLARE_FLEX_ARRAY(struct kvm_vmx_nested_state_data, vmx);
+               __DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm);
        } data;
 };
 
@@ -525,6 +526,35 @@ struct kvm_pmu_event_filter {
 #define KVM_PMU_EVENT_ALLOW 0
 #define KVM_PMU_EVENT_DENY 1
 
+#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0)
+#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)
+
+/*
+ * Masked event layout.
+ * Bits   Description
+ * ----   -----------
+ * 7:0    event select (low bits)
+ * 15:8   umask match
+ * 31:16  unused
+ * 35:32  event select (high bits)
+ * 36:54  unused
+ * 55     exclude bit
+ * 63:56  umask mask
+ */
+
+#define KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, exclude) \
+       (((event_select) & 0xFFULL) | (((event_select) & 0XF00ULL) << 24) | \
+       (((mask) & 0xFFULL) << 56) | \
+       (((match) & 0xFFULL) << 8) | \
+       ((__u64)(!!(exclude)) << 55))
+
+#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \
+       (GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MASK                (GENMASK_ULL(63, 56))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH       (GENMASK_ULL(15, 8))
+#define KVM_PMU_MASKED_ENTRY_EXCLUDE           (BIT_ULL(55))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT  (56)
+
 /* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
 #define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
 #define   KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
index f69c168..80e1df4 100644 (file)
 #define SVM_VMGEXIT_AP_CREATE                  1
 #define SVM_VMGEXIT_AP_DESTROY                 2
 #define SVM_VMGEXIT_HV_FEATURES                        0x8000fffd
+#define SVM_VMGEXIT_TERM_REQUEST               0x8000fffe
+#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code)       \
+       /* SW_EXITINFO1[3:0] */                                 \
+       (((((u64)reason_set) & 0xf)) |                          \
+       /* SW_EXITINFO1[11:4] */                                \
+       ((((u64)reason_code) & 0xff) << 4))
 #define SVM_VMGEXIT_UNSUPPORTED_EVENT          0x8000ffff
 
 /* Exit code reserved for hypervisor/software use */
index 5418e2f..a91ac66 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/export.h>
 
-.pushsection .noinstr.text, "ax"
+.section .noinstr.text, "ax"
 
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
@@ -42,7 +42,7 @@ SYM_TYPED_FUNC_START(__memcpy)
 SYM_FUNC_END(__memcpy)
 EXPORT_SYMBOL(__memcpy)
 
-SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
+SYM_FUNC_ALIAS(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)
 
 /*
@@ -183,4 +183,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
        RET
 SYM_FUNC_END(memcpy_orig)
 
-.popsection
index fc9ffd3..6143b1a 100644 (file)
@@ -6,6 +6,8 @@
 #include <asm/alternative.h>
 #include <asm/export.h>
 
+.section .noinstr.text, "ax"
+
 /*
  * ISO C memset - set a memory block to a byte value. This function uses fast
  * string to get better performance than the original function. The code is
@@ -43,7 +45,7 @@ SYM_FUNC_START(__memset)
 SYM_FUNC_END(__memset)
 EXPORT_SYMBOL(__memset)
 
-SYM_FUNC_ALIAS_WEAK(memset, __memset)
+SYM_FUNC_ALIAS(memset, __memset)
 EXPORT_SYMBOL(memset)
 
 /*
index 87d1126..7c0cf50 100644 (file)
@@ -6,7 +6,6 @@
 #include <vdso/bits.h>
 #include <asm/bitsperlong.h>
 
-#define BIT_ULL(nr)            (ULL(1) << (nr))
 #define BIT_MASK(nr)           (UL(1) << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
 #define BIT_ULL_MASK(nr)       (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
index 2f86b2a..e8c07da 100644 (file)
@@ -43,6 +43,7 @@
 #define F_SEAL_GROW    0x0004  /* prevent file from growing */
 #define F_SEAL_WRITE   0x0008  /* prevent writes */
 #define F_SEAL_FUTURE_WRITE    0x0010  /* prevent future writes while mapped */
+#define F_SEAL_EXEC    0x0020  /* prevent chmod modifying exec bits */
 /* (1U << 31) is reserved for signed error codes */
 
 /*
index 55155e2..d77aef8 100644 (file)
@@ -583,6 +583,8 @@ struct kvm_s390_mem_op {
                struct {
                        __u8 ar;        /* the access register number */
                        __u8 key;       /* access key, ignored if flag unset */
+                       __u8 pad1[6];   /* ignored */
+                       __u64 old_addr; /* ignored if cmpxchg flag unset */
                };
                __u32 sida_offset; /* offset into the sida */
                __u8 reserved[32]; /* ignored */
@@ -595,11 +597,17 @@ struct kvm_s390_mem_op {
 #define KVM_S390_MEMOP_SIDA_WRITE      3
 #define KVM_S390_MEMOP_ABSOLUTE_READ   4
 #define KVM_S390_MEMOP_ABSOLUTE_WRITE  5
+#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG        6
+
 /* flags for kvm_s390_mem_op->flags */
 #define KVM_S390_MEMOP_F_CHECK_ONLY            (1ULL << 0)
 #define KVM_S390_MEMOP_F_INJECT_EXCEPTION      (1ULL << 1)
 #define KVM_S390_MEMOP_F_SKEY_PROTECTION       (1ULL << 2)
 
+/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
+#define KVM_S390_MEMOP_EXTENSION_CAP_BASE      (1 << 0)
+#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG   (1 << 1)
+
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
        /* in */
@@ -1175,6 +1183,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
 #define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
+#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 5883914..8c4e3e5 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN uapi header */
index ccb7f5d..3767543 100644 (file)
@@ -374,6 +374,7 @@ enum perf_event_read_format {
 #define PERF_ATTR_SIZE_VER5    112     /* add: aux_watermark */
 #define PERF_ATTR_SIZE_VER6    120     /* add: aux_sample_size */
 #define PERF_ATTR_SIZE_VER7    128     /* add: sig_data */
+#define PERF_ATTR_SIZE_VER8    136     /* add: config3 */
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -515,6 +516,8 @@ struct perf_event_attr {
         * truncated accordingly on 32 bit architectures.
         */
        __u64   sig_data;
+
+       __u64   config3; /* extension of config2 */
 };
 
 /*
index a5e06dc..1312a13 100644 (file)
@@ -281,6 +281,12 @@ struct prctl_mm_map {
 # define PR_SME_VL_LEN_MASK            0xffff
 # define PR_SME_VL_INHERIT             (1 << 17) /* inherit across exec */
 
+/* Memory deny write / execute */
+#define PR_SET_MDWE                    65
+# define PR_MDWE_REFUSE_EXEC_GAIN      1
+
+#define PR_GET_MDWE                    66
+
 #define PR_SET_VMA             0x53564d41
 # define PR_SET_VMA_ANON_NAME          0
 
index f9f115a..92e1b70 100644 (file)
  */
 #define VHOST_VDPA_SUSPEND             _IO(VHOST_VIRTIO, 0x7D)
 
+/* Resume a device so it can resume processing virtqueue requests
+ *
+ * After the return of this ioctl the device will have restored all the
+ * necessary states and it is fully operational to continue processing the
+ * virtqueue descriptors.
+ */
+#define VHOST_VDPA_RESUME              _IO(VHOST_VIRTIO, 0x7E)
+
 #endif
index 6d005a1..388b212 100644 (file)
@@ -5,5 +5,6 @@
 #include <vdso/const.h>
 
 #define BIT(nr)                        (UL(1) << (nr))
+#define BIT_ULL(nr)            (ULL(1) << (nr))
 
 #endif /* __VDSO_BITS_H */
index db410b7..ffaa803 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 
 import argparse
 import json
index 3c73f59..4b3797f 100644 (file)
@@ -1,7 +1,8 @@
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 
-from .nlspec import SpecAttr, SpecAttrSet, SpecFamily, SpecOperation
+from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
+    SpecFamily, SpecOperation
 from .ynl import YnlFamily
 
-__all__ = ["SpecAttr", "SpecAttrSet", "SpecFamily", "SpecOperation",
-           "YnlFamily"]
+__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
+           "SpecFamily", "SpecOperation", "YnlFamily"]
index 71da568..a34d088 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 
 import collections
 import importlib
@@ -57,6 +57,94 @@ class SpecElement:
         pass
 
 
+class SpecEnumEntry(SpecElement):
+    """ Entry within an enum declared in the Netlink spec.
+
+    Attributes:
+        doc         documentation string
+        enum_set    back reference to the enum
+        value       numerical value of this enum (use accessors in most situations!)
+
+    Methods:
+        raw_value   raw value, i.e. the id in the enum, unlike user value which is a mask for flags
+        user_value   user value, same as raw value for enums, for flags it's the mask
+    """
+    def __init__(self, enum_set, yaml, prev, value_start):
+        if isinstance(yaml, str):
+            yaml = {'name': yaml}
+        super().__init__(enum_set.family, yaml)
+
+        self.doc = yaml.get('doc', '')
+        self.enum_set = enum_set
+
+        if 'value' in yaml:
+            self.value = yaml['value']
+        elif prev:
+            self.value = prev.value + 1
+        else:
+            self.value = value_start
+
+    def has_doc(self):
+        return bool(self.doc)
+
+    def raw_value(self):
+        return self.value
+
+    def user_value(self):
+        if self.enum_set['type'] == 'flags':
+            return 1 << self.value
+        else:
+            return self.value
+
+
+class SpecEnumSet(SpecElement):
+    """ Enum type
+
+    Represents an enumeration (list of numerical constants)
+    as declared in the "definitions" section of the spec.
+
+    Attributes:
+        type            enum or flags
+        entries         entries by name
+        entries_by_val  entries by value
+    Methods:
+        get_mask      for flags compute the mask of all defined values
+    """
+    def __init__(self, family, yaml):
+        super().__init__(family, yaml)
+
+        self.type = yaml['type']
+
+        prev_entry = None
+        value_start = self.yaml.get('value-start', 0)
+        self.entries = dict()
+        self.entries_by_val = dict()
+        for entry in self.yaml['entries']:
+            e = self.new_entry(entry, prev_entry, value_start)
+            self.entries[e.name] = e
+            self.entries_by_val[e.raw_value()] = e
+            prev_entry = e
+
+    def new_entry(self, entry, prev_entry, value_start):
+        return SpecEnumEntry(self, entry, prev_entry, value_start)
+
+    def has_doc(self):
+        if 'doc' in self.yaml:
+            return True
+        for entry in self.entries.values():
+            if entry.has_doc():
+                return True
+        return False
+
+    def get_mask(self):
+        mask = 0
+        idx = self.yaml.get('value-start', 0)
+        for _ in self.entries.values():
+            mask |= 1 << idx
+            idx += 1
+        return mask
+
+
 class SpecAttr(SpecElement):
     """ Single Netlink atttribute type
 
@@ -95,15 +183,22 @@ class SpecAttrSet(SpecElement):
         self.attrs = collections.OrderedDict()
         self.attrs_by_val = collections.OrderedDict()
 
-        val = 0
-        for elem in self.yaml['attributes']:
-            if 'value' in elem:
-                val = elem['value']
+        if self.subset_of is None:
+            val = 1
+            for elem in self.yaml['attributes']:
+                if 'value' in elem:
+                    val = elem['value']
 
-            attr = self.new_attr(elem, val)
-            self.attrs[attr.name] = attr
-            self.attrs_by_val[attr.value] = attr
-            val += 1
+                attr = self.new_attr(elem, val)
+                self.attrs[attr.name] = attr
+                self.attrs_by_val[attr.value] = attr
+                val += 1
+        else:
+            real_set = family.attr_sets[self.subset_of]
+            for elem in self.yaml['attributes']:
+                attr = real_set[elem['name']]
+                self.attrs[attr.name] = attr
+                self.attrs_by_val[attr.value] = attr
 
     def new_attr(self, elem, value):
         return SpecAttr(self.family, self, elem, value)
@@ -186,6 +281,7 @@ class SpecFamily(SpecElement):
         msgs       dict of all messages (index by name)
         msgs_by_value  dict of all messages (indexed by name)
         ops        dict of all valid requests / responses
+        consts     dict of all constants/enums
     """
     def __init__(self, spec_path, schema_path=None):
         with open(spec_path, "r") as stream:
@@ -215,6 +311,7 @@ class SpecFamily(SpecElement):
         self.req_by_value = collections.OrderedDict()
         self.rsp_by_value = collections.OrderedDict()
         self.ops = collections.OrderedDict()
+        self.consts = collections.OrderedDict()
 
         last_exception = None
         while len(self._resolution_list) > 0:
@@ -235,6 +332,9 @@ class SpecFamily(SpecElement):
             if len(resolved) == 0:
                 raise last_exception
 
+    def new_enum(self, elem):
+        return SpecEnumSet(self, elem)
+
     def new_attr_set(self, elem):
         return SpecAttrSet(self, elem)
 
@@ -245,7 +345,7 @@ class SpecFamily(SpecElement):
         self._resolution_list.append(elem)
 
     def _dictify_ops_unified(self):
-        val = 0
+        val = 1
         for elem in self.yaml['operations']['list']:
             if 'value' in elem:
                 val = elem['value']
@@ -256,7 +356,7 @@ class SpecFamily(SpecElement):
             self.msgs[op.name] = op
 
     def _dictify_ops_directional(self):
-        req_val = rsp_val = 0
+        req_val = rsp_val = 1
         for elem in self.yaml['operations']['list']:
             if 'notify' in elem:
                 if 'value' in elem:
@@ -289,6 +389,12 @@ class SpecFamily(SpecElement):
     def resolve(self):
         self.resolve_up(super())
 
+        for elem in self.yaml['definitions']:
+            if elem['type'] == 'enum' or elem['type'] == 'flags':
+                self.consts[elem['name']] = self.new_enum(elem)
+            else:
+                self.consts[elem['name']] = elem
+
         for elem in self.yaml['attribute-sets']:
             attr_set = self.new_attr_set(elem)
             self.attr_sets[elem['name']] = attr_set
index 1c7411e..90764a8 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 
 import functools
 import os
@@ -303,11 +303,6 @@ class YnlFamily(SpecFamily):
         self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
         self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
 
-        self._types = dict()
-
-        for elem in self.yaml.get('definitions', []):
-            self._types[elem['name']] = elem
-
         self.async_msg_ids = set()
         self.async_msg_queue = []
 
@@ -353,13 +348,13 @@ class YnlFamily(SpecFamily):
 
     def _decode_enum(self, rsp, attr_spec):
         raw = rsp[attr_spec['name']]
-        enum = self._types[attr_spec['enum']]
+        enum = self.consts[attr_spec['enum']]
         i = attr_spec.get('value-start', 0)
         if 'enum-as-flags' in attr_spec and attr_spec['enum-as-flags']:
             value = set()
             while raw:
                 if raw & 1:
-                    value.add(enum['entries'][i])
+                    value.add(enum.entries_by_val[i].name)
                 raw >>= 1
                 i += 1
         else:
index 274e9c5..1bcc535 100755 (executable)
@@ -1,11 +1,12 @@
 #!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 
 import argparse
 import collections
 import os
 import yaml
 
-from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation
+from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation, SpecEnumSet, SpecEnumEntry
 
 
 def c_upper(name):
@@ -566,97 +567,37 @@ class Struct:
         self.inherited = [c_lower(x) for x in sorted(self._inherited)]
 
 
-class EnumEntry:
+class EnumEntry(SpecEnumEntry):
     def __init__(self, enum_set, yaml, prev, value_start):
-        if isinstance(yaml, str):
-            self.name = yaml
-            yaml = {}
-            self.doc = ''
-        else:
-            self.name = yaml['name']
-            self.doc = yaml.get('doc', '')
-
-        self.yaml = yaml
-        self.enum_set = enum_set
-        self.c_name = c_upper(enum_set.value_pfx + self.name)
-
-        if 'value' in yaml:
-            self.value = yaml['value']
-            if prev:
-                self.value_change = (self.value != prev.value + 1)
-        elif prev:
-            self.value_change = False
-            self.value = prev.value + 1
+        super().__init__(enum_set, yaml, prev, value_start)
+
+        if prev:
+            self.value_change = (self.value != prev.value + 1)
         else:
-            self.value = value_start
             self.value_change = (self.value != 0)
-
         self.value_change = self.value_change or self.enum_set['type'] == 'flags'
 
-    def __getitem__(self, key):
-        return self.yaml[key]
-
-    def __contains__(self, key):
-        return key in self.yaml
-
-    def has_doc(self):
-        return bool(self.doc)
+        # Added by resolve:
+        self.c_name = None
+        delattr(self, "c_name")
 
-    # raw value, i.e. the id in the enum, unlike user value which is a mask for flags
-    def raw_value(self):
-        return self.value
+    def resolve(self):
+        self.resolve_up(super())
 
-    # user value, same as raw value for enums, for flags it's the mask
-    def user_value(self):
-        if self.enum_set['type'] == 'flags':
-            return 1 << self.value
-        else:
-            return self.value
+        self.c_name = c_upper(self.enum_set.value_pfx + self.name)
 
 
-class EnumSet:
+class EnumSet(SpecEnumSet):
     def __init__(self, family, yaml):
-        self.yaml = yaml
-        self.family = family
-
         self.render_name = c_lower(family.name + '-' + yaml['name'])
         self.enum_name = 'enum ' + self.render_name
 
         self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-")
 
-        self.type = yaml['type']
-
-        prev_entry = None
-        value_start = self.yaml.get('value-start', 0)
-        self.entries = {}
-        self.entry_list = []
-        for entry in self.yaml['entries']:
-            e = EnumEntry(self, entry, prev_entry, value_start)
-            self.entries[e.name] = e
-            self.entry_list.append(e)
-            prev_entry = e
-
-    def __getitem__(self, key):
-        return self.yaml[key]
-
-    def __contains__(self, key):
-        return key in self.yaml
-
-    def has_doc(self):
-        if 'doc' in self.yaml:
-            return True
-        for entry in self.entry_list:
-            if entry.has_doc():
-                return True
-        return False
+        super().__init__(family, yaml)
 
-    def get_mask(self):
-        mask = 0
-        idx = self.yaml.get('value-start', 0)
-        for _ in self.entry_list:
-            mask |= 1 << idx
-            idx += 1
-        return mask
+    def new_entry(self, entry, prev_entry, value_start):
+        return EnumEntry(self, entry, prev_entry, value_start)
 
 
 class AttrSet(SpecAttrSet):
@@ -791,8 +732,6 @@ class Family(SpecFamily):
 
         self.mcgrps = self.yaml.get('mcast-groups', {'list': []})
 
-        self.consts = dict()
-
         self.hooks = dict()
         for when in ['pre', 'post']:
             self.hooks[when] = dict()
@@ -819,6 +758,9 @@ class Family(SpecFamily):
         if self.kernel_policy == 'global':
             self._load_global_policy()
 
+    def new_enum(self, elem):
+        return EnumSet(self, elem)
+
     def new_attr_set(self, elem):
         return AttrSet(self, elem)
 
@@ -836,12 +778,6 @@ class Family(SpecFamily):
                 }
 
     def _dictify(self):
-        for elem in self.yaml['definitions']:
-            if elem['type'] == 'enum' or elem['type'] == 'flags':
-                self.consts[elem['name']] = EnumSet(self, elem)
-            else:
-                self.consts[elem['name']] = elem
-
         ntf = []
         for msg in self.msgs.values():
             if 'notify' in msg:
@@ -1979,7 +1915,7 @@ def render_uapi(family, cw):
                 if 'doc' in enum:
                     doc = ' - ' + enum['doc']
                 cw.write_doc_line(enum.enum_name + doc)
-                for entry in enum.entry_list:
+                for entry in enum.entries.values():
                     if entry.has_doc():
                         doc = '@' + entry.c_name + ': ' + entry['doc']
                         cw.write_doc_line(doc)
@@ -1987,7 +1923,7 @@ def render_uapi(family, cw):
 
             uapi_enum_start(family, cw, const, 'name')
             name_pfx = const.get('name-prefix', f"{family.name}-{const['name']}-")
-            for entry in enum.entry_list:
+            for entry in enum.entries.values():
                 suffix = ','
                 if entry.value_change:
                     suffix = f" = {entry.user_value()}" + suffix
@@ -2044,14 +1980,17 @@ def render_uapi(family, cw):
     max_value = f"({cnt_name} - 1)"
 
     uapi_enum_start(family, cw, family['operations'], 'enum-name')
+    val = 0
     for op in family.msgs.values():
         if separate_ntf and ('notify' in op or 'event' in op):
             continue
 
         suffix = ','
-        if 'value' in op:
-            suffix = f" = {op['value']},"
+        if op.value != val:
+            suffix = f" = {op.value},"
+            val = op.value
         cw.p(op.enum_name + suffix)
+        val += 1
     cw.nl()
     cw.p(cnt_name + ('' if max_by_define else ','))
     if not max_by_define:
@@ -2124,12 +2063,12 @@ def main():
 
     _, spec_kernel = find_kernel_root(args.spec)
     if args.mode == 'uapi':
-        cw.p('/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */')
+        cw.p('/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */')
     else:
         if args.header:
-            cw.p('/* SPDX-License-Identifier: BSD-3-Clause */')
+            cw.p('/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */')
         else:
-            cw.p('// SPDX-License-Identifier: BSD-3-Clause')
+            cw.p('// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause')
     cw.p("/* Do not edit directly, auto-generated from: */")
     cw.p(f"/*\t{spec_kernel} */")
     cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
index 43989ae..74f5de1 100755 (executable)
@@ -1,5 +1,5 @@
 #!/bin/bash
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 
 TOOL=$(dirname $(realpath $0))/ynl-gen-c.py
 
index f818241..10bb1d4 100644 (file)
@@ -538,6 +538,7 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
                        dso->hit = 1;
                }
                dso__put(dso);
+               perf_event__repipe(tool, event, sample, machine);
                return 0;
        }
 
index 5d18a5a..fa7c409 100644 (file)
@@ -539,12 +539,7 @@ static int enable_counters(void)
                        return err;
        }
 
-       /*
-        * We need to enable counters only if:
-        * - we don't have tracee (attaching to task or cpu)
-        * - we have initial delay configured
-        */
-       if (!target__none(&target)) {
+       if (!target__enable_on_exec(&target)) {
                if (!all_counters_use_bpf)
                        evlist__enable(evsel_list);
        }
@@ -914,7 +909,7 @@ try_again_reset:
                        return err;
        }
 
-       if (stat_config.initial_delay) {
+       if (target.initial_delay) {
                pr_info(EVLIST_DISABLED_MSG);
        } else {
                err = enable_counters();
@@ -926,8 +921,8 @@ try_again_reset:
        if (forks)
                evlist__start_workload(evsel_list);
 
-       if (stat_config.initial_delay > 0) {
-               usleep(stat_config.initial_delay * USEC_PER_MSEC);
+       if (target.initial_delay > 0) {
+               usleep(target.initial_delay * USEC_PER_MSEC);
                err = enable_counters();
                if (err)
                        return -1;
@@ -1248,7 +1243,7 @@ static struct option stat_options[] = {
                     "aggregate counts per thread", AGGR_THREAD),
        OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
                     "aggregate counts per numa node", AGGR_NODE),
-       OPT_INTEGER('D', "delay", &stat_config.initial_delay,
+       OPT_INTEGER('D', "delay", &target.initial_delay,
                    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
        OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
                        "Only print computed metrics. No raw values", enable_metric_only),
index d90f8d1..97598d1 100644 (file)
@@ -40,19 +40,6 @@ def is_counter_value(num):
   return isfloat(num) or num == '<not counted>' or num == '<not supported>'
 
 def check_json_output(expected_items):
-  if expected_items != -1:
-    for line in Lines:
-      if 'failed' not in line:
-        count = 0
-        count = line.count(',')
-        if count != expected_items and count >= 1 and count <= 3 and 'metric-value' in line:
-          # Events that generate >1 metric may have isolated metric
-          # values and possibly other prefixes like interval, core and
-          # aggregate-number.
-          continue
-        if count != expected_items:
-          raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
-                             f' in \'{line}\'')
   checks = {
       'aggregate-number': lambda x: isfloat(x),
       'core': lambda x: True,
@@ -73,6 +60,16 @@ def check_json_output(expected_items):
   }
   input = '[\n' + ','.join(Lines) + '\n]'
   for item in json.loads(input):
+    if expected_items != -1:
+      count = len(item)
+      if count != expected_items and count >= 1 and count <= 4 and 'metric-value' in item:
+        # Events that generate >1 metric may have isolated metric
+        # values and possibly other prefixes like interval, core and
+        # aggregate-number.
+        pass
+      elif count != expected_items:
+        raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+                           f' in \'{item}\'')
     for key, value in item.items():
       if key not in checks:
         raise RuntimeError(f'Unexpected key: key={key} value={value}')
@@ -82,11 +79,11 @@ def check_json_output(expected_items):
 
 try:
   if args.no_args or args.system_wide or args.event:
-    expected_items = 6
-  elif args.interval or args.per_thread or args.system_wide_no_aggr:
     expected_items = 7
-  elif args.per_core or args.per_socket or args.per_node or args.per_die:
+  elif args.interval or args.per_thread or args.system_wide_no_aggr:
     expected_items = 8
+  elif args.per_core or args.per_socket or args.per_node or args.per_die:
+    expected_items = 9
   else:
     # If no option is specified, don't check the number of items.
     expected_items = -1
index b7f050a..324fc9e 100755 (executable)
@@ -7,6 +7,7 @@
 set -e
 
 skip_test=0
+csv_sep=@
 
 function commachecker()
 {
@@ -34,7 +35,7 @@ function commachecker()
                [ "$x" = "Failed" ] && continue
 
                # Count the number of commas
-               x=$(echo $line | tr -d -c ',')
+               x=$(echo $line | tr -d -c $csv_sep)
                cnt="${#x}"
                # echo $line $cnt
                [[ ! "$cnt" =~ $exp ]] && {
@@ -54,7 +55,7 @@ function ParanoidAndNotRoot()
 check_no_args()
 {
        echo -n "Checking CSV output: no args "
-       perf stat -x, true 2>&1 | commachecker --no-args
+       perf stat -x$csv_sep true 2>&1 | commachecker --no-args
        echo "[Success]"
 }
 
@@ -66,7 +67,7 @@ check_system_wide()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, -a true 2>&1 | commachecker --system-wide
+       perf stat -x$csv_sep -a true 2>&1 | commachecker --system-wide
        echo "[Success]"
 }
 
@@ -79,14 +80,14 @@ check_system_wide_no_aggr()
                return
        fi
        echo -n "Checking CSV output: system wide no aggregation "
-       perf stat -x, -A -a --no-merge true 2>&1 | commachecker --system-wide-no-aggr
+       perf stat -x$csv_sep -A -a --no-merge true 2>&1 | commachecker --system-wide-no-aggr
        echo "[Success]"
 }
 
 check_interval()
 {
        echo -n "Checking CSV output: interval "
-       perf stat -x, -I 1000 true 2>&1 | commachecker --interval
+       perf stat -x$csv_sep -I 1000 true 2>&1 | commachecker --interval
        echo "[Success]"
 }
 
@@ -94,7 +95,7 @@ check_interval()
 check_event()
 {
        echo -n "Checking CSV output: event "
-       perf stat -x, -e cpu-clock true 2>&1 | commachecker --event
+       perf stat -x$csv_sep -e cpu-clock true 2>&1 | commachecker --event
        echo "[Success]"
 }
 
@@ -106,7 +107,7 @@ check_per_core()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-core -a true 2>&1 | commachecker --per-core
+       perf stat -x$csv_sep --per-core -a true 2>&1 | commachecker --per-core
        echo "[Success]"
 }
 
@@ -118,7 +119,7 @@ check_per_thread()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-thread -a true 2>&1 | commachecker --per-thread
+       perf stat -x$csv_sep --per-thread -a true 2>&1 | commachecker --per-thread
        echo "[Success]"
 }
 
@@ -130,7 +131,7 @@ check_per_die()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-die -a true 2>&1 | commachecker --per-die
+       perf stat -x$csv_sep --per-die -a true 2>&1 | commachecker --per-die
        echo "[Success]"
 }
 
@@ -142,7 +143,7 @@ check_per_node()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-node -a true 2>&1 | commachecker --per-node
+       perf stat -x$csv_sep --per-node -a true 2>&1 | commachecker --per-node
        echo "[Success]"
 }
 
@@ -154,7 +155,7 @@ check_per_socket()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-socket -a true 2>&1 | commachecker --per-socket
+       perf stat -x$csv_sep --per-socket -a true 2>&1 | commachecker --per-socket
        echo "[Success]"
 }
 
index 38e3b28..d877a0a 100644 (file)
@@ -277,7 +277,7 @@ int on_switch(u64 *ctx)
        else
                prev_state = get_task_state(prev);
 
-       return off_cpu_stat(ctx, prev, next, prev_state);
+       return off_cpu_stat(ctx, prev, next, prev_state & 0xff);
 }
 
 char LICENSE[] SEC("license") = "Dual BSD/GPL";
index 534d36d..a074737 100644 (file)
@@ -842,11 +842,7 @@ int create_perf_stat_counter(struct evsel *evsel,
        if (evsel__is_group_leader(evsel)) {
                attr->disabled = 1;
 
-               /*
-                * In case of initial_delay we enable tracee
-                * events manually.
-                */
-               if (target__none(target) && !config->initial_delay)
+               if (target__enable_on_exec(target))
                        attr->enable_on_exec = 1;
        }
 
index b1c2915..bf1794e 100644 (file)
@@ -166,7 +166,6 @@ struct perf_stat_config {
        FILE                    *output;
        unsigned int             interval;
        unsigned int             timeout;
-       int                      initial_delay;
        unsigned int             unit_width;
        unsigned int             metric_only_len;
        int                      times;
index daec6cb..880f1af 100644 (file)
@@ -18,6 +18,7 @@ struct target {
        bool         per_thread;
        bool         use_bpf;
        bool         hybrid;
+       int          initial_delay;
        const char   *attr_map;
 };
 
@@ -72,6 +73,17 @@ static inline bool target__none(struct target *target)
        return !target__has_task(target) && !target__has_cpu(target);
 }
 
+static inline bool target__enable_on_exec(struct target *target)
+{
+       /*
+        * Normally enable_on_exec should be set if:
+        *  1) The tracee process is forked (not attaching to an existing task or CPU).
+        *  2) And initial_delay is not configured.
+        * Otherwise, we enable tracee events manually.
+        */
+       return target__none(target) && !target->initial_delay;
+}
+
 static inline bool target__has_per_thread(struct target *target)
 {
        return target->system_wide && target->per_thread;
index cbb600b..210d643 100644 (file)
@@ -879,6 +879,34 @@ static struct btf_raw_test raw_tests[] = {
        .btf_load_err = true,
        .err_str = "Invalid elem",
 },
+{
+       .descr = "var after datasec, ptr followed by modifier",
+       .raw_types = {
+               /* .bss section */                              /* [1] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2),
+                       sizeof(void*)+4),
+               BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)),
+               BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4),
+               /* int */                                       /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               /* int* */                                      /* [3] */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
+               BTF_VAR_ENC(NAME_TBD, 3, 0),                    /* [4] */
+               /* const int */                                 /* [5] */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0a\0b\0c\0",
+       .str_sec_size = sizeof("\0a\0b\0c\0"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = sizeof(void*)+4,
+       .key_type_id = 0,
+       .value_type_id = 1,
+       .max_entries = 1,
+},
 /* Test member exceeds the size of struct.
  *
  * struct A {
index 2666c84..7271a18 100644 (file)
@@ -65,12 +65,13 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
 }
 
 /* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
- * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
+ * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - XDP_PACKET_HEADROOM =
+ * 3408 bytes for 64-byte cacheline and 3216 for 256-byte one.
  */
 #if defined(__s390x__)
-#define MAX_PKT_SIZE 3176
+#define MAX_PKT_SIZE 3216
 #else
-#define MAX_PKT_SIZE 3368
+#define MAX_PKT_SIZE 3408
 #endif
 static void test_max_pkt_size(int fd)
 {
index 9c5a55a..5b5cef4 100644 (file)
@@ -17,5 +17,6 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_FUNCTION_TRACER=y
 CONFIG_HIDRAW=y
 CONFIG_HID=y
+CONFIG_HID_BPF=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_UHID=y
index 924ecb3..dd40d9f 100755 (executable)
@@ -404,6 +404,8 @@ EOF
        echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
        sc_s=$!
 
+       sleep 1
+
        result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
 
        if [ "$result" = "SERVER-inet" ];then