Merge tag 'timers_urgent_for_v5.16_rc4' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 5 Dec 2021 16:58:52 +0000 (08:58 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 5 Dec 2021 16:58:52 +0000 (08:58 -0800)
Pull timer fix from Borislav Petkov:

 - Prevent a tick storm when a dedicated timekeeper CPU in nohz_full
   mode runs for prolonged periods with interrupts disabled and ends up
   programming the next tick in the past, leading to that storm

* tag 'timers_urgent_for_v5.16_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timers/nohz: Last resort update jiffies on nohz_full IRQ entry

250 files changed:
Documentation/arm64/pointer-authentication.rst
Documentation/cpu-freq/core.rst
Documentation/filesystems/netfs_library.rst
MAINTAINERS
arch/arm64/include/asm/kvm_arm.h
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/machine_kexec.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/mmu.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/pci_io.h
arch/s390/lib/test_unwind.c
arch/x86/entry/entry_64.S
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/sev-common.h
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/sev.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kvm/ioapic.h
arch/x86/kvm/irq.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/realmode/init.c
arch/x86/xen/xen-asm.S
drivers/ata/libata-sata.c
drivers/ata/pata_falcon.c
drivers/ata/sata_fsl.c
drivers/block/loop.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/cpufreq/cpufreq.c
drivers/dma-buf/heaps/system_heap.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_devfreq.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/i2c/busses/i2c-cbus-gpio.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/net/dsa/b53/b53_spi.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/mv88e6xxx/serdes.h
drivers/net/dsa/rtl8365mb.c
drivers/net/ethernet/aquantia/atlantic/aq_common.h
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/natsemi/xtsonic.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/usb/lan78xx.c
drivers/net/vrf.c
drivers/net/wireguard/allowedips.c
drivers/net/wireguard/device.c
drivers/net/wireguard/device.h
drivers/net/wireguard/main.c
drivers/net/wireguard/queueing.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/ratelimiter.c
drivers/net/wireguard/receive.c
drivers/net/wireguard/socket.c
drivers/net/wireless/intel/iwlwifi/fw/uefi.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/realtek/rtw89/fw.c
drivers/net/wireless/realtek/rtw89/fw.h
drivers/powercap/dtpm.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/usb/core/quirks.c
drivers/vfio/pci/vfio_pci_igd.c
drivers/vfio/vfio.c
fs/cifs/connect.c
fs/cifs/fscache.c
fs/cifs/inode.c
fs/file.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/io-wq.c
fs/netfs/read_helper.c
fs/xfs/xfs_inode.c
include/linux/kprobes.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netdevice.h
include/linux/sched/cputime.h
include/linux/siphash.h
include/net/busy_poll.h
include/net/dst_cache.h
include/net/fib_rules.h
include/net/ip_fib.h
include/net/netns/ipv4.h
include/net/sock.h
include/sound/soc-acpi.h
include/uapi/drm/virtgpu_drm.h
include/uapi/linux/if_ether.h
kernel/kprobes.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/trace/trace_events_hist.c
kernel/trace/tracing_map.c
lib/siphash.c
net/core/dev.c
net/core/dst_cache.c
net/core/fib_rules.c
net/ipv4/fib_frontend.c
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_offload.c
net/mctp/route.c
net/mctp/test/utils.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/netlink/af_netlink.c
net/rds/tcp.c
net/rxrpc/conn_client.c
net/rxrpc/peer_object.c
net/smc/smc_close.c
net/smc/smc_core.c
net/tls/tls_sw.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_hdmi.c
sound/soc/codecs/cs35l41-spi.c
sound/soc/codecs/cs35l41.c
sound/soc/codecs/cs35l41.h
sound/soc/codecs/rk817_codec.c
sound/soc/intel/common/soc-acpi-intel-cml-match.c
sound/soc/soc-acpi.c
sound/soc/sof/intel/hda.c
sound/soc/tegra/tegra186_dspk.c
sound/soc/tegra/tegra210_admaif.c
sound/soc/tegra/tegra210_adx.c
sound/soc/tegra/tegra210_ahub.c
sound/soc/tegra/tegra210_amx.c
sound/soc/tegra/tegra210_dmic.c
sound/soc/tegra/tegra210_i2s.c
sound/soc/tegra/tegra210_mixer.c
sound/soc/tegra/tegra210_mvc.c
sound/soc/tegra/tegra210_sfc.c
tools/include/linux/kernel.h
tools/include/linux/math.h [new file with mode: 0644]
tools/objtool/elf.c
tools/objtool/objtool.c
tools/testing/radix-tree/linux/lockdep.h
tools/testing/selftests/kvm/kvm_create_max_vcpus.c
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/debug.config
tools/testing/selftests/wireguard/qemu/kernel.config
virt/kvm/kvm_main.c

index f127666ea3a81659daf3bba59c4c64c38bb6ff20..e5dad2e40aa8937b100cfc7def72bf371b89b72a 100644 (file)
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
 virtual address size configured by the kernel. For example, with a
 virtual address size of 48, the PAC is 7 bits wide.
 
-Recent versions of GCC can compile code with APIAKey-based return
-address protection when passed the -msign-return-address option. This
-uses instructions in the HINT space (unless -march=armv8.3-a or higher
-is also passed), and such code can run on systems without the pointer
-authentication extension.
+When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
+with HINT space pointer authentication instructions protecting
+function returns. Kernels built with this option will work on hardware
+with or without pointer authentication support.
 
 In addition to exec(), keys can also be reinitialized to random values
 using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
index 33cb90bd1d8f9fac3b21cdb8190b768c30980224..4ceef8e7217c38fc3e07606ce24b05fbed57d71d 100644 (file)
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
 The third argument is a struct cpufreq_freqs with the following
 values:
 
-=====  ===========================
-cpu    number of the affected CPU
+====== ======================================
+policy a pointer to the struct cpufreq_policy
 old    old frequency
 new    new frequency
 flags  flags of the cpufreq driver
-=====  ===========================
+====== ======================================
 
 3. CPUFreq Table Generation with Operating Performance Point (OPP)
 ==================================================================
index bb68d39f03b789c0a78655ab009b93a24acee862..375baca7edcdc299628c044afe3a7ab8e79c850c 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 =================================
-NETWORK FILESYSTEM HELPER LIBRARY
+Network Filesystem Helper Library
 =================================
 
 .. Contents:
@@ -37,22 +37,22 @@ into a common call framework.
 
 The following services are provided:
 
- * Handles transparent huge pages (THPs).
+ * Handle folios that span multiple pages.
 
- * Insulates the netfs from VM interface changes.
+ * Insulate the netfs from VM interface changes.
 
- * Allows the netfs to arbitrarily split reads up into pieces, even ones that
-   don't match page sizes or page alignments and that may cross pages.
+ * Allow the netfs to arbitrarily split reads up into pieces, even ones that
+   don't match folio sizes or folio alignments and that may cross folios.
 
- * Allows the netfs to expand a readahead request in both directions to meet
-   its needs.
+ * Allow the netfs to expand a readahead request in both directions to meet its
+   needs.
 
- * Allows the netfs to partially fulfil a read, which will then be resubmitted.
+ * Allow the netfs to partially fulfil a read, which will then be resubmitted.
 
- * Handles local caching, allowing cached data and server-read data to be
+ * Handle local caching, allowing cached data and server-read data to be
    interleaved for a single request.
 
- * Handles clearing of bufferage that aren't on the server.
+ * Handle clearing of bufferage that aren't on the server.
 
  * Handle retrying of reads that failed, switching reads from the cache to the
    server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions
 
 Three read helpers are provided::
 
* void netfs_readahead(struct readahead_control *ractl,
-                       const struct netfs_read_request_ops *ops,
-                       void *netfs_priv);``
* int netfs_readpage(struct file *file,
-                     struct page *page,
-                     const struct netfs_read_request_ops *ops,
-                     void *netfs_priv);
* int netfs_write_begin(struct file *file,
-                        struct address_space *mapping,
-                        loff_t pos,
-                        unsigned int len,
-                        unsigned int flags,
-                        struct page **_page,
-                        void **_fsdata,
-                        const struct netfs_read_request_ops *ops,
-                        void *netfs_priv);
      void netfs_readahead(struct readahead_control *ractl,
+                            const struct netfs_read_request_ops *ops,
+                            void *netfs_priv);
      int netfs_readpage(struct file *file,
+                          struct folio *folio,
+                          const struct netfs_read_request_ops *ops,
+                          void *netfs_priv);
      int netfs_write_begin(struct file *file,
+                             struct address_space *mapping,
+                             loff_t pos,
+                             unsigned int len,
+                             unsigned int flags,
+                             struct folio **_folio,
+                             void **_fsdata,
+                             const struct netfs_read_request_ops *ops,
+                             void *netfs_priv);
 
 Each corresponds to a VM operation, with the addition of a couple of parameters
 for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
 For ->readahead() and ->readpage(), the network filesystem should just jump
 into the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.
 
 The helpers manage the read request, calling back into the network filesystem
 through the suppplied table of operations.  Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
                void (*issue_op)(struct netfs_read_subrequest *subreq);
                bool (*is_still_valid)(struct netfs_read_request *rreq);
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-                                        struct page *page, void **_fsdata);
+                                        struct folio *folio, void **_fsdata);
                void (*done)(struct netfs_read_request *rreq);
                void (*cleanup)(struct address_space *mapping, void *netfs_priv);
        };
@@ -313,13 +313,14 @@ The operations are as follows:
 
    There is no return value; the netfs_subreq_terminated() function should be
    called to indicate whether or not the operation succeeded and how much data
-   it transferred.  The filesystem also should not deal with setting pages
+   it transferred.  The filesystem also should not deal with setting folios
    uptodate, unlocking them or dropping their refs - the helpers need to deal
    with this as they have to coordinate with copying to the local cache.
 
-   Note that the helpers have the pages locked, but not pinned.  It is possible
-   to use the ITER_XARRAY iov iterator to refer to the range of the inode that
-   is being operated upon without the need to allocate large bvec tables.
+   Note that the helpers have the folios locked, but not pinned.  It is
+   possible to use the ITER_XARRAY iov iterator to refer to the range of the
+   inode that is being operated upon without the need to allocate large bvec
+   tables.
 
  * ``is_still_valid()``
 
@@ -330,15 +331,15 @@ The operations are as follows:
  * ``check_write_begin()``
 
    [Optional] This is called from the netfs_write_begin() helper once it has
-   allocated/grabbed the page to be modified to allow the filesystem to flush
+   allocated/grabbed the folio to be modified to allow the filesystem to flush
    conflicting state before allowing it to be modified.
 
-   It should return 0 if everything is now fine, -EAGAIN if the page should be
+   It should return 0 if everything is now fine, -EAGAIN if the folio should be
    regrabbed and any other error code to abort the operation.
 
  * ``done``
 
-   [Optional] This is called after the pages in the request have all been
+   [Optional] This is called after the folios in the request have all been
    unlocked (and marked uptodate if applicable).
 
  * ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
      * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
        end of the slice instead of reissuing.
 
- * Once the data is read, the pages that have been fully read/cleared:
+ * Once the data is read, the folios that have been fully read/cleared:
 
    * Will be marked uptodate.
 
@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:
 
    * Unlocked
 
- * Any pages that need writing to the cache will then have DIO writes issued.
+ * Any folios that need writing to the cache will then have DIO writes issued.
 
  * Synchronous operations will wait for reading to be complete.
 
- * Writes to the cache will proceed asynchronously and the pages will have the
+ * Writes to the cache will proceed asynchronously and the folios will have the
    PG_fscache mark removed when that completes.
 
  * The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
                            netfs_io_terminated_t term_func,
                            void *term_func_priv);
 
+               int (*prepare_write)(struct netfs_cache_resources *cres,
+                                    loff_t *_start, size_t *_len, loff_t i_size);
+
                int (*write)(struct netfs_cache_resources *cres,
                             loff_t start_pos,
                             struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
    indicating whether the termination is definitely happening in the caller's
    context.
 
+ * ``prepare_write()``
+
+   [Required] Called to adjust a write to the cache and check that there is
+   sufficient space in the cache.  The start and length values indicate the
+   size of the write that netfslib is proposing, and this can be adjusted by
+   the cache to respect DIO boundaries.  The file size is passed for
+   information.
+
  * ``write()``
 
    [Required] Called to write to the cache.  The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
 there isn't a read request structure as well, such as writing dirty data to the
 cache.
 
+
+API Function Reference
+======================
+
 .. kernel-doc:: include/linux/netfs.h
+.. kernel-doc:: fs/netfs/read_helper.c
index 360e9aa0205d69c77a011e072d3b30b009bad27b..faa9c34d837deb22ed6c9c00b32aac33f5485cc1 100644 (file)
@@ -15979,6 +15979,7 @@ F:      arch/mips/generic/board-ranchu.c
 
 RANDOM NUMBER DRIVER
 M:     "Theodore Ts'o" <tytso@mit.edu>
+M:     Jason A. Donenfeld <Jason@zx2c4.com>
 S:     Maintained
 F:     drivers/char/random.c
 
@@ -16623,7 +16624,8 @@ F:      drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
 M:     Julian Wiedmann <jwi@linux.ibm.com>
-M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Alexandra Winter <wintera@linux.ibm.com>
+M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -16634,7 +16636,8 @@ F:      net/iucv/
 
 S390 NETWORK DRIVERS
 M:     Julian Wiedmann <jwi@linux.ibm.com>
-M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Alexandra Winter <wintera@linux.ibm.com>
+M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
index a39fcf318c774df52e72ceebbdf139756328c1a0..01d47c5886dc43a6925116e3ba38c0a52aaf5657 100644 (file)
@@ -91,7 +91,7 @@
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1           ((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1           ((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI            (1 << 20)
 #define TCR_EL2_PS_SHIFT       16
 #define TCR_EL2_PS_MASK                (7 << TCR_EL2_PS_SHIFT)
 #define CPTR_EL2_TFP_SHIFT 10
 
 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TCPAC (1U << 31)
 #define CPTR_EL2_TAM   (1 << 30)
 #define CPTR_EL2_TTA   (1 << 20)
 #define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
index b3e4f9a088b1a76118205f0065fda2cc6859aa2f..8cf970d219f5d896a7805c446e9854dadf7d0cd9 100644 (file)
        .endm
 
 SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+       BTI_C
+#endif
        ftrace_regs_entry       1
        b       ftrace_common
 SYM_CODE_END(ftrace_regs_caller)
 
 SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+       BTI_C
+#endif
        ftrace_regs_entry       0
        b       ftrace_common
 SYM_CODE_END(ftrace_caller)
index 1038494135c8cef847829ebcf43b6fa58d596770..6fb31c117ebe08cab0898cd9a8ca552e3c4a7026 100644 (file)
@@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
        if (rc)
                return rc;
        kimage->arch.ttbr1 = __pa(trans_pgd);
-       kimage->arch.zero_page = __pa(empty_zero_page);
+       kimage->arch.zero_page = __pa_symbol(empty_zero_page);
 
        reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
        memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
index 7a0af1d39303cd580a097e42b14fc287f6ff0c92..96c5f3fb78389ef8be01890df1d8c8fa45d1c6e2 100644 (file)
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  */
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+       /*
+        * Save PSTATE early so that we can evaluate the vcpu mode
+        * early on.
+        */
+       vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+       /*
+        * Check whether we want to repaint the state one way or
+        * another.
+        */
+       early_exit_filter(vcpu, exit_code);
+
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
index de7e14c862e6c9b5415df6a7daf815142a302a16..7ecca8b078519fd315c92cbc7cc059c0d2269bed 100644 (file)
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
        ctxt->regs.pc                   = read_sysreg_el2(SYS_ELR);
-       ctxt->regs.pstate               = read_sysreg_el2(SYS_SPSR);
+       /*
+        * Guest PSTATE gets saved at guest fixup time in all
+        * cases. We still need to handle the nVHE host side here.
+        */
+       if (!has_vhe() && ctxt->__hyp_running_vcpu)
+               ctxt->regs.pstate       = read_sysreg_el2(SYS_SPSR);
 
        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
index c0e3fed26d93068bb1953fed35be5e076f42b096..d13115a124341601f6d0f3367dc5d7c6f7095806 100644 (file)
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  * Returns false if the guest ran in AArch32 when it shouldn't have, and
  * thus should exit to the host, or true if a the guest run loop can continue.
  */
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
                vcpu->arch.target = -1;
                *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
                *exit_code |= ARM_EXCEPTION_IL;
-               return false;
        }
-
-       return true;
 }
 
 /* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu);
 
-               if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
-                       break;
-
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
 
index 5a2cb5d9bc4b22a55e7afb591962b741619c8446..fbb26b93c347738ce85f27e090ddff776c08e16d 100644 (file)
@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
        return hyp_exit_handlers;
 }
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
 /* Switch to the guest for VHE systems running in EL2 */
 static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
index 25ba21f98504120d8a723c13655d5bd583d35b4f..2639b9ee48f97d0b69c72177d55eb2dd1e9be1d2 100644 (file)
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>
 
-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS                  (1U << 16)
-#else
-#define KVM_MAX_VCPUS                  (1U << 9)
-#endif
+#define KVM_MAX_VCPUS                  \
+       ((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
 
 #define KVM_HALT_POLL_NS_DEFAULT       500000
 
index d81bae8eb55ea0a0f81bed64b5c3ab589a99a00a..fc058ff5f4b6f3ac393d58ea25d0446a19fc7664 100644 (file)
@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
 {
+       gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+       spin_lock(&kvm->mmu_lock);
+       stage2_unmap_range(kvm, gpa, size, false);
+       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
index fd825097cf048b59d8cc7486ae345e9cbec08b07..b626bc6e0eaf9809a3e81fa06c398a9fb7f01cbc 100644 (file)
@@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -476,6 +475,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -489,6 +489,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -775,12 +777,14 @@ CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_RANDOM32_SELFTEST=y
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_HEADERS_INSTALL=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
@@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_KFENCE=y
+CONFIG_KFENCE_STATIC_KEYS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
@@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_DEBUG_ENTRY=y
 CONFIG_CIO_INJECT=y
 CONFIG_KUNIT=m
@@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
index c9c3cedff2d85634327af0d5c36d795da74ddb01..0056cab273723d9c0423f4aee4f397d6852c4797 100644 (file)
@@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -467,6 +466,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -480,6 +480,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
@@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_DEBUGFS=y
 CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
index aceccf3b9a882a57ecd15dc24324b0a63434f17c..eed3b9acfa71aaf59f8fdd2a2dd58517c0f2e144 100644 (file)
@@ -65,9 +65,11 @@ CONFIG_ZFCP=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
 # CONFIG_ZLIB_DFLTCC is not set
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_SYMBOLIC_ERRNAME is not set
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
index e4dc64cc9c555c11abb747e642faf6fecc910d71..287bb88f76986e127388efd03c18d117bf4c417e 100644 (file)
 
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT               48
-#define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT          62
+#define ZPCI_IOMAP_ADDR_BASE           (1UL << ZPCI_IOMAP_ADDR_SHIFT)
 #define ZPCI_IOMAP_ADDR_OFF_MASK       ((1UL << ZPCI_IOMAP_SHIFT) - 1)
 #define ZPCI_IOMAP_MAX_ENTRIES                                                 \
-       ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+       (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
 #define ZPCI_IOMAP_ADDR_IDX_MASK                                               \
-       (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+       ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
 
 struct zpci_iomap_entry {
        u32 fh;
index cfc5f5557c06756236b935eacee5313da1f663d3..bc7973359ae2786b71f8386bf9903b0daaf66e1f 100644 (file)
@@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
                }
 
                /*
-                * trigger specification exception
+                * Trigger operation exception; use insn notation to bypass
+                * llvm's integrated assembler sanity checks.
                 */
                asm volatile(
-                       "       mvcl    %%r1,%%r1\n"
+                       "       .insn   e,0x0000\n"     /* illegal opcode */
                        "0:     nopr    %%r7\n"
                        EX_TABLE(0b, 0b)
                        :);
index e38a4cf795d962bbb8312dc6241333b8939a6b8b..97b1f84bb53f808b9bcddba8af67732030171026 100644 (file)
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
        ud2
 1:
 #endif
+#ifdef CONFIG_XEN_PV
+       ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
        POP_REGS pop_rdi=0
 
        /*
@@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 .Lparanoid_entry_checkgs:
        /* EBX = 1 -> kernel GSBASE active, no restore required */
        movl    $1, %ebx
+
        /*
         * The kernel-enforced convention is a negative GSBASE indicates
         * a kernel value. No SWAPGS needed on entry and exit.
@@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
        movl    $MSR_GS_BASE, %ecx
        rdmsr
        testl   %edx, %edx
-       jns     .Lparanoid_entry_swapgs
-       ret
+       js      .Lparanoid_kernel_gsbase
 
-.Lparanoid_entry_swapgs:
+       /* EBX = 0 -> SWAPGS required on exit */
+       xorl    %ebx, %ebx
        swapgs
+.Lparanoid_kernel_gsbase:
 
-       /*
-        * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
-        * unconditional CR3 write, even in the PTI case.  So do an lfence
-        * to prevent GS speculation, regardless of whether PTI is enabled.
-        */
        FENCE_SWAPGS_KERNEL_ENTRY
-
-       /* EBX = 0 -> SWAPGS required on exit */
-       xorl    %ebx, %ebx
        ret
 SYM_CODE_END(paranoid_entry)
 
@@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
        pushq   %r12
        ret
 
-.Lerror_entry_done_lfence:
-       FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
-       ret
-
        /*
         * There are two places in the kernel that can potentially fault with
         * usergs. Handle them here.  B stepping K8s sometimes report a
@@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
         * .Lgs_change's error handler with kernel gsbase.
         */
        SWAPGS
-       FENCE_SWAPGS_USER_ENTRY
-       jmp .Lerror_entry_done
+
+       /*
+        * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+        * kernel or user gsbase.
+        */
+.Lerror_entry_done_lfence:
+       FENCE_SWAPGS_KERNEL_ENTRY
+       ret
 
 .Lbstep_iret:
        /* Fix truncated RIP */
index 5a0bcf8b78d7c2026e93715153decec133a33869..048b6d5aff504f394baeca8d3bf1f39e816bfdd5 100644 (file)
 #define INTEL_FAM6_ALDERLAKE           0x97    /* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L         0x9A    /* Golden Cove / Gracemont */
 
-#define INTEL_FAM6_RAPTOR_LAKE         0xB7
+#define INTEL_FAM6_RAPTORLAKE          0xB7
 
 /* "Small Core" Processors (Atom) */
 
index 6ac61f85e07b9971c40158d0f7d88cde0e3ba55c..860ed500580cc9af3330ce22b4fa6fec5aac21e8 100644 (file)
@@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter {
 #define APICV_INHIBIT_REASON_PIT_REINJ  4
 #define APICV_INHIBIT_REASON_X2APIC    5
 #define APICV_INHIBIT_REASON_BLOCKIRQ  6
+#define APICV_INHIBIT_REASON_ABSENT    7
 
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
index 2cef6c5a52c2a71a550393c78111dfd3242f47d1..6acaf5af0a3d0657ed48b876c95ef0ad73358c71 100644 (file)
 
 #define GHCB_RESP_CODE(v)              ((v) & GHCB_MSR_INFO_MASK)
 
+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */
+#define GHCB_ERR_NOT_REGISTERED                1
+#define GHCB_ERR_INVALID_USAGE         2
+#define GHCB_ERR_INVALID_SCRATCH_AREA  3
+#define GHCB_ERR_MISSING_INPUT         4
+#define GHCB_ERR_INVALID_INPUT         5
+#define GHCB_ERR_INVALID_EVENT         6
+
 #endif
index d5958278eba6d311d3af758e2ff9e99f60fa3b8a..91d4b6de58abef38792eef58546b525eae278cb3 100644 (file)
@@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
                                      struct fpstate *fpstate)
 {
        struct xregs_state __user *x = buf;
-       struct _fpx_sw_bytes sw_bytes;
+       struct _fpx_sw_bytes sw_bytes = {};
        u32 xfeatures;
        int err;
 
index 74f0ec95538486a8dba1d4fab55866ed1b646afd..a9fc2ac7a8bd59cd06ca1c6764cabcd014a61aa3 100644 (file)
@@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
                                   char *dst, char *buf, size_t size)
 {
        unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
-       char __user *target = (char __user *)dst;
-       u64 d8;
-       u32 d4;
-       u16 d2;
-       u8  d1;
 
        /*
         * This function uses __put_user() independent of whether kernel or user
@@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
         * instructions here would cause infinite nesting.
         */
        switch (size) {
-       case 1:
+       case 1: {
+               u8 d1;
+               u8 __user *target = (u8 __user *)dst;
+
                memcpy(&d1, buf, 1);
                if (__put_user(d1, target))
                        goto fault;
                break;
-       case 2:
+       }
+       case 2: {
+               u16 d2;
+               u16 __user *target = (u16 __user *)dst;
+
                memcpy(&d2, buf, 2);
                if (__put_user(d2, target))
                        goto fault;
                break;
-       case 4:
+       }
+       case 4: {
+               u32 d4;
+               u32 __user *target = (u32 __user *)dst;
+
                memcpy(&d4, buf, 4);
                if (__put_user(d4, target))
                        goto fault;
                break;
-       case 8:
+       }
+       case 8: {
+               u64 d8;
+               u64 __user *target = (u64 __user *)dst;
+
                memcpy(&d8, buf, 8);
                if (__put_user(d8, target))
                        goto fault;
                break;
+       }
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
@@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
                                  char *src, char *buf, size_t size)
 {
        unsigned long error_code = X86_PF_PROT;
-       char __user *s = (char __user *)src;
-       u64 d8;
-       u32 d4;
-       u16 d2;
-       u8  d1;
 
        /*
         * This function uses __get_user() independent of whether kernel or user
@@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
         * instructions here would cause infinite nesting.
         */
        switch (size) {
-       case 1:
+       case 1: {
+               u8 d1;
+               u8 __user *s = (u8 __user *)src;
+
                if (__get_user(d1, s))
                        goto fault;
                memcpy(buf, &d1, 1);
                break;
-       case 2:
+       }
+       case 2: {
+               u16 d2;
+               u16 __user *s = (u16 __user *)src;
+
                if (__get_user(d2, s))
                        goto fault;
                memcpy(buf, &d2, 2);
                break;
-       case 4:
+       }
+       case 4: {
+               u32 d4;
+               u32 __user *s = (u32 __user *)src;
+
                if (__get_user(d4, s))
                        goto fault;
                memcpy(buf, &d4, 4);
                break;
-       case 8:
+       }
+       case 8: {
+               u64 d8;
+               u64 __user *s = (u64 __user *)src;
                if (__get_user(d8, s))
                        goto fault;
                memcpy(buf, &d8, 8);
                break;
+       }
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
index 2e076a459a0c084aa279f32f3fc644604713e595..a698196377be9bf650eb8bc1ea28692068568626 100644 (file)
@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
+static void __init tsc_disable_clocksource_watchdog(void)
+{
+       clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+       clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
 static void __init check_system_tsc_reliable(void)
 {
 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
 #endif
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                tsc_clocksource_reliable = 1;
+
+       /*
+        * Disable the clocksource watchdog when the system has:
+        *  - TSC running at constant frequency
+        *  - TSC which does not stop in C-States
+        *  - the TSC_ADJUST register which allows to detect even minimal
+        *    modifications
+        *  - not more than two sockets. As the number of sockets cannot be
+        *    evaluated at the early boot stage where this has to be
+        *    invoked, check the number of online memory nodes as a
+        *    fallback solution which is an reasonable estimate.
+        */
+       if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+           boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+           boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+           nr_online_nodes <= 2)
+               tsc_disable_clocksource_watchdog();
 }
 
 /*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
        if (tsc_unstable)
                goto unreg;
 
-       if (tsc_clocksource_reliable || no_tsc_watchdog)
-               clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
        if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
                clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
 
@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
        }
 
        if (tsc_clocksource_reliable || no_tsc_watchdog)
-               clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+               tsc_disable_clocksource_watchdog();
 
        clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
        detect_art();
index 50a4515fe0ad15ec241c257735022287094a4514..9452dc9664b51fddcfaeb6274935c91885814fda 100644 (file)
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
        }
 }
 
+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL            (HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+       int next_cpu;
+
+       tsc_verify_tsc_adjust(false);
+
+       /* Run the check for all onlined CPUs in turn */
+       next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+       if (next_cpu >= nr_cpu_ids)
+               next_cpu = cpumask_first(cpu_online_mask);
+
+       tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+       add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+       if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+               return 0;
+
+       timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+       tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+       add_timer(&tsc_sync_check_timer);
+
+       return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
                                   unsigned int cpu, bool bootcpu)
 {
index e66e620c3bed9eafde705b0315ff977fb158d031..539333ac4b38082f01e0407908441aa39b9ca136 100644 (file)
@@ -81,7 +81,6 @@ struct kvm_ioapic {
        unsigned long irq_states[IOAPIC_NUM_PINS];
        struct kvm_io_device dev;
        struct kvm *kvm;
-       void (*ack_notifier)(void *opaque, int irq);
        spinlock_t lock;
        struct rtc_status rtc_status;
        struct delayed_work eoi_inject;
index 650642b18d151083e7120b81ef51dae690090333..c2d7cfe82d004b1ae4d9518b0a73a0755114bedd 100644 (file)
@@ -56,7 +56,6 @@ struct kvm_pic {
        struct kvm_io_device dev_master;
        struct kvm_io_device dev_slave;
        struct kvm_io_device dev_elcr;
-       void (*ack_notifier)(void *opaque, int irq);
        unsigned long irq_states[PIC_NUM_PINS];
 };
 
index 759952dd122284b183c3735bad40717495920304..f206fc35deff6ef4d0a236eddd4476635d4e5b1f 100644 (file)
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
        int highest_irr;
-       if (apic->vcpu->arch.apicv_active)
+       if (kvm_x86_ops.sync_pir_to_irr)
                highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
index 3be9beea838d134a077a67e05f12a1c9aaa7c2ef..e2e1d012df2269d26524f396c286b44d93e947bf 100644 (file)
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
                flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
 
        if (is_tdp_mmu_enabled(kvm))
-               flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+               flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 
        return flush;
 }
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
 
 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-       return sp->role.invalid ||
+       if (sp->role.invalid)
+               return true;
+
+       /* TDP MMU pages due not use the MMU generation. */
+       return !sp->tdp_mmu_page &&
               unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
        iterator->shadow_addr = root;
        iterator->level = vcpu->arch.mmu->shadow_root_level;
 
-       if (iterator->level == PT64_ROOT_4LEVEL &&
+       if (iterator->level >= PT64_ROOT_4LEVEL &&
            vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
            !vcpu->arch.mmu->direct_map)
-               --iterator->level;
+               iterator->level = PT32E_ROOT_LEVEL;
 
        if (iterator->level == PT32E_ROOT_LEVEL) {
                /*
@@ -3976,6 +3980,20 @@ out_retry:
        return true;
 }
 
+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+                               struct kvm_page_fault *fault, int mmu_seq)
+{
+       if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+               return true;
+
+       return fault->slot &&
+              mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        else
                write_lock(&vcpu->kvm->mmu_lock);
 
-       if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+       if (is_page_fault_stale(vcpu, fault, mmu_seq))
                goto out_unlock;
+
        r = make_mmu_pages_available(vcpu);
        if (r)
                goto out_unlock;
@@ -4855,7 +4874,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
        struct kvm_mmu *context = &vcpu->arch.guest_mmu;
        struct kvm_mmu_role_regs regs = {
                .cr0 = cr0,
-               .cr4 = cr4,
+               .cr4 = cr4 & ~X86_CR4_PKE,
                .efer = efer,
        };
        union kvm_mmu_role new_role;
@@ -4919,7 +4938,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
        context->direct_map = false;
 
        update_permission_bitmask(context, true);
-       update_pkru_bitmask(context);
+       context->pkru_mask = 0;
        reset_rsvds_bits_mask_ept(vcpu, context, execonly);
        reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
@@ -5025,6 +5044,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
        /*
         * Invalidate all MMU roles to force them to reinitialize as CPUID
         * information is factored into reserved bit calculations.
+        *
+        * Correctly handling multiple vCPU models with respect to paging and
+        * physical address properties) in a single VM would require tracking
+        * all relevant CPUID information in kvm_mmu_page_role. That is very
+        * undesirable as it would increase the memory requirements for
+        * gfn_track (see struct kvm_mmu_page_role comments).  For now that
+        * problem is swept under the rug; KVM's CPUID API is horrific and
+        * it's all but impossible to solve it without introducing a new API.
         */
        vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
        vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5032,24 +5059,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 
        /*
-        * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
-        * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
-        * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
-        * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
-        * sweep the problem under the rug.
-        *
-        * KVM's horrific CPUID ABI makes the problem all but impossible to
-        * solve, as correctly handling multiple vCPU models (with respect to
-        * paging and physical address properties) in a single VM would require
-        * tracking all relevant CPUID information in kvm_mmu_page_role.  That
-        * is very undesirable as it would double the memory requirements for
-        * gfn_track (see struct kvm_mmu_page_role comments), and in practice
-        * no sane VMM mucks with the core vCPU model on the fly.
+        * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
+        * kvm_arch_vcpu_ioctl().
         */
-       if (vcpu->arch.last_vmentry_cpu != -1) {
-               pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
-               pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
-       }
+       KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
 }
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -5369,7 +5382,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
        ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5854,8 +5867,6 @@ restart:
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *slot)
 {
-       bool flush = false;
-
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
                /*
@@ -5863,17 +5874,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                 * logging at a 4k granularity and never creates collapsible
                 * 2m SPTEs during dirty logging.
                 */
-               flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-               if (flush)
+               if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
                        kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
                write_unlock(&kvm->mmu_lock);
        }
 
        if (is_tdp_mmu_enabled(kvm)) {
                read_lock(&kvm->mmu_lock);
-               flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
-               if (flush)
-                       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+               kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
                read_unlock(&kvm->mmu_lock);
        }
 }
@@ -6182,23 +6190,46 @@ void kvm_mmu_module_exit(void)
        mmu_audit_disable();
 }
 
+/*
+ * Calculate the effective recovery period, accounting for '0' meaning "let KVM
+ * select a halving time of 1 hour".  Returns true if recovery is enabled.
+ */
+static bool calc_nx_huge_pages_recovery_period(uint *period)
+{
+       /*
+        * Use READ_ONCE to get the params, this may be called outside of the
+        * param setters, e.g. by the kthread to compute its next timeout.
+        */
+       bool enabled = READ_ONCE(nx_huge_pages);
+       uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+       if (!enabled || !ratio)
+               return false;
+
+       *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+       if (!*period) {
+               /* Make sure the period is not less than one second.  */
+               ratio = min(ratio, 3600u);
+               *period = 60 * 60 * 1000 / ratio;
+       }
+       return true;
+}
+
 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 {
        bool was_recovery_enabled, is_recovery_enabled;
        uint old_period, new_period;
        int err;
 
-       was_recovery_enabled = nx_huge_pages_recovery_ratio;
-       old_period = nx_huge_pages_recovery_period_ms;
+       was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
 
        err = param_set_uint(val, kp);
        if (err)
                return err;
 
-       is_recovery_enabled = nx_huge_pages_recovery_ratio;
-       new_period = nx_huge_pages_recovery_period_ms;
+       is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
 
-       if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
+       if (is_recovery_enabled &&
            (!was_recovery_enabled || old_period > new_period)) {
                struct kvm *kvm;
 
@@ -6262,18 +6293,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 
 static long get_nx_lpage_recovery_timeout(u64 start_time)
 {
-       uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
-       uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+       bool enabled;
+       uint period;
 
-       if (!period && ratio) {
-               /* Make sure the period is not less than one second.  */
-               ratio = min(ratio, 3600u);
-               period = 60 * 60 * 1000 / ratio;
-       }
+       enabled = calc_nx_huge_pages_recovery_period(&period);
 
-       return READ_ONCE(nx_huge_pages) && ratio
-               ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
-               : MAX_SCHEDULE_TIMEOUT;
+       return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+                      : MAX_SCHEDULE_TIMEOUT;
 }
 
 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
index f87d36898c44e33db8dafabf0f603ae9e7ac3458..708a5d297fe1e370c9912da506cc316b84a9bb83 100644 (file)
@@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 
        r = RET_PF_RETRY;
        write_lock(&vcpu->kvm->mmu_lock);
-       if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+
+       if (is_page_fault_stale(vcpu, fault, mmu_seq))
                goto out_unlock;
 
        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
index a54c3491af42c9fba8a894619ee7bd5c7f3f4628..1db8496259add5411626a99311d4ed6b372aed5d 100644 (file)
@@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
        struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
        int level = sp->role.level;
        gfn_t base_gfn = sp->gfn;
-       u64 old_child_spte;
-       u64 *sptep;
-       gfn_t gfn;
        int i;
 
        trace_kvm_mmu_prepare_zap_page(sp);
@@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
        tdp_mmu_unlink_page(kvm, sp, shared);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-               sptep = rcu_dereference(pt) + i;
-               gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+               u64 *sptep = rcu_dereference(pt) + i;
+               gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+               u64 old_child_spte;
 
                if (shared) {
                        /*
@@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
                                    shared);
        }
 
-       kvm_flush_remote_tlbs_with_address(kvm, gfn,
+       kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
                                           KVM_PAGES_PER_HPAGE(level + 1));
 
        call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -1033,9 +1031,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
        struct kvm_mmu_page *root;
 
-       for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
-               flush |= zap_gfn_range(kvm, root, range->start, range->end,
-                                      range->may_block, flush, false);
+       for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+               flush = zap_gfn_range(kvm, root, range->start, range->end,
+                                     range->may_block, flush, false);
 
        return flush;
 }
@@ -1364,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+                                      const struct kvm_memory_slot *slot)
 {
        gfn_t start = slot->base_gfn;
        gfn_t end = start + slot->npages;
@@ -1378,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 
        tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-               if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-                       flush = false;
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
-               }
 
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
@@ -1393,6 +1388,7 @@ retry:
                                                            pfn, PG_LEVEL_NUM))
                        continue;
 
+               /* Note, a successful atomic zap also does a remote TLB flush. */
                if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
                        /*
                         * The iter must explicitly re-read the SPTE because
@@ -1401,30 +1397,24 @@ retry:
                        iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
                        goto retry;
                }
-               flush = true;
        }
 
        rcu_read_unlock();
-
-       return flush;
 }
 
 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot)
 {
        struct kvm_mmu_page *root;
 
        lockdep_assert_held_read(&kvm->mmu_lock);
 
        for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-               flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-       return flush;
+               zap_collapsible_spte_range(kvm, root, slot);
 }
 
 /*
index 476b133544dd94e8465258c91cfdba0f2ddb8029..3899004a5d91e70b8821656cc0715519dc770d36 100644 (file)
@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn,
index affc0ea98d302286303188c91bfdb73bb2cef7e2..8f9af7b7dbbe479fbf914d7c27afdb2d77eb7513 100644 (file)
@@ -900,6 +900,7 @@ out:
 bool svm_check_apicv_inhibit_reasons(ulong bit)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+                         BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_NESTED) |
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
@@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       int cpu = get_cpu();
 
+       WARN_ON(cpu != vcpu->cpu);
        svm->avic_is_running = is_run;
 
-       if (!kvm_vcpu_apicv_active(vcpu))
-               return;
-
-       if (is_run)
-               avic_vcpu_load(vcpu, vcpu->cpu);
-       else
-               avic_vcpu_put(vcpu);
+       if (kvm_vcpu_apicv_active(vcpu)) {
+               if (is_run)
+                       avic_vcpu_load(vcpu, cpu);
+               else
+                       avic_vcpu_put(vcpu);
+       }
+       put_cpu();
 }
 
 void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
index 871c426ec389a98632307b16d661c2abf5b856b0..b4095dfeeee62fa1702c3aa48d2af059d03e4d28 100644 (file)
@@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
 
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
-       pmu->reserved_bits = 0xffffffff00200000ull;
+       pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->version = 1;
        /* not applicable to AMD; but clean them to prevent any fall out */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
index 21ac0a5de4e0c8ba019fe943c0cc11e9533edfcc..7656a2c5662a68425716469b4f94cb164368cb01 100644 (file)
@@ -1543,28 +1543,50 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
        return false;
 }
 
-static int sev_lock_for_migration(struct kvm *kvm)
+static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+       struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+       int r = -EBUSY;
+
+       if (dst_kvm == src_kvm)
+               return -EINVAL;
 
        /*
-        * Bail if this VM is already involved in a migration to avoid deadlock
-        * between two VMs trying to migrate to/from each other.
+        * Bail if these VMs are already involved in a migration to avoid
+        * deadlock between two VMs trying to migrate to/from each other.
         */
-       if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+       if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
                return -EBUSY;
 
-       mutex_lock(&kvm->lock);
+       if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
+               goto release_dst;
 
+       r = -EINTR;
+       if (mutex_lock_killable(&dst_kvm->lock))
+               goto release_src;
+       if (mutex_lock_killable(&src_kvm->lock))
+               goto unlock_dst;
        return 0;
+
+unlock_dst:
+       mutex_unlock(&dst_kvm->lock);
+release_src:
+       atomic_set_release(&src_sev->migration_in_progress, 0);
+release_dst:
+       atomic_set_release(&dst_sev->migration_in_progress, 0);
+       return r;
 }
 
-static void sev_unlock_after_migration(struct kvm *kvm)
+static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+       struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
 
-       mutex_unlock(&kvm->lock);
-       atomic_set_release(&sev->migration_in_progress, 0);
+       mutex_unlock(&dst_kvm->lock);
+       mutex_unlock(&src_kvm->lock);
+       atomic_set_release(&dst_sev->migration_in_progress, 0);
+       atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
 
@@ -1607,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
        dst->asid = src->asid;
        dst->handle = src->handle;
        dst->pages_locked = src->pages_locked;
+       dst->enc_context_owner = src->enc_context_owner;
 
        src->asid = 0;
        src->active = false;
        src->handle = 0;
        src->pages_locked = 0;
+       src->enc_context_owner = NULL;
 
-       INIT_LIST_HEAD(&dst->regions_list);
-       list_replace_init(&src->regions_list, &dst->regions_list);
+       list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
 }
 
 static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
@@ -1666,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
        bool charged = false;
        int ret;
 
-       ret = sev_lock_for_migration(kvm);
-       if (ret)
-               return ret;
-
-       if (sev_guest(kvm)) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
        source_kvm_file = fget(source_fd);
        if (!file_is_kvm(source_kvm_file)) {
                ret = -EBADF;
@@ -1682,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
        }
 
        source_kvm = source_kvm_file->private_data;
-       ret = sev_lock_for_migration(source_kvm);
+       ret = sev_lock_two_vms(kvm, source_kvm);
        if (ret)
                goto out_fput;
 
-       if (!sev_guest(source_kvm)) {
+       if (sev_guest(kvm) || !sev_guest(source_kvm)) {
                ret = -EINVAL;
-               goto out_source;
+               goto out_unlock;
        }
 
        src_sev = &to_kvm_svm(source_kvm)->sev_info;
+
+       /*
+        * VMs mirroring src's encryption context rely on it to keep the
+        * ASID allocated, but below we are clearing src_sev->asid.
+        */
+       if (src_sev->num_mirrored_vms) {
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
        dst_sev->misc_cg = get_current_misc_cg();
        cg_cleanup_sev = dst_sev;
        if (dst_sev->misc_cg != src_sev->misc_cg) {
@@ -1728,13 +1752,11 @@ out_dst_cgroup:
                sev_misc_cg_uncharge(cg_cleanup_sev);
        put_misc_cg(cg_cleanup_sev->misc_cg);
        cg_cleanup_sev->misc_cg = NULL;
-out_source:
-       sev_unlock_after_migration(source_kvm);
+out_unlock:
+       sev_unlock_two_vms(kvm, source_kvm);
 out_fput:
        if (source_kvm_file)
                fput(source_kvm_file);
-out_unlock:
-       sev_unlock_after_migration(kvm);
        return ret;
 }
 
@@ -1953,76 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 {
        struct file *source_kvm_file;
        struct kvm *source_kvm;
-       struct kvm_sev_info source_sev, *mirror_sev;
+       struct kvm_sev_info *source_sev, *mirror_sev;
        int ret;
 
        source_kvm_file = fget(source_fd);
        if (!file_is_kvm(source_kvm_file)) {
                ret = -EBADF;
-               goto e_source_put;
+               goto e_source_fput;
        }
 
        source_kvm = source_kvm_file->private_data;
-       mutex_lock(&source_kvm->lock);
-
-       if (!sev_guest(source_kvm)) {
-               ret = -EINVAL;
-               goto e_source_unlock;
-       }
+       ret = sev_lock_two_vms(kvm, source_kvm);
+       if (ret)
+               goto e_source_fput;
 
-       /* Mirrors of mirrors should work, but let's not get silly */
-       if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
+       /*
+        * Mirrors of mirrors should work, but let's not get silly.  Also
+        * disallow out-of-band SEV/SEV-ES init if the target is already an
+        * SEV guest, or if vCPUs have been created.  KVM relies on vCPUs being
+        * created after SEV/SEV-ES initialization, e.g. to init intercepts.
+        */
+       if (sev_guest(kvm) || !sev_guest(source_kvm) ||
+           is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
                ret = -EINVAL;
-               goto e_source_unlock;
+               goto e_unlock;
        }
 
-       memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
-              sizeof(source_sev));
-
        /*
         * The mirror kvm holds an enc_context_owner ref so its asid can't
         * disappear until we're done with it
         */
+       source_sev = &to_kvm_svm(source_kvm)->sev_info;
        kvm_get_kvm(source_kvm);
-
-       fput(source_kvm_file);
-       mutex_unlock(&source_kvm->lock);
-       mutex_lock(&kvm->lock);
-
-       /*
-        * Disallow out-of-band SEV/SEV-ES init if the target is already an
-        * SEV guest, or if vCPUs have been created.  KVM relies on vCPUs being
-        * created after SEV/SEV-ES initialization, e.g. to init intercepts.
-        */
-       if (sev_guest(kvm) || kvm->created_vcpus) {
-               ret = -EINVAL;
-               goto e_mirror_unlock;
-       }
+       source_sev->num_mirrored_vms++;
 
        /* Set enc_context_owner and copy its encryption context over */
        mirror_sev = &to_kvm_svm(kvm)->sev_info;
        mirror_sev->enc_context_owner = source_kvm;
        mirror_sev->active = true;
-       mirror_sev->asid = source_sev.asid;
-       mirror_sev->fd = source_sev.fd;
-       mirror_sev->es_active = source_sev.es_active;
-       mirror_sev->handle = source_sev.handle;
+       mirror_sev->asid = source_sev->asid;
+       mirror_sev->fd = source_sev->fd;
+       mirror_sev->es_active = source_sev->es_active;
+       mirror_sev->handle = source_sev->handle;
+       INIT_LIST_HEAD(&mirror_sev->regions_list);
+       ret = 0;
+
        /*
         * Do not copy ap_jump_table. Since the mirror does not share the same
         * KVM contexts as the original, and they may have different
         * memory-views.
         */
 
-       mutex_unlock(&kvm->lock);
-       return 0;
-
-e_mirror_unlock:
-       mutex_unlock(&kvm->lock);
-       kvm_put_kvm(source_kvm);
-       return ret;
-e_source_unlock:
-       mutex_unlock(&source_kvm->lock);
-e_source_put:
+e_unlock:
+       sev_unlock_two_vms(kvm, source_kvm);
+e_source_fput:
        if (source_kvm_file)
                fput(source_kvm_file);
        return ret;
@@ -2034,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm)
        struct list_head *head = &sev->regions_list;
        struct list_head *pos, *q;
 
+       WARN_ON(sev->num_mirrored_vms);
+
        if (!sev_guest(kvm))
                return;
 
        /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
        if (is_mirroring_enc_context(kvm)) {
-               kvm_put_kvm(sev->enc_context_owner);
+               struct kvm *owner_kvm = sev->enc_context_owner;
+               struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
+
+               mutex_lock(&owner_kvm->lock);
+               if (!WARN_ON(!owner_sev->num_mirrored_vms))
+                       owner_sev->num_mirrored_vms--;
+               mutex_unlock(&owner_kvm->lock);
+               kvm_put_kvm(owner_kvm);
                return;
        }
 
-       mutex_lock(&kvm->lock);
-
        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use. CLFLUSH will
@@ -2064,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm)
                }
        }
 
-       mutex_unlock(&kvm->lock);
-
        sev_unbind_asid(kvm, sev->handle);
        sev_asid_free(sev);
 }
@@ -2249,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
        __free_page(virt_to_page(svm->sev_es.vmsa));
 
        if (svm->sev_es.ghcb_sa_free)
-               kfree(svm->sev_es.ghcb_sa);
+               kvfree(svm->sev_es.ghcb_sa);
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
@@ -2341,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
-static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
        struct kvm_vcpu *vcpu;
        struct ghcb *ghcb;
-       u64 exit_code = 0;
+       u64 exit_code;
+       u64 reason;
 
        ghcb = svm->sev_es.ghcb;
 
-       /* Only GHCB Usage code 0 is supported */
-       if (ghcb->ghcb_usage)
-               goto vmgexit_err;
-
        /*
-        * Retrieve the exit code now even though is may not be marked valid
+        * Retrieve the exit code now even though it may not be marked valid
         * as it could help with debugging.
         */
        exit_code = ghcb_get_sw_exit_code(ghcb);
 
+       /* Only GHCB Usage code 0 is supported */
+       if (ghcb->ghcb_usage) {
+               reason = GHCB_ERR_INVALID_USAGE;
+               goto vmgexit_err;
+       }
+
+       reason = GHCB_ERR_MISSING_INPUT;
+
        if (!ghcb_sw_exit_code_is_valid(ghcb) ||
            !ghcb_sw_exit_info_1_is_valid(ghcb) ||
            !ghcb_sw_exit_info_2_is_valid(ghcb))
@@ -2437,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                break;
        default:
+               reason = GHCB_ERR_INVALID_EVENT;
                goto vmgexit_err;
        }
 
-       return 0;
+       return true;
 
 vmgexit_err:
        vcpu = &svm->vcpu;
 
-       if (ghcb->ghcb_usage) {
+       if (reason == GHCB_ERR_INVALID_USAGE) {
                vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
                            ghcb->ghcb_usage);
+       } else if (reason == GHCB_ERR_INVALID_EVENT) {
+               vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
+                           exit_code);
        } else {
-               vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+               vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
                            exit_code);
                dump_ghcb(svm);
        }
 
-       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
-       vcpu->run->internal.ndata = 2;
-       vcpu->run->internal.data[0] = exit_code;
-       vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+       /* Clear the valid entries fields */
+       memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+
+       ghcb_set_sw_exit_info_1(ghcb, 2);
+       ghcb_set_sw_exit_info_2(ghcb, reason);
 
-       return -EINVAL;
+       return false;
 }
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
@@ -2482,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
                        svm->sev_es.ghcb_sa_sync = false;
                }
 
-               kfree(svm->sev_es.ghcb_sa);
+               kvfree(svm->sev_es.ghcb_sa);
                svm->sev_es.ghcb_sa = NULL;
                svm->sev_es.ghcb_sa_free = false;
        }
@@ -2530,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
        scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
        if (!scratch_gpa_beg) {
                pr_err("vmgexit: scratch gpa not provided\n");
-               return false;
+               goto e_scratch;
        }
 
        scratch_gpa_end = scratch_gpa_beg + len;
        if (scratch_gpa_end < scratch_gpa_beg) {
                pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
                       len, scratch_gpa_beg);
-               return false;
+               goto e_scratch;
        }
 
        if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@@ -2555,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
                    scratch_gpa_end > ghcb_scratch_end) {
                        pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
                               scratch_gpa_beg, scratch_gpa_end);
-                       return false;
+                       goto e_scratch;
                }
 
                scratch_va = (void *)svm->sev_es.ghcb;
@@ -2568,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
                if (len > GHCB_SCRATCH_AREA_LIMIT) {
                        pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
                               len, GHCB_SCRATCH_AREA_LIMIT);
-                       return false;
+                       goto e_scratch;
                }
-               scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
+               scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
                if (!scratch_va)
-                       return false;
+                       goto e_scratch;
 
                if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
                        /* Unable to copy scratch area from guest */
                        pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
 
-                       kfree(scratch_va);
-                       return false;
+                       kvfree(scratch_va);
+                       goto e_scratch;
                }
 
                /*
@@ -2596,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
        svm->sev_es.ghcb_sa_len = len;
 
        return true;
+
+e_scratch:
+       ghcb_set_sw_exit_info_1(ghcb, 2);
+       ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+
+       return false;
 }
 
 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
@@ -2646,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 
                ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
                if (!ret) {
-                       ret = -EINVAL;
+                       /* Error, keep GHCB MSR value as-is */
                        break;
                }
 
@@ -2682,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
                                                GHCB_MSR_TERM_REASON_POS);
                pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
                        reason_set, reason_code);
-               fallthrough;
+
+               ret = -EINVAL;
+               break;
        }
        default:
-               ret = -EINVAL;
+               /* Error, keep GHCB MSR value as-is */
+               break;
        }
 
        trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
@@ -2709,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
        if (!ghcb_gpa) {
                vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
-               return -EINVAL;
+
+               /* Without a GHCB, just return right back to the guest */
+               return 1;
        }
 
        if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
                /* Unable to map GHCB from guest */
                vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
                            ghcb_gpa);
-               return -EINVAL;
+
+               /* Without a GHCB, just return right back to the guest */
+               return 1;
        }
 
        svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
@@ -2726,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
        exit_code = ghcb_get_sw_exit_code(ghcb);
 
-       ret = sev_es_validate_vmgexit(svm);
-       if (ret)
-               return ret;
+       if (!sev_es_validate_vmgexit(svm))
+               return 1;
 
        sev_es_sync_from_ghcb(svm);
        ghcb_set_sw_exit_info_1(ghcb, 0);
        ghcb_set_sw_exit_info_2(ghcb, 0);
 
-       ret = -EINVAL;
+       ret = 1;
        switch (exit_code) {
        case SVM_VMGEXIT_MMIO_READ:
                if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
@@ -2775,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
                default:
                        pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
                               control->exit_info_1);
-                       ghcb_set_sw_exit_info_1(ghcb, 1);
-                       ghcb_set_sw_exit_info_2(ghcb,
-                                               X86_TRAP_UD |
-                                               SVM_EVTINJ_TYPE_EXEPT |
-                                               SVM_EVTINJ_VALID);
+                       ghcb_set_sw_exit_info_1(ghcb, 2);
+                       ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
                }
 
-               ret = 1;
                break;
        }
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                vcpu_unimpl(vcpu,
                            "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
                            control->exit_info_1, control->exit_info_2);
+               ret = -EINVAL;
                break;
        default:
                ret = svm_invoke_exit_handler(vcpu, exit_code);
@@ -2810,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
                return -EINVAL;
 
        if (!setup_vmgexit_scratch(svm, in, bytes))
-               return -EINVAL;
+               return 1;
 
        return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
                                    count, in);
index 5630c241d5f6e0bdf1899163cfdfef57c18b5d47..d0f68d11ec70bec31890e1dbd23b5a11a14a02e6 100644 (file)
@@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .load_eoi_exitmap = svm_load_eoi_exitmap,
        .hwapic_irr_update = svm_hwapic_irr_update,
        .hwapic_isr_update = svm_hwapic_isr_update,
-       .sync_pir_to_irr = kvm_lapic_find_highest_irr,
        .apicv_post_state_restore = avic_post_state_restore,
 
        .set_tss_addr = svm_set_tss_addr,
index 5faad3dc10e27ac0dc987cd6c7041fd8b2ea4162..1c7306c370fa3c4924a83371c1a1b21da8f6c5b2 100644 (file)
@@ -79,6 +79,7 @@ struct kvm_sev_info {
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner; /* Owner of copied encryption context */
+       unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
        struct misc_cg *misc_cg; /* For misc cgroup accounting */
        atomic_t migration_in_progress;
 };
index 1e2f669515665b233e591343458c5ccce54ee32c..9c941535f78c050a45a5d134dbb3b020a06a755f 100644 (file)
@@ -1162,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
        WARN_ON(!enable_vpid);
 
        /*
-        * If VPID is enabled and used by vmc12, but L2 does not have a unique
-        * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
-        * a VPID for L2, flush the current context as the effective ASID is
-        * common to both L1 and L2.
-        *
-        * Defer the flush so that it runs after vmcs02.EPTP has been set by
-        * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
-        * redundant flushes further down the nested pipeline.
-        *
-        * If a TLB flush isn't required due to any of the above, and vpid12 is
-        * changing then the new "virtual" VPID (vpid12) will reuse the same
-        * "real" VPID (vpid02), and so needs to be flushed.  There's no direct
-        * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
-        * all nested vCPUs.  Remember, a flush on VM-Enter does not invalidate
-        * guest-physical mappings, so there is no need to sync the nEPT MMU.
+        * VPID is enabled and in use by vmcs12.  If vpid12 is changing, then
+        * emulate a guest TLB flush as KVM does not track vpid12 history nor
+        * is the VPID incorporated into the MMU context.  I.e. KVM must assume
+        * that the new vpid12 has never been used and thus represents a new
+        * guest ASID that cannot have entries in the TLB.
         */
-       if (!nested_has_guest_tlb_tag(vcpu)) {
-               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-       } else if (is_vmenter &&
-                  vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+       if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
                vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-               vpid_sync_context(nested_get_vpid02(vcpu));
+               kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+               return;
        }
+
+       /*
+        * If VPID is enabled, used by vmcs12, and vpid12 is not changing but
+        * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
+        * KVM was unable to allocate a VPID for L2, flush the current context
+        * as the effective ASID is common to both L1 and L2.
+        */
+       if (!nested_has_guest_tlb_tag(vcpu))
+               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
 
 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
@@ -2594,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
        if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
            WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-                                    vmcs12->guest_ia32_perf_global_ctrl)))
+                                    vmcs12->guest_ia32_perf_global_ctrl))) {
+               *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
+       }
 
        kvm_rsp_write(vcpu, vmcs12->guest_rsp);
        kvm_rip_write(vcpu, vmcs12->guest_rip);
@@ -3344,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
        };
        u32 failed_index;
 
-       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-               kvm_vcpu_flush_tlb_current(vcpu);
+       kvm_service_local_tlb_flush_requests(vcpu);
 
        evaluate_pending_interrupts = exec_controls_get(vmx) &
                (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
@@ -4502,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                (void)nested_get_evmcs_page(vcpu);
        }
 
-       /* Service the TLB flush request for L2 before switching to L1. */
-       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-               kvm_vcpu_flush_tlb_current(vcpu);
+       /* Service pending TLB flush requests for L2 before switching to L1. */
+       kvm_service_local_tlb_flush_requests(vcpu);
 
        /*
         * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
@@ -4857,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
 
+       vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
        vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_shadow_vmcs12)
                goto out_cached_shadow_vmcs12;
@@ -5289,8 +5287,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
                struct vmcs_hdr hdr;
 
-               if (ghc->gpa != vmptr &&
-                   kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
                        /*
                         * Reads from an unbacked page return all 1s,
                         * which means that the 32 bits located at the
index 5f81ef092bd436b1a25ded21fbab536d6743ca24..1c94783b5a54c5520466bb8b3753c89cfa1d5850 100644 (file)
@@ -5,6 +5,7 @@
 #include <asm/cpu.h>
 
 #include "lapic.h"
+#include "irq.h"
 #include "posted_intr.h"
 #include "trace.h"
 #include "vmx.h"
@@ -77,13 +78,18 @@ after_clear_sn:
                pi_set_on(pi_desc);
 }
 
+static bool vmx_can_use_vtd_pi(struct kvm *kvm)
+{
+       return irqchip_in_kernel(kvm) && enable_apicv &&
+               kvm_arch_has_assigned_device(kvm) &&
+               irq_remapping_cap(IRQ_POSTING_CAP);
+}
+
 void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
 {
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       if (!vmx_can_use_vtd_pi(vcpu->kvm))
                return;
 
        /* Set SN when the vCPU is preempted */
@@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
        struct pi_desc old, new;
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       if (!vmx_can_use_vtd_pi(vcpu->kvm))
                return 0;
 
        WARN_ON(irqs_disabled());
@@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
        struct vcpu_data vcpu_info;
        int idx, ret = 0;
 
-       if (!kvm_arch_has_assigned_device(kvm) ||
-           !irq_remapping_cap(IRQ_POSTING_CAP) ||
-           !kvm_vcpu_apicv_active(kvm->vcpus[0]))
+       if (!vmx_can_use_vtd_pi(kvm))
                return 0;
 
        idx = srcu_read_lock(&kvm->irq_srcu);
index ba66c171d951ba06308503570e4d247b40825914..9453743ce0c410e439ab6074b15c140cc993f03a 100644 (file)
@@ -2918,6 +2918,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
        }
 }
 
+static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+{
+       if (is_guest_mode(vcpu))
+               return nested_get_vpid02(vcpu);
+       return to_vmx(vcpu)->vpid;
+}
+
 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -2930,31 +2937,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
        if (enable_ept)
                ept_sync_context(construct_eptp(vcpu, root_hpa,
                                                mmu->shadow_root_level));
-       else if (!is_guest_mode(vcpu))
-               vpid_sync_context(to_vmx(vcpu)->vpid);
        else
-               vpid_sync_context(nested_get_vpid02(vcpu));
+               vpid_sync_context(vmx_get_current_vpid(vcpu));
 }
 
 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
 {
        /*
-        * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
+        * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
         * vmx_flush_tlb_guest() for an explanation of why this is ok.
         */
-       vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
+       vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
 }
 
 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
        /*
-        * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
-        * or a vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit
-        * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+        * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
+        * vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit are
+        * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
         * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
         * i.e. no explicit INVVPID is necessary.
         */
-       vpid_sync_context(to_vmx(vcpu)->vpid);
+       vpid_sync_context(vmx_get_current_vpid(vcpu));
 }
 
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -6262,9 +6267,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int max_irr;
-       bool max_irr_updated;
+       bool got_posted_interrupt;
 
-       if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
+       if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
                return -EIO;
 
        if (pi_test_on(&vmx->pi_desc)) {
@@ -6274,22 +6279,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
                 * But on x86 this is just a compiler barrier anyway.
                 */
                smp_mb__after_atomic();
-               max_irr_updated =
+               got_posted_interrupt =
                        kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
-
-               /*
-                * If we are running L2 and L1 has a new pending interrupt
-                * which can be injected, this may cause a vmexit or it may
-                * be injected into L2.  Either way, this interrupt will be
-                * processed via KVM_REQ_EVENT, not RVI, because we do not use
-                * virtual interrupt delivery to inject L1 interrupts into L2.
-                */
-               if (is_guest_mode(vcpu) && max_irr_updated)
-                       kvm_make_request(KVM_REQ_EVENT, vcpu);
        } else {
                max_irr = kvm_lapic_find_highest_irr(vcpu);
+               got_posted_interrupt = false;
        }
-       vmx_hwapic_irr_update(vcpu, max_irr);
+
+       /*
+        * Newly recognized interrupts are injected via either virtual interrupt
+        * delivery (RVI) or KVM_REQ_EVENT.  Virtual interrupt delivery is
+        * disabled in two cases:
+        *
+        * 1) If L2 is running and the vCPU has a new pending interrupt.  If L1
+        * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
+        * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
+        * into L2, but KVM doesn't use virtual interrupt delivery to inject
+        * interrupts into L2, and so KVM_REQ_EVENT is again needed.
+        *
+        * 2) If APICv is disabled for this vCPU, assigned devices may still
+        * attempt to post interrupts.  The posted interrupt vector will cause
+        * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
+        */
+       if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
+               vmx_set_rvi(max_irr);
+       else if (got_posted_interrupt)
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return max_irr;
 }
 
@@ -7509,6 +7525,7 @@ static void hardware_unsetup(void)
 static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+                         BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
@@ -7761,10 +7778,10 @@ static __init int hardware_setup(void)
                ple_window_shrink = 0;
        }
 
-       if (!cpu_has_vmx_apicv()) {
+       if (!cpu_has_vmx_apicv())
                enable_apicv = 0;
+       if (!enable_apicv)
                vmx_x86_ops.sync_pir_to_irr = NULL;
-       }
 
        if (cpu_has_vmx_tsc_scaling()) {
                kvm_has_tsc_control = true;
index 5a403d92833f51e4f77d6fd67b38fd85efddb698..e0aa4dd53c7fc98957d0a916df12f37fda9c2f39 100644 (file)
@@ -3258,6 +3258,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
 }
 
+
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+       ++vcpu->stat.tlb_flush;
+       static_call(kvm_x86_tlb_flush_current)(vcpu);
+}
+
+/*
+ * Service "local" TLB flush requests, which are specific to the current MMU
+ * context.  In addition to the generic event handling in vcpu_enter_guest(),
+ * TLB flushes that are targeted at an MMU context also need to be serviced
+ * prior before nested VM-Enter/VM-Exit.
+ */
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
+{
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+               kvm_vcpu_flush_tlb_current(vcpu);
+
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
+               kvm_vcpu_flush_tlb_guest(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
        struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
@@ -4133,6 +4156,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_SGX_ATTRIBUTE:
 #endif
        case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+       case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
        case KVM_CAP_SREGS2:
        case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
        case KVM_CAP_VCPU_ATTRIBUTES:
@@ -4448,8 +4472,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
-       if (vcpu->arch.apicv_active)
-               static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
        return kvm_apic_get_state(vcpu, s);
 }
@@ -5124,6 +5147,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid __user *cpuid_arg = argp;
                struct kvm_cpuid cpuid;
 
+               /*
+                * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+                * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
+                * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
+                * faults due to reusing SPs/SPTEs.  In practice no sane VMM mucks with
+                * the core vCPU model on the fly, so fail.
+                */
+               r = -EINVAL;
+               if (vcpu->arch.last_vmentry_cpu != -1)
+                       goto out;
+
                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
@@ -5134,6 +5168,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;
 
+               /*
+                * KVM_SET_CPUID{,2} after KVM_RUN is forbidded, see the comment in
+                * KVM_SET_CPUID case above.
+                */
+               r = -EINVAL;
+               if (vcpu->arch.last_vmentry_cpu != -1)
+                       goto out;
+
                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
@@ -5698,6 +5740,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
                kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
                r = 0;
 split_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@ -6078,6 +6121,7 @@ set_identity_unlock:
                /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -8776,10 +8820,9 @@ static void kvm_apicv_init(struct kvm *kvm)
 {
        init_rwsem(&kvm->arch.apicv_update_lock);
 
-       if (enable_apicv)
-               clear_bit(APICV_INHIBIT_REASON_DISABLE,
-                         &kvm->arch.apicv_inhibit_reasons);
-       else
+       set_bit(APICV_INHIBIT_REASON_ABSENT,
+               &kvm->arch.apicv_inhibit_reasons);
+       if (!enable_apicv)
                set_bit(APICV_INHIBIT_REASON_DISABLE,
                        &kvm->arch.apicv_inhibit_reasons);
 }
@@ -9528,8 +9571,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        if (irqchip_split(vcpu->kvm))
                kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
        else {
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+               static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
                if (ioapic_in_kernel(vcpu->kvm))
                        kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
        }
@@ -9648,10 +9690,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        /* Flushing all ASIDs flushes the current ASID... */
                        kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
-               if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-                       kvm_vcpu_flush_tlb_current(vcpu);
-               if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
-                       kvm_vcpu_flush_tlb_guest(vcpu);
+               kvm_service_local_tlb_flush_requests(vcpu);
 
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -9802,10 +9841,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        /*
         * This handles the case where a posted interrupt was
-        * notified with kvm_vcpu_kick.
+        * notified with kvm_vcpu_kick.  Assigned devices can
+        * use the POSTED_INTR_VECTOR even if APICv is disabled,
+        * so do it even if APICv is disabled on this vCPU.
         */
-       if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
-               static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       if (kvm_lapic_enabled(vcpu))
+               static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
        if (kvm_vcpu_exit_request(vcpu)) {
                vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -9849,8 +9890,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
 
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+               if (kvm_lapic_enabled(vcpu))
+                       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
index 997669ae9caa21749d2339a48e761132b04201fd..4abcd8d9836ddc2296748069f2bfcf9c26fe8c9c 100644 (file)
@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
 
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
 int kvm_check_nested_events(struct kvm_vcpu *vcpu);
 
 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
@@ -185,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
-static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
-{
-       ++vcpu->stat.tlb_flush;
-       static_call(kvm_x86_tlb_flush_current)(vcpu);
-}
-
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
index 4a3da7592b99c938eed72dd583475bef4ae131a1..38d24d2ab38b3329e3ec3f9f527b573688c7dbe9 100644 (file)
@@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
 #ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
+       int i;
 #endif
 
        base = (unsigned char *)real_mode_header;
@@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
        trampoline_header->flags = 0;
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+       /* Map the real mode stub as virtual == physical */
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-       trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+       /*
+        * Include the entirety of the kernel mapping into the trampoline
+        * PGD.  This way, all mappings present in the normal kernel page
+        * tables are usable while running on trampoline_pgd.
+        */
+       for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+               trampoline_pgd[i] = init_top_pgt[i].pgd;
 #endif
 
        sme_sev_setup_real_mode(trampoline_header);
index 220dd96784947624d9d43fb62dd72e4ae0614936..444d824775f6a9ccb10929c55ac980af7f73376e 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <../entry/calling.h>
 
 .pushsection .noinstr.text, "ax"
 /*
@@ -192,6 +193,25 @@ SYM_CODE_START(xen_iret)
        jmp hypercall_iret
 SYM_CODE_END(xen_iret)
 
+/*
+ * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and
+ * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
+ * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
+ * frame at the same address is useless.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+       UNWIND_HINT_REGS
+       POP_REGS
+
+       /* stackleak_erase() can work safely on the kernel stack. */
+       STACKLEAK_ERASE_NOCLOBBER
+
+       addq    $8, %rsp        /* skip regs->orig_ax */
+       jmp xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
+
 /*
  * Xen handles syscall callbacks much like ordinary exceptions, which
  * means we have:
index 5b78e86e345924f387ee0b0708819fb0ded4a813..b9c77885b8726ee0c43b5318602d95353397b176 100644 (file)
@@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
        if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
+       return sysfs_emit(buf, "%s\n",
                        ata_lpm_policy_names[ap->target_lpm_policy]);
 }
 DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
index 121635aa8c00c10e8f5e42cc5855d173971cf4e7..823c88622e34a089e670a8bb9d7dc0838ced78d5 100644 (file)
@@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
        /* Transfer multiple of 2 bytes */
        if (rw == READ) {
                if (swap)
-                       raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_insw_swapw(data_addr, (u16 *)buf, words);
                else
-                       raw_insw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_insw(data_addr, (u16 *)buf, words);
        } else {
                if (swap)
-                       raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_outsw_swapw(data_addr, (u16 *)buf, words);
                else
-                       raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_outsw(data_addr, (u16 *)buf, words);
        }
 
        /* Transfer trailing byte, if any. */
@@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
 
                if (rw == READ) {
                        if (swap)
-                               raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_insw_swapw(data_addr, (u16 *)pad, 1);
                        else
-                               raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_insw(data_addr, (u16 *)pad, 1);
                        *buf = pad[0];
                } else {
                        pad[0] = *buf;
                        if (swap)
-                               raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_outsw_swapw(data_addr, (u16 *)pad, 1);
                        else
-                               raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_outsw(data_addr, (u16 *)pad, 1);
                }
                words++;
        }
index e5838b23c9e0a177712283e3c5df5087f4ff1da0..3b31a4f596d865f0a9a7ea9022729aa0a84f0ca9 100644 (file)
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
        return 0;
 }
 
+static void sata_fsl_host_stop(struct ata_host *host)
+{
+        struct sata_fsl_host_priv *host_priv = host->private_data;
+
+        iounmap(host_priv->hcr_base);
+        kfree(host_priv);
+}
+
 /*
  * scsi mid-layer and libata interface structures
  */
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
        .port_start = sata_fsl_port_start,
        .port_stop = sata_fsl_port_stop,
 
+       .host_stop      = sata_fsl_host_stop,
+
        .pmp_attach = sata_fsl_pmp_attach,
        .pmp_detach = sata_fsl_pmp_detach,
 };
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
        host_priv->ssr_base = ssr_base;
        host_priv->csr_base = csr_base;
 
-       irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (!irq) {
-               dev_err(&ofdev->dev, "invalid irq from platform\n");
+       irq = platform_get_irq(ofdev, 0);
+       if (irq < 0) {
+               retval = irq;
                goto error_exit_with_cleanup;
        }
        host_priv->irq = irq;
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
 
        ata_host_detach(host);
 
-       irq_dispose_mapping(host_priv->irq);
-       iounmap(host_priv->hcr_base);
-       kfree(host_priv);
-
        return 0;
 }
 
index a154cab6cd989808b5cae51fd3d12506a6d52f9c..c3a36cfaa855a679f8f7d215852c8981aea3e2f7 100644 (file)
@@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
        int ret;
 
        if (idx < 0) {
-               pr_warn("deleting an unspecified loop device is not supported.\n");
+               pr_warn_once("deleting an unspecified loop device is not supported.\n");
                return -EINVAL;
        }
                
index deed355422f4e9561e10ac0a1d62cc42649bffde..c837d5416e0eeeb9ca578f23d75f8141be5e9481 100644 (file)
@@ -191,6 +191,8 @@ struct ipmi_user {
        struct work_struct remove_work;
 };
 
+static struct workqueue_struct *remove_work_wq;
+
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
        __acquires(user->release_barrier)
 {
@@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
 
        /* SRCU cleanup must happen in task context. */
-       schedule_work(&user->remove_work);
+       queue_work(remove_work_wq, &user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
                /* We didn't find a user, deliver an error response. */
                ipmi_inc_stat(intf, unhandled_commands);
 
-               msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
-               msg->data[1] = msg->rsp[2];
-               msg->data[2] = msg->rsp[4] & ~0x3;
+               msg->data[0] = (netfn + 1) << 2;
+               msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
+               msg->data[1] = msg->rsp[1]; /* Addr */
+               msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
+               msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
                msg->data[3] = cmd;
                msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
                msg->data_size = 5;
@@ -4455,13 +4459,24 @@ return_unspecified:
                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
                msg->rsp_size = 3;
        } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
-               /* commands must have at least 3 bytes, responses 4. */
-               if (is_cmd && (msg->rsp_size < 3)) {
+               /* commands must have at least 4 bytes, responses 5. */
+               if (is_cmd && (msg->rsp_size < 4)) {
                        ipmi_inc_stat(intf, invalid_commands);
                        goto out;
                }
-               if (!is_cmd && (msg->rsp_size < 4))
-                       goto return_unspecified;
+               if (!is_cmd && (msg->rsp_size < 5)) {
+                       ipmi_inc_stat(intf, invalid_ipmb_responses);
+                       /* Construct a valid error response. */
+                       msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
+                       msg->rsp[0] |= (1 << 2); /* Make it a response */
+                       msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
+                       msg->rsp[1] = msg->data[1]; /* Addr */
+                       msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
+                       msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
+                       msg->rsp[3] = msg->data[3]; /* Cmd */
+                       msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
+                       msg->rsp_size = 5;
+               }
        } else if ((msg->data_size >= 2)
            && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
            && (msg->data[1] == IPMI_SEND_MSG_CMD)
@@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
        if (rv) {
                rv->done = free_smi_msg;
                rv->user_data = NULL;
+               rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
                atomic_inc(&smi_msg_inuse_count);
        }
        return rv;
@@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void)
 
        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
+       remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+       if (!remove_work_wq) {
+               pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+               rv = -ENOMEM;
+               goto out;
+       }
+
        initialized = true;
 
 out:
@@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void)
        int count;
 
        if (initialized) {
+               destroy_workqueue(remove_work_wq);
+
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &panic_block);
 
index e338d2f010feb2a8978fc3ddfa22970d28aa15e5..096c3848fa415a5c6d22a14a85aa636926d3f226 100644 (file)
@@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
        .release        = cpufreq_sysfs_release,
 };
 
-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+                               struct device *dev)
 {
-       struct device *dev = get_cpu_device(cpu);
-
        if (unlikely(!dev))
                return;
 
@@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 
        if (policy->max_freq_req) {
                /*
-                * CPUFREQ_CREATE_POLICY notification is sent only after
-                * successfully adding max_freq_req request.
+                * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+                * notification, since CPUFREQ_CREATE_POLICY notification was
+                * sent after adding max_freq_req earlier.
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
@@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
        if (new_policy) {
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
-                       add_cpu_dev_symlink(policy, j);
+                       add_cpu_dev_symlink(policy, j, get_cpu_device(j));
                }
 
                policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        /* Create sysfs link on CPU registration */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy)
-               add_cpu_dev_symlink(policy, cpu);
+               add_cpu_dev_symlink(policy, cpu, dev);
 
        return 0;
 }
index f57a39ddd0635e8b830d8b51a6e0c3b69933024c..ab7fd896d2c43dd0d8635d0d2cbc544d6cfd13ab 100644 (file)
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
        int i;
 
        table = &buffer->sg_table;
-       for_each_sg(table->sgl, sg, table->nents, i) {
+       for_each_sgtable_sg(table, sg, i) {
                struct page *page = sg_page(sg);
 
                __free_pages(page, compound_order(page));
index 71a6a9ef54ac79938c85db54a92efc287bf1207f..6348559608ce78bb5febda33b4cc69f9b99d9b12 100644 (file)
@@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
-       struct drm_gem_object *gobj;
+       struct drm_gem_object *gobj = NULL;
        u32 domain, alloc_domain;
        u64 alloc_flags;
        int ret;
@@ -1506,14 +1506,16 @@ allocate_init_user_pages_failed:
        remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
        drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
-       drm_gem_object_put(gobj);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;
 err_bo_create:
        unreserve_mem_limit(adev, size, alloc_domain, !!sg);
 err_reserve_limit:
        mutex_destroy(&(*mem)->lock);
-       kfree(*mem);
+       if (gobj)
+               drm_gem_object_put(gobj);
+       else
+               kfree(*mem);
 err:
        if (sg) {
                sg_free_table(sg);
index d94fa748e6bbe6967f7452a999cefb2d70336bd4..1e651b9591419e340031ae52c20737de79905ed7 100644 (file)
@@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
        /* disable all interrupts */
        amdgpu_irq_disable_all(adev);
        if (adev->mode_info.mode_config_initialized){
-               if (!amdgpu_device_has_dc_support(adev))
+               if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
                        drm_helper_force_disable_all(adev_to_drm(adev));
                else
                        drm_atomic_helper_shutdown(adev_to_drm(adev));
@@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 {
        int r;
 
+       amdgpu_amdkfd_pre_reset(adev);
+
        if (from_hypervisor)
                r = amdgpu_virt_request_full_gpu(adev, true);
        else
@@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
        amdgpu_irq_gpu_reset_resume_helper(adev);
        r = amdgpu_ib_ring_tests(adev);
+       amdgpu_amdkfd_post_reset(adev);
 
 error:
        if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
 
-               amdgpu_amdkfd_pre_reset(tmp_adev);
+               if (!amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_pre_reset(tmp_adev);
 
                /*
                 * Mark these ASICs to be reseted as untracked first
@@ -5129,7 +5133,7 @@ skip_hw_reset:
                        drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
                }
 
-               if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+               if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
                        drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
                }
 
@@ -5148,9 +5152,9 @@ skip_hw_reset:
 
 skip_sched_resume:
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-               /* unlock kfd */
-               if (!need_emergency_restart)
-                       amdgpu_amdkfd_post_reset(tmp_adev);
+               /* unlock kfd: SRIOV would do it separately */
+               if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_post_reset(tmp_adev);
 
                /* kfd_post_reset will do nothing if kfd device is not initialized,
                 * need to bring up kfd here if it's not be initialized before
index 503995c7ff6c1e6acd85eef8f3d298918e70c490..ea00090b3fb36f93e65c3c1d60c93a591c328eaa 100644 (file)
@@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
        [HDP_HWIP]      = HDP_HWID,
        [SDMA0_HWIP]    = SDMA0_HWID,
        [SDMA1_HWIP]    = SDMA1_HWID,
+       [SDMA2_HWIP]    = SDMA2_HWID,
+       [SDMA3_HWIP]    = SDMA3_HWID,
        [MMHUB_HWIP]    = MMHUB_HWID,
        [ATHUB_HWIP]    = ATHUB_HWID,
        [NBIO_HWIP]     = NBIF_HWID,
@@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 64):
                case IP_VERSION(3, 1, 1):
                case IP_VERSION(3, 0, 2):
+               case IP_VERSION(3, 0, 192):
                        amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                        if (!amdgpu_sriov_vf(adev))
                                amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
index 4f7c70845785a9ed20efd1f5f78ab85e6a63d043..585961c2f5f27c34ccaf15d68a10a57367c1265a 100644 (file)
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                break;
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 64):
+       case IP_VERSION(3, 0, 192):
                if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
                        fw_name = FIRMWARE_SIENNA_CICHLID;
                else
index ce982afeff913e7ac064954a0a384eb06a5512f7..ac9a8cd21c4b64b2c90d51c830f3f21d9589d2f3 100644 (file)
@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
        int i = 0;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++)
-               if (adev->mode_info.crtcs[i])
-                       hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+               if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
+                       hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
 
        kfree(adev->mode_info.bios_hardcoded_edid);
        kfree(adev->amdgpu_vkms_output);
index 34478bcc4d095cd94bd6e23d1e5fbab14cb799dd..b305fd39874fe68a8ab7f5141c3f01aaa0f7517d 100644 (file)
@@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)
 
        gfx_v9_0_cp_enable(adev, false);
 
-       /* Skip suspend with A+A reset */
-       if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
-               dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+       /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
+       if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
+           (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
+               dev_dbg(adev->dev, "Skipping RLC halt\n");
                return 0;
        }
 
index a6659d9ecdd220a212ebb80ee7203c214cf28548..2ec1ffb36b1fc54db2b840b2498a6963d1a36a69 100644 (file)
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 64):
+       case IP_VERSION(3, 0, 192):
                if (amdgpu_sriov_vf(adev)) {
                        if (encode)
                                *codecs = &sriov_sc_video_codecs_encode;
index 58b89b53ebe617c8dbab7dd95cac550cb82a8d5f..3cb4681c5f539abe9075df9a3c237dc62a3ac313 100644 (file)
@@ -1574,7 +1574,6 @@ retry_flush_work:
 static void svm_range_restore_work(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
-       struct amdkfd_process_info *process_info;
        struct svm_range_list *svms;
        struct svm_range *prange;
        struct kfd_process *p;
@@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
         * the lifetime of this thread, kfd_process and mm will be valid.
         */
        p = container_of(svms, struct kfd_process, svms);
-       process_info = p->kgd_process_info;
        mm = p->mm;
        if (!mm)
                return;
 
-       mutex_lock(&process_info->lock);
        svm_range_list_lock_and_flush_work(svms, mm);
        mutex_lock(&svms->lock);
 
@@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
 out_reschedule:
        mutex_unlock(&svms->lock);
        mmap_write_unlock(mm);
-       mutex_unlock(&process_info->lock);
 
        /* If validation failed, reschedule another attempt */
        if (evicted_ranges) {
@@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 
        if (atomic_read(&svms->drain_pagefaults)) {
                pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+               r = 0;
                goto out;
        }
 
@@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
        mm = get_task_mm(p->lead_thread);
        if (!mm) {
                pr_debug("svms 0x%p failed to get mm\n", svms);
+               r = 0;
                goto out;
        }
 
@@ -2660,6 +2658,7 @@ retry_write_locked:
 
        if (svm_range_skip_recover(prange)) {
                amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+               r = 0;
                goto out_unlock_range;
        }
 
@@ -2668,6 +2667,7 @@ retry_write_locked:
        if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
                pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
                         svms, prange->start, prange->last);
+               r = 0;
                goto out_unlock_range;
        }
 
@@ -3177,7 +3177,6 @@ static int
 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
 {
-       struct amdkfd_process_info *process_info = p->kgd_process_info;
        struct mm_struct *mm = current->mm;
        struct list_head update_list;
        struct list_head insert_list;
@@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 
        svms = &p->svms;
 
-       mutex_lock(&process_info->lock);
-
        svm_range_list_lock_and_flush_work(svms, mm);
 
        r = svm_range_is_valid(p, start, size);
@@ -3273,8 +3270,6 @@ out_unlock_range:
        mutex_unlock(&svms->lock);
        mmap_read_unlock(mm);
 out:
-       mutex_unlock(&process_info->lock);
-
        pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
                 &p->svms, start, start + size - 1, r);
 
index cce062adc439149e3a808c920110f8409ab549f2..8a441a22c46ec7493910f7f6ead1cad05b14c778 100644 (file)
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
                        ret = -EINVAL;
                        goto cleanup;
                }
+
+               if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+                               (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+                       DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
+
        }
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
index 32a5ce09a62a9bb373b318f881e649250b951e99..cc34a35d0bcbfe2d9c6db547a160ffa7c0bba5e9 100644 (file)
@@ -36,6 +36,8 @@
 #include "dm_helpers.h"
 
 #include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
 
 #include "i2caux_interface.h"
 #include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+       if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+           (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+           link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+               return true;
+
+       return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
        struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
        u8 *dsc_branch_dec_caps = NULL;
 
        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
        /*
         * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
         * because it only check the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
         * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
         *
         */
-
-       if (!aconnector->dsc_aux && !port->parent->port_parent)
+       if (!aconnector->dsc_aux && !port->parent->port_parent &&
+           needs_dsc_aux_workaround(aconnector->dc_link))
                aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
+
        if (!aconnector->dsc_aux)
                return false;
 
index 60544788e911ee15969e0cefa1aeb630cf1f3f44..c8457babfdea428b57a6bd5084f61cd0fa5208a4 100644 (file)
@@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
                        dal_ddc_service_set_transaction_type(link->ddc,
                                                             sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+                       /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+                        * reports DSC support.
+                        */
+                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+                                       link->type == dc_connection_mst_branch &&
+                                       link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+                                       link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+                                       !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+                               link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
                        /* In case of fallback to SST when topology discovery below fails
                         * HDCP caps will be querried again later by the upper layer (caller
@@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        LINK_INFO("link=%d, mst branch is now Disconnected\n",
                                  link->link_index);
 
+                       /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+                               link->wa_flags.dpia_mst_dsc_always_on = false;
+
                        dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
 
                        link->mst_stream_alloc_table.stream_count = 0;
index cb7bf9148904edb02534b01db8ca1e8f90b966c8..13bc69d6b6791c4616131467f940c5c0e6272eaa 100644 (file)
@@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
                }
 
                for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
-                       lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
+                       lt_settings->dpcd_lane_settings[lane].raw = 0;
        }
 
        if (status == LINK_TRAINING_SUCCESS) {
index c32fdccd4d925c96b4f1bda165453d29a0340feb..e2d9a46d0e1ad4ccf10cf5160fb070e6fffd170a 100644 (file)
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
        if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
                return false;
 
+       // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
+       if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+               return false;
+
        return true;
 }
 
@@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
 
        if (!new_ctx)
                return DC_ERROR_UNEXPECTED;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
-       /*
-        * Update link encoder to stream assignment.
-        * TODO: Split out reason allocation from validation.
-        */
-       if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
-               dc->res_pool->funcs->link_encs_assign(
-                       dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
 
        if (dc->res_pool->funcs->validate_global) {
                result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
                if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
                        result = DC_FAIL_BANDWIDTH_VALIDATE;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       /*
+        * Only update link encoder to stream assignment after bandwidth validation passed.
+        * TODO: Split out assignment and validation.
+        */
+       if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
+               dc->res_pool->funcs->link_encs_assign(
+                       dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
+#endif
+
        return result;
 }
 
index 3aac3f4a28525623382f21dd934dd39851baf1af..618e7989176fc86c964aa35a8df5d926b13b6c2f 100644 (file)
@@ -508,7 +508,8 @@ union dpia_debug_options {
                uint32_t disable_dpia:1;
                uint32_t force_non_lttpr:1;
                uint32_t extend_aux_rd_interval:1;
-               uint32_t reserved:29;
+               uint32_t disable_mst_dsc_work_around:1;
+               uint32_t reserved:28;
        } bits;
        uint32_t raw;
 };
index 180ecd860296b250fe40f6d040d6a2a33bb88817..b01077a6af0e6cdf95a376f90b0d9d2974081bdd 100644 (file)
@@ -191,6 +191,8 @@ struct dc_link {
                bool dp_skip_DID2;
                bool dp_skip_reset_segment;
                bool dp_mot_reset_segment;
+               /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+               bool dpia_mst_dsc_always_on;
        } wa_flags;
        struct link_mst_stream_allocation_table mst_stream_alloc_table;
 
index 01168b8955bff3ce80b1c7a6e6df04b050a9bcab..8a3244585d809372e2b179ae1d906a2a55005740 100644 (file)
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
                        dev_err(adev->dev, "Failed to disable smu features.\n");
        }
 
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
+       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);
 
index 39e11eaec1a3f1cccade492af13a57abcea826a6..aa7238245b0ea108a0d4bacf357b189af672e161 100644 (file)
@@ -1640,6 +1640,9 @@ struct intel_dp {
        struct intel_dp_pcon_frl frl;
 
        struct intel_psr psr;
+
+       /* When we last wrote the OUI for eDP */
+       unsigned long last_oui_write;
 };
 
 enum lspcon_vendor {
index be883469d2fcc30299a211dafddc2f5c6c489c84..a552f05a67e58b157de60bc79110e92a2d393bf1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/notifier.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 #include <linux/types.h>
 
 #include <asm/byteorder.h>
@@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
 
        if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
                drm_err(&i915->drm, "Failed to write source OUI\n");
+
+       intel_dp->last_oui_write = jiffies;
+}
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
+       wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
 }
 
 /* If the device supports it, try to set the power state appropriately */
index ce229026dc91dccd795292de944b98ab3ea55ccc..b64145a3869a9eb5d14edbc20721415f8eb32638 100644 (file)
@@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *crtc_state);
 void intel_dp_phy_test(struct intel_encoder *encoder);
 
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
+
 #endif /* __INTEL_DP_H__ */
index 569d17b4d00f0bd4136604cd3dd7439c8d99137e..3897468140e02885ee12d6e98749984f1d0c1891 100644 (file)
@@ -36,6 +36,7 @@
 
 #include "intel_backlight.h"
 #include "intel_display_types.h"
+#include "intel_dp.h"
 #include "intel_dp_aux_backlight.h"
 
 /* TODO:
@@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
        int ret;
        u8 tcon_cap[4];
 
+       intel_dp_wait_source_oui(intel_dp);
+
        ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
        if (ret != sizeof(tcon_cap))
                return false;
@@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
        int ret;
        u8 old_ctrl, ctrl;
 
+       intel_dp_wait_source_oui(intel_dp);
+
        ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
        if (ret != 1) {
                drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
@@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
        struct intel_panel *panel = &connector->panel;
        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
+       if (!panel->backlight.edp.vesa.info.aux_enable) {
+               u32 pwm_level = intel_backlight_invert_pwm_level(connector,
+                                                                panel->backlight.pwm_level_max);
+
+               panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+       }
+
        drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
 }
 
@@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
        drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+       if (!panel->backlight.edp.vesa.info.aux_enable)
+               panel->backlight.pwm_funcs->disable(old_conn_state,
+                                                   intel_backlight_invert_pwm_level(connector, 0));
 }
 
 static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
        if (ret < 0)
                return ret;
 
+       if (!panel->backlight.edp.vesa.info.aux_enable) {
+               ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+               if (ret < 0) {
+                       drm_err(&i915->drm,
+                               "Failed to setup PWM backlight controls for eDP backlight: %d\n",
+                               ret);
+                       return ret;
+               }
+       }
        panel->backlight.max = panel->backlight.edp.vesa.info.max;
        panel->backlight.min = 0;
        if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
@@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
-       /* TODO: We currently only support AUX only backlight configurations, not backlights which
-        * require a mix of PWM and AUX controls to work. In the mean time, these machines typically
-        * work just fine using normal PWM controls anyway.
-        */
-       if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
-           drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+       if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
                return true;
        }
index e1f36253088918c9bedc0805e3bb9a7f80235c7b..ed73d9bc9d40b2aabff9933fe8d763e788b29834 100644 (file)
@@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
               FF_MODE2_GS_TIMER_MASK,
               FF_MODE2_GS_TIMER_224,
               0, false);
-
-       /*
-        * Wa_14012131227:dg1
-        * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
-        */
-       wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
-                    GEN9_RHWO_OPTIMIZATION_DISABLE);
 }
 
 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
index ae11061727ff80d7141609909552bee59f444f1d..39197b4beea78b8ed7b93c5a1a98c4d524376cd6 100644 (file)
@@ -4,8 +4,8 @@ config DRM_MSM
        tristate "MSM DRM"
        depends on DRM
        depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+       depends on COMMON_CLK
        depends on IOMMU_SUPPORT
-       depends on (OF && COMMON_CLK) || COMPILE_TEST
        depends on QCOM_OCMEM || QCOM_OCMEM=n
        depends on QCOM_LLCC || QCOM_LLCC=n
        depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
index 40577f8856d8f10cf1b1c45420df80d34321ef34..093454457545f14424408c8ea7393e332b9f1c4d 100644 (file)
@@ -23,8 +23,10 @@ msm-y := \
        hdmi/hdmi_i2c.o \
        hdmi/hdmi_phy.o \
        hdmi/hdmi_phy_8960.o \
+       hdmi/hdmi_phy_8996.o \
        hdmi/hdmi_phy_8x60.o \
        hdmi/hdmi_phy_8x74.o \
+       hdmi/hdmi_pll_8960.o \
        edp/edp.o \
        edp/edp_aux.o \
        edp/edp_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
        disp/mdp4/mdp4_dtv_encoder.o \
        disp/mdp4/mdp4_lcdc_encoder.o \
        disp/mdp4/mdp4_lvds_connector.o \
+       disp/mdp4/mdp4_lvds_pll.o \
        disp/mdp4/mdp4_irq.o \
        disp/mdp4/mdp4_kms.o \
        disp/mdp4/mdp4_plane.o \
@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
        dp/dp_audio.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
 msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
 
index 267a880811d654c78ba89de035fd660026afe898..78aad5216a613041fa210acb021208c42df918b9 100644 (file)
@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 {
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
-       u32 gpu_scid, cntl1_regval = 0;
+       u32 cntl1_regval = 0;
 
        if (IS_ERR(a6xx_gpu->llc_mmio))
                return;
 
        if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
-               gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+               u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
 
                gpu_scid &= 0x1f;
                cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
                               (gpu_scid << 15) | (gpu_scid << 20);
+
+               /* On A660, the SCID programming for UCHE traffic is done in
+                * A6XX_GBIF_SCACHE_CNTL0[14:10]
+                */
+               if (adreno_is_a660_family(adreno_gpu))
+                       gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+                               (1 << 8), (gpu_scid << 10) | (1 << 8));
        }
 
        /*
@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
        }
 
        gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
-       /* On A660, the SCID programming for UCHE traffic is done in
-        * A6XX_GBIF_SCACHE_CNTL0[14:10]
-        */
-       if (adreno_is_a660_family(adreno_gpu))
-               gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
-                       (1 << 8), (gpu_scid << 10) | (1 << 8));
 }
 
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
index 7501849ed15d93c4871348421dd6c7f3c88254e7..6e90209cd543bf2819f5c8bfff8e7de26e8f66b4 100644 (file)
@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
        a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
-               2, sizeof(*a6xx_state->gmu_registers));
+               3, sizeof(*a6xx_state->gmu_registers));
 
        if (!a6xx_state->gmu_registers)
                return;
 
-       a6xx_state->nr_gmu_registers = 2;
+       a6xx_state->nr_gmu_registers = 3;
 
        /* Get the CX GMU registers from AHB */
        _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
index eb40d8413bca937e8c87ccf457e9cf88723801f7..6d36f63c333881c01f2ecc0cfc1a504537dbc4f7 100644 (file)
@@ -33,6 +33,7 @@ struct dp_aux_private {
        bool read;
        bool no_send_addr;
        bool no_send_stop;
+       bool initted;
        u32 offset;
        u32 segment;
 
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        }
 
        mutex_lock(&aux->mutex);
+       if (!aux->initted) {
+               ret = -EIO;
+               goto exit;
+       }
 
        dp_aux_update_offset_and_segment(aux, msg);
        dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        }
 
        aux->cmd_busy = false;
+
+exit:
        mutex_unlock(&aux->mutex);
 
        return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
 
        aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+       mutex_lock(&aux->mutex);
+
        dp_catalog_aux_enable(aux->catalog, true);
        aux->retry_cnt = 0;
+       aux->initted = true;
+
+       mutex_unlock(&aux->mutex);
 }
 
 void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
 
        aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+       mutex_lock(&aux->mutex);
+
+       aux->initted = false;
        dp_catalog_aux_enable(aux->catalog, false);
+
+       mutex_unlock(&aux->mutex);
 }
 
 int dp_aux_register(struct drm_dp_aux *dp_aux)
index f69a125f955958ae41d89cf9214c1cb963df30d6..0afc3b756f92ddd62f8207ad8dfc6b211b715d0e 100644 (file)
@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
        if (!prop) {
                DRM_DEV_DEBUG(dev,
                        "failed to find data lane mapping, using default\n");
+               /* Set the number of date lanes to 4 by default. */
+               msm_host->num_data_lanes = 4;
                return 0;
        }
 
index 09d2d279c30ae7bc92ae35c9921a9425ea51c693..dee13fedee3b5cc925d4e7352c0906ebd8123521 100644 (file)
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
                goto free_priv;
 
        pm_runtime_get_sync(&gpu->pdev->dev);
+       msm_gpu_hw_init(gpu);
        show_priv->state = gpu->funcs->gpu_state_get(gpu);
        pm_runtime_put_sync(&gpu->pdev->dev);
 
index 7936e8d498dda30e900d5a76c8ab57f11ad94994..892c04365239bb4397a3f5aa8b5debdf9a1aee56 100644 (file)
@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
        return ret;
 }
 
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
-               struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+                     ktime_t timeout)
 {
-       struct msm_drm_private *priv = dev->dev_private;
-       struct drm_msm_wait_fence *args = data;
-       ktime_t timeout = to_ktime(args->timeout);
-       struct msm_gpu_submitqueue *queue;
-       struct msm_gpu *gpu = priv->gpu;
        struct dma_fence *fence;
        int ret;
 
-       if (args->pad) {
-               DRM_ERROR("invalid pad: %08x\n", args->pad);
+       if (fence_id > queue->last_fence) {
+               DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+                                     fence_id, queue->last_fence);
                return -EINVAL;
        }
 
-       if (!gpu)
-               return 0;
-
-       queue = msm_submitqueue_get(file->driver_priv, args->queueid);
-       if (!queue)
-               return -ENOENT;
-
        /*
         * Map submitqueue scoped "seqno" (which is actually an idr key)
         * back to underlying dma-fence
@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        ret = mutex_lock_interruptible(&queue->lock);
        if (ret)
                return ret;
-       fence = idr_find(&queue->fence_idr, args->fence);
+       fence = idr_find(&queue->fence_idr, fence_id);
        if (fence)
                fence = dma_fence_get_rcu(fence);
        mutex_unlock(&queue->lock);
@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        }
 
        dma_fence_put(fence);
+
+       return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+               struct drm_file *file)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct drm_msm_wait_fence *args = data;
+       struct msm_gpu_submitqueue *queue;
+       int ret;
+
+       if (args->pad) {
+               DRM_ERROR("invalid pad: %08x\n", args->pad);
+               return -EINVAL;
+       }
+
+       if (!priv->gpu)
+               return 0;
+
+       queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+       if (!queue)
+               return -ENOENT;
+
+       ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
        msm_submitqueue_put(queue);
 
        return ret;
index 104fdfc140278863c62a1dc1225c253d863e727a..512d55eecbaf15e225e3ba5f84f4107ae34962bb 100644 (file)
@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
        return 0;
@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
                        break;
                fallthrough;
        default:
-               DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+               DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }
index 3cb029f1092555bfa4aa3ef1b0bd99d897dad23c..282628d6b72c09f8ed5bbf141e0330e417d463f4 100644 (file)
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                args->nr_cmds);
        if (IS_ERR(submit)) {
                ret = PTR_ERR(submit);
+               submit = NULL;
                goto out_unlock;
        }
 
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        drm_sched_entity_push_job(&submit->base);
 
        args->fence = submit->fence_id;
+       queue->last_fence = submit->fence_id;
 
        msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
        msm_process_post_deps(post_deps, args->nr_out_syncobjs,
index 59cdd00b69d0401e2720b2e37b8ff8d546c0c7ac..48ea2de911f1357bf69c7169dc0fce8f522e4c56 100644 (file)
@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
  *             by the submitqueue's priority
  * @faults:    the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ *             checking)
  * @ctx:       the per-drm_file context associated with the submitqueue (ie.
  *             which set of pgtables do submits jobs associated with the
  *             submitqueue use)
@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
        u32 flags;
        u32 ring_nr;
        int faults;
+       uint32_t last_fence;
        struct msm_file_private *ctx;
        struct list_head node;
        struct idr fence_idr;
index 8b7473f69cb8fe6bf86a86a097dfd0396e5b4207..384e90c4b2a7999bb1310e91d0253afd3f8558be 100644 (file)
@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
        struct msm_gpu *gpu = dev_to_gpu(dev);
        struct dev_pm_opp *opp;
 
+       /*
+        * Note that devfreq_recommended_opp() can modify the freq
+        * to something that actually is in the opp table:
+        */
        opp = devfreq_recommended_opp(dev, freq, flags);
 
        /*
@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
         */
        if (gpu->devfreq.idle_freq) {
                gpu->devfreq.idle_freq = *freq;
+               dev_pm_opp_put(opp);
                return 0;
        }
 
@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
        struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
        unsigned long idle_freq, target_freq = 0;
 
-       if (!df->devfreq)
-               return;
-
        /*
         * Hold devfreq lock to synchronize with get_dev_status()/
         * target() callbacks
@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 {
        struct msm_gpu_devfreq *df = &gpu->devfreq;
 
+       if (!df->devfreq)
+               return;
+
        msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
-                              HRTIMER_MODE_ABS);
+                              HRTIMER_MODE_REL);
 }
index f0b3e4cf5bceb6c33f41663cbc10bd904d18e630..b61792d2aa65740db39b49f957dc41eda99c5824 100644 (file)
@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_hvs *hvs = vc4->hvs;
-       struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        struct vc4_hvs_state *old_hvs_state;
+       unsigned int channel;
        int i;
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
        }
 
-       if (vc4->hvs->hvs5)
-               clk_set_min_rate(hvs->core_clk, 500000000);
-
        old_hvs_state = vc4_hvs_get_old_global_state(state);
-       if (!old_hvs_state)
+       if (IS_ERR(old_hvs_state))
                return;
 
-       for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
-               struct vc4_crtc_state *vc4_crtc_state =
-                       to_vc4_crtc_state(old_crtc_state);
-               unsigned int channel = vc4_crtc_state->assigned_channel;
+       for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+               struct drm_crtc_commit *commit;
                int ret;
 
-               if (channel == VC4_HVS_CHANNEL_DISABLED)
+               if (!old_hvs_state->fifo_state[channel].in_use)
                        continue;
 
-               if (!old_hvs_state->fifo_state[channel].in_use)
+               commit = old_hvs_state->fifo_state[channel].pending_commit;
+               if (!commit)
                        continue;
 
-               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+               ret = drm_crtc_commit_wait(commit);
                if (ret)
                        drm_err(dev, "Timed out waiting for commit\n");
+
+               drm_crtc_commit_put(commit);
+               old_hvs_state->fifo_state[channel].pending_commit = NULL;
        }
 
+       if (vc4->hvs->hvs5)
+               clk_set_min_rate(hvs->core_clk, 500000000);
+
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
        vc4_ctm_commit(vc4, state);
@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
        unsigned int i;
 
        hvs_state = vc4_hvs_get_new_global_state(state);
-       if (!hvs_state)
-               return -EINVAL;
+       if (WARN_ON(IS_ERR(hvs_state)))
+               return PTR_ERR(hvs_state);
 
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_crtc_state =
@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 
        for (i = 0; i < HVS_NUM_CHANNELS; i++) {
                state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
-
-               if (!old_state->fifo_state[i].pending_commit)
-                       continue;
-
-               state->fifo_state[i].pending_commit =
-                       drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
        }
 
        return &state->base;
@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
        unsigned int i;
 
        hvs_new_state = vc4_hvs_get_global_state(state);
-       if (!hvs_new_state)
-               return -EINVAL;
+       if (IS_ERR(hvs_new_state))
+               return PTR_ERR(hvs_new_state);
 
        for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
                if (!hvs_new_state->fifo_state[i].in_use)
index d86e1ad4a97260b82895d98bbb2feef93ef2ca7f..5072dbb0669a333fb4b722fa6b26167e20843c65 100644 (file)
@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
        schedule_work(&vgdev->config_changed_work);
 }
 
-static __poll_t virtio_gpu_poll(struct file *filp,
-                               struct poll_table_struct *wait)
-{
-       struct drm_file *drm_file = filp->private_data;
-       struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
-       struct drm_device *dev = drm_file->minor->dev;
-       struct virtio_gpu_device *vgdev = dev->dev_private;
-       struct drm_pending_event *e = NULL;
-       __poll_t mask = 0;
-
-       if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
-               return drm_poll(filp, wait);
-
-       poll_wait(filp, &drm_file->event_wait, wait);
-
-       if (!list_empty(&drm_file->event_list)) {
-               spin_lock_irq(&dev->event_lock);
-               e = list_first_entry(&drm_file->event_list,
-                                    struct drm_pending_event, link);
-               drm_file->event_space += e->event->length;
-               list_del(&e->link);
-               spin_unlock_irq(&dev->event_lock);
-
-               kfree(e);
-               mask |= EPOLLIN | EPOLLRDNORM;
-       }
-
-       return mask;
-}
-
 static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
        { 0 },
@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
 MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_AUTHOR("Alon Levy");
 
-static const struct file_operations virtio_gpu_driver_fops = {
-       .owner          = THIS_MODULE,
-       .open           = drm_open,
-       .release        = drm_release,
-       .unlocked_ioctl = drm_ioctl,
-       .compat_ioctl   = drm_compat_ioctl,
-       .poll           = virtio_gpu_poll,
-       .read           = drm_read,
-       .llseek         = noop_llseek,
-       .mmap           = drm_gem_mmap
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
 
 static const struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
index e0265fe74aa565f639127042f7071faf80c5b50c..0a194aaad4192b7b5dff1b8e2762d0461fee9631 100644 (file)
@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
        spinlock_t       lock;
 };
 
-#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
 struct virtio_gpu_fence_event {
        struct drm_pending_event base;
        struct drm_event event;
index 5618a1d5879c56382ed23d6aa046f58a88524c25..3607646d322954c9d07a3245e052cf7857fae8c5 100644 (file)
@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
        if (!e)
                return -ENOMEM;
 
-       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
        e->event.length = sizeof(e->event);
 
        ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
index 72df563477b1c3e3d99feea2126bdc03fd408cd2..f8639a4457d23ae55eece7874c10e3a05562b58d 100644 (file)
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
 }
 
 static const struct i2c_algorithm cbus_i2c_algo = {
-       .smbus_xfer     = cbus_i2c_smbus_xfer,
-       .functionality  = cbus_i2c_func,
+       .smbus_xfer             = cbus_i2c_smbus_xfer,
+       .smbus_xfer_atomic      = cbus_i2c_smbus_xfer,
+       .functionality          = cbus_i2c_func,
 };
 
 static int cbus_i2c_remove(struct platform_device *pdev)
index 819ab4ee517e13cf1367cbfb157a72add5129d6b..02ddb237f69afdfcc766f1703e9b81297882534a 100644 (file)
@@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
        if (!(ipd & REG_INT_MBRF))
                return;
 
-       /* ack interrupt */
-       i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+       /* ack interrupt (read also produces a spurious START flag, clear it too) */
+       i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
 
        /* Can only handle a maximum of 32 bytes at a time */
        if (len > 32)
index b9b19a2a2ffa0b83957cdec0314b636db968b168..66145d2b9b55867e16dce3a756877fc145a4e323 100644 (file)
@@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
 {
        struct stm32f7_i2c_dev *i2c_dev = data;
        struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+       struct stm32_i2c_dma *dma = i2c_dev->dma;
        void __iomem *base = i2c_dev->base;
        u32 status, mask;
        int ret = IRQ_HANDLED;
@@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
                        __func__, f7_msg->addr);
                writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+               if (i2c_dev->use_dma) {
+                       stm32f7_i2c_disable_dma_req(i2c_dev);
+                       dmaengine_terminate_async(dma->chan_using);
+               }
                f7_msg->result = -ENXIO;
        }
 
@@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                /* Clear STOP flag */
                writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
 
-               if (i2c_dev->use_dma) {
+               if (i2c_dev->use_dma && !f7_msg->result) {
                        ret = IRQ_WAKE_THREAD;
                } else {
                        i2c_dev->master_mode = false;
@@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                if (f7_msg->stop) {
                        mask = STM32F7_I2C_CR2_STOP;
                        stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
-               } else if (i2c_dev->use_dma) {
+               } else if (i2c_dev->use_dma && !f7_msg->result) {
                        ret = IRQ_WAKE_THREAD;
                } else if (f7_msg->smbus) {
                        stm32f7_i2c_smbus_rep_start(i2c_dev);
@@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
        if (!ret) {
                dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
                stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_async(dma->chan_using);
                f7_msg->result = -ETIMEDOUT;
        }
 
@@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
        /* Disable dma */
        if (i2c_dev->use_dma) {
                stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_async(dma->chan_using);
        }
 
        i2c_dev->master_mode = false;
@@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
        time_left = wait_for_completion_timeout(&i2c_dev->complete,
                                                i2c_dev->adap.timeout);
        ret = f7_msg->result;
+       if (ret) {
+               if (i2c_dev->use_dma)
+                       dmaengine_synchronize(dma->chan_using);
+
+               /*
+                * It is possible that some unsent data have already been
+                * written into TXDR. To avoid sending old data in a
+                * further transfer, flush TXDR in case of any error
+                */
+               writel_relaxed(STM32F7_I2C_ISR_TXE,
+                              i2c_dev->base + STM32F7_I2C_ISR);
+               goto pm_free;
+       }
 
        if (!time_left) {
                dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
                        i2c_dev->msg->addr);
                if (i2c_dev->use_dma)
-                       dmaengine_terminate_all(dma->chan_using);
+                       dmaengine_terminate_sync(dma->chan_using);
+               stm32f7_i2c_wait_free_bus(i2c_dev);
                ret = -ETIMEDOUT;
        }
 
@@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
        timeout = wait_for_completion_timeout(&i2c_dev->complete,
                                              i2c_dev->adap.timeout);
        ret = f7_msg->result;
-       if (ret)
+       if (ret) {
+               if (i2c_dev->use_dma)
+                       dmaengine_synchronize(dma->chan_using);
+
+               /*
+                * It is possible that some unsent data have already been
+                * written into TXDR. To avoid sending old data in a
+                * further transfer, flush TXDR in case of any error
+                */
+               writel_relaxed(STM32F7_I2C_ISR_TXE,
+                              i2c_dev->base + STM32F7_I2C_ISR);
                goto pm_free;
+       }
 
        if (!timeout) {
                dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
                if (i2c_dev->use_dma)
-                       dmaengine_terminate_all(dma->chan_using);
+                       dmaengine_terminate_sync(dma->chan_using);
+               stm32f7_i2c_wait_free_bus(i2c_dev);
                ret = -ETIMEDOUT;
                goto pm_free;
        }
index 01e37b75471e111430fcd30c1a159afc7d5540bc..2b88f03e5252182251a47e056221f60802311543 100644 (file)
@@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, b53_spi_of_match);
 
+static const struct spi_device_id b53_spi_ids[] = {
+       { .name = "bcm5325" },
+       { .name = "bcm5365" },
+       { .name = "bcm5395" },
+       { .name = "bcm5397" },
+       { .name = "bcm5398" },
+       { .name = "bcm53115" },
+       { .name = "bcm53125" },
+       { .name = "bcm53128" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
 static struct spi_driver b53_spi_driver = {
        .driver = {
                .name   = "b53-switch",
@@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = {
        .probe  = b53_spi_probe,
        .remove = b53_spi_remove,
        .shutdown = b53_spi_shutdown,
+       .id_table = b53_spi_ids,
 };
 
 module_spi_driver(b53_spi_driver);
index 6ea003678798651f70df5f8235863f7c79973c85..55273013bfb55c5b59a72b6b7fdaff0c1c508915 100644 (file)
@@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
 }
 
 static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
-                                         u16 status, u16 lpa,
+                                         u16 ctrl, u16 status, u16 lpa,
                                          struct phylink_link_state *state)
 {
+       state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+
        if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
-               state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+       /* The Speed and Duplex Resolved register is 1 if AN is enabled
+                * and complete, or if AN is disabled. So with disabled AN we
+                * still get here on link up. But we want to set an_complete
+                * only if AN was enabled, thus we look at BMCR_ANENABLE.
+                * (According to 802.3-2008 section 22.2.4.2.10, we should be
+                *  able to get this same value from BMSR_ANEGCAPABLE, but tests
+                *  show that these Marvell PHYs don't conform to this part of
+        *  the specification - BMSR_ANEGCAPABLE is simply always 1.)
+                */
+               state->an_complete = !!(ctrl & BMCR_ANENABLE);
                state->duplex = status &
                                MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
                                                 DUPLEX_FULL : DUPLEX_HALF;
@@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
                        dev_err(chip->dev, "invalid PHY speed\n");
                        return -EINVAL;
                }
+       } else if (state->link &&
+                  state->interface != PHY_INTERFACE_MODE_SGMII) {
+               /* If Speed and Duplex Resolved register is 0 and link is up, it
+                * means that AN was enabled, but link partner had it disabled
+                * and the PHY invoked the Auto-Negotiation Bypass feature and
+                * linked anyway.
+                */
+               state->duplex = DUPLEX_FULL;
+               if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+                       state->speed = SPEED_2500;
+               else
+                       state->speed = SPEED_1000;
        } else {
                state->link = false;
        }
@@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                                   int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status;
+       u16 lpa, status, ctrl;
        int err;
 
+       err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+       if (err) {
+               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               return err;
+       }
+
        err = mv88e6352_serdes_read(chip, 0x11, &status);
        if (err) {
                dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
 }
 
 int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -883,9 +912,16 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
        int port, int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status;
+       u16 lpa, status, ctrl;
        int err;
 
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6390_SGMII_BMCR, &ctrl);
+       if (err) {
+               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               return err;
+       }
+
        err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
                                    MV88E6390_SGMII_PHY_STATUS, &status);
        if (err) {
@@ -900,7 +936,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
 }
 
 static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@@ -1271,9 +1307,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
        }
 }
 
-static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
+static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
+                                       bool on)
 {
-       u16 reg, pcs;
+       u16 reg;
+       int err;
+
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_CTRL1, &reg);
+       if (err)
+               return err;
+
+       if (on)
+               reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+                        MV88E6393X_SERDES_CTRL1_RX_PDOWN);
+       else
+               reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+                      MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+       return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                     MV88E6393X_SERDES_CTRL1, reg);
+}
+
+static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
+{
+       u16 reg;
        int err;
 
        /* mv88e6393x family errata 4.6:
@@ -1284,26 +1342,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
         * It seems that after this workaround the SERDES is automatically
         * powered up (the bit is cleared), so power it down.
         */
-       if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
-           lane == MV88E6393X_PORT10_LANE) {
-               err = mv88e6390_serdes_read(chip, lane,
-                                           MDIO_MMD_PHYXS,
-                                           MV88E6393X_SERDES_POC, &reg);
-               if (err)
-                       return err;
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_POC, &reg);
+       if (err)
+               return err;
 
-               reg &= ~MV88E6393X_SERDES_POC_PDOWN;
-               reg |= MV88E6393X_SERDES_POC_RESET;
+       reg &= ~MV88E6393X_SERDES_POC_PDOWN;
+       reg |= MV88E6393X_SERDES_POC_RESET;
 
-               err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
-                                            MV88E6393X_SERDES_POC, reg);
-               if (err)
-                       return err;
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                    MV88E6393X_SERDES_POC, reg);
+       if (err)
+               return err;
 
-               err = mv88e6390_serdes_power_sgmii(chip, lane, false);
-               if (err)
-                       return err;
-       }
+       err = mv88e6390_serdes_power_sgmii(chip, lane, false);
+       if (err)
+               return err;
+
+       return mv88e6393x_serdes_power_lane(chip, lane, false);
+}
+
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+{
+       int err;
+
+       err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
+       if (err)
+               return err;
+
+       err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
+       if (err)
+               return err;
+
+       return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
+}
+
+static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
+{
+       u16 reg, pcs;
+       int err;
 
        /* mv88e6393x family errata 4.8:
         * When a SERDES port is operating in 1000BASE-X or SGMII mode link may
@@ -1334,38 +1411,149 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
                                      MV88E6393X_ERRATA_4_8_REG, reg);
 }
 
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
+                                        u8 cmode)
+{
+       static const struct {
+               u16 dev, reg, val, mask;
+       } fixes[] = {
+               { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+               { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+               { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+               { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+               { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+               { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+               { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+                 MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+       };
+       int err, i;
+       u16 reg;
+
+       /* mv88e6393x family errata 5.2:
+        * For optimal signal integrity the following sequence should be applied
+        * to SERDES operating in 10G mode. These registers only apply to 10G
+        * operation and have no effect on other speeds.
+        */
+       if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+               err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
+                                           fixes[i].reg, &reg);
+               if (err)
+                       return err;
+
+               reg &= ~fixes[i].mask;
+               reg |= fixes[i].val;
+
+               err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
+                                            fixes[i].reg, reg);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
+                                             int lane, u8 cmode, bool on)
 {
+       u16 reg;
        int err;
 
-       err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
+       if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+               return 0;
+
+       /* Inband AN is broken on Amethyst in 2500base-x mode when set by
+        * standard mechanism (via cmode).
+        * We can get around this by configuring the PCS mode to 1000base-x
+        * and then writing value 0x58 to register 1e.8000. (This must be done
+        * while SerDes receiver and transmitter are disabled, which is, when
+        * this function is called.)
+        * It seems that when we do this configuration to 2500base-x mode (by
+        * changing PCS mode to 1000base-x and frequency to 3.125 GHz from
+        * 1.25 GHz) and then configure to sgmii or 1000base-x, the device
+        * thinks that it already has SerDes at 1.25 GHz and does not change
+        * the 1e.8000 register, leaving SerDes at 3.125 GHz.
+        * To avoid this, change PCS mode back to 2500base-x when disabling
+        * SerDes from 2500base-x mode.
+        */
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_POC, &reg);
+       if (err)
+               return err;
+
+       reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
+       if (on)
+               reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
+                      MV88E6393X_SERDES_POC_AN;
+       else
+               reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
+       reg |= MV88E6393X_SERDES_POC_RESET;
+
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                    MV88E6393X_SERDES_POC, reg);
        if (err)
                return err;
 
-       err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
        if (err)
                return err;
 
-       return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
+       return 0;
 }
 
 int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
                            bool on)
 {
        u8 cmode = chip->ports[port].cmode;
+       int err;
 
        if (port != 0 && port != 9 && port != 10)
                return -EOPNOTSUPP;
 
+       if (on) {
+               err = mv88e6393x_serdes_erratum_4_8(chip, lane);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+                                                        true);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_power_lane(chip, lane, true);
+               if (err)
+                       return err;
+       }
+
        switch (cmode) {
        case MV88E6XXX_PORT_STS_CMODE_SGMII:
        case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
        case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
-               return mv88e6390_serdes_power_sgmii(chip, lane, on);
+               err = mv88e6390_serdes_power_sgmii(chip, lane, on);
+               break;
        case MV88E6393X_PORT_STS_CMODE_5GBASER:
        case MV88E6393X_PORT_STS_CMODE_10GBASER:
-               return mv88e6390_serdes_power_10g(chip, lane, on);
+               err = mv88e6390_serdes_power_10g(chip, lane, on);
+               break;
        }
 
-       return 0;
+       if (err)
+               return err;
+
+       if (!on) {
+               err = mv88e6393x_serdes_power_lane(chip, lane, false);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+                                                        false);
+       }
+
+       return err;
 }
index cbb3ba30caea9db2e5f9f3fa3820946b50d1c4fe..8dd8ed225b4594f5421d91fb3755dfe242cb0bfb 100644 (file)
 #define MV88E6393X_SERDES_POC_PCS_MASK         0x0007
 #define MV88E6393X_SERDES_POC_RESET            BIT(15)
 #define MV88E6393X_SERDES_POC_PDOWN            BIT(5)
+#define MV88E6393X_SERDES_POC_AN               BIT(3)
+#define MV88E6393X_SERDES_CTRL1                        0xf003
+#define MV88E6393X_SERDES_CTRL1_TX_PDOWN       BIT(9)
+#define MV88E6393X_SERDES_CTRL1_RX_PDOWN       BIT(8)
 
 #define MV88E6393X_ERRATA_4_8_REG              0xF074
 #define MV88E6393X_ERRATA_4_8_BIT              BIT(14)
index baaae97283c5e259ef3cbd7e293c3db3f257e459..078ca4cd716057ee3674be7516de58185c03a533 100644 (file)
 #define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC    2112
 
 /* Family-specific data and limits */
+#define RTL8365MB_PHYADDRMAX   7
 #define RTL8365MB_NUM_PHYREGS  32
 #define RTL8365MB_PHYREGMAX    (RTL8365MB_NUM_PHYREGS - 1)
 #define RTL8365MB_MAX_NUM_PORTS        (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
 #define RTL8365MB_INDIRECT_ACCESS_STATUS_REG                   0x1F01
 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG                  0x1F02
 #define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK    GENMASK(4, 0)
-#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK                GENMASK(6, 5)
+#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK                GENMASK(7, 5)
 #define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK    GENMASK(11, 8)
 #define   RTL8365MB_PHY_BASE                                   0x2000
 #define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG               0x1F03
@@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
        u16 val;
        int ret;
 
+       if (phy > RTL8365MB_PHYADDRMAX)
+               return -EINVAL;
+
        if (regnum > RTL8365MB_PHYREGMAX)
                return -EINVAL;
 
@@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
        u32 ocp_addr;
        int ret;
 
+       if (phy > RTL8365MB_PHYADDRMAX)
+               return -EINVAL;
+
        if (regnum > RTL8365MB_PHYREGMAX)
                return -EINVAL;
 
index 23b2d390fcdda8af72fe7e474fc5fef7194a9274..ace691d7cd759f52d38d116ce8cfe979089d7f25 100644 (file)
 
 #define AQ_DEVICE_ID_AQC113DEV 0x00C0
 #define AQ_DEVICE_ID_AQC113CS  0x94C0
+#define AQ_DEVICE_ID_AQC113CA  0x34C0
 #define AQ_DEVICE_ID_AQC114CS  0x93C0
 #define AQ_DEVICE_ID_AQC113    0x04C0
 #define AQ_DEVICE_ID_AQC113C   0x14C0
 #define AQ_DEVICE_ID_AQC115C   0x12C0
+#define AQ_DEVICE_ID_AQC116C   0x11C0
 
 #define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
 
 
 #define AQ_NIC_RATE_10G                BIT(0)
 #define AQ_NIC_RATE_5G         BIT(1)
-#define AQ_NIC_RATE_5GSR       BIT(2)
-#define AQ_NIC_RATE_2G5                BIT(3)
-#define AQ_NIC_RATE_1G         BIT(4)
-#define AQ_NIC_RATE_100M       BIT(5)
-#define AQ_NIC_RATE_10M                BIT(6)
-#define AQ_NIC_RATE_1G_HALF    BIT(7)
-#define AQ_NIC_RATE_100M_HALF  BIT(8)
-#define AQ_NIC_RATE_10M_HALF   BIT(9)
+#define AQ_NIC_RATE_2G5                BIT(2)
+#define AQ_NIC_RATE_1G         BIT(3)
+#define AQ_NIC_RATE_100M       BIT(4)
+#define AQ_NIC_RATE_10M                BIT(5)
+#define AQ_NIC_RATE_1G_HALF    BIT(6)
+#define AQ_NIC_RATE_100M_HALF  BIT(7)
+#define AQ_NIC_RATE_10M_HALF   BIT(8)
 
-#define AQ_NIC_RATE_EEE_10G    BIT(10)
-#define AQ_NIC_RATE_EEE_5G     BIT(11)
-#define AQ_NIC_RATE_EEE_2G5    BIT(12)
-#define AQ_NIC_RATE_EEE_1G     BIT(13)
-#define AQ_NIC_RATE_EEE_100M   BIT(14)
+#define AQ_NIC_RATE_EEE_10G    BIT(9)
+#define AQ_NIC_RATE_EEE_5G     BIT(10)
+#define AQ_NIC_RATE_EEE_2G5    BIT(11)
+#define AQ_NIC_RATE_EEE_1G     BIT(12)
+#define AQ_NIC_RATE_EEE_100M   BIT(13)
 #define AQ_NIC_RATE_EEE_MSK     (AQ_NIC_RATE_EEE_10G |\
                                 AQ_NIC_RATE_EEE_5G |\
                                 AQ_NIC_RATE_EEE_2G5 |\
index 062a300a566a55c505cb556530a3ca0aec83c703..dbd28466013580aca8cab6b99bbc168dd9c40e99 100644 (file)
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
 };
 
 struct aq_stats_s {
+       u64 brc;
+       u64 btc;
        u64 uprc;
        u64 mprc;
        u64 bprc;
index 1acf544afeb4449b55ad4ce93781c13ccfa98a51..33f1a1377588bda47db7a76c94421a3f84d3ac4f 100644 (file)
@@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
        aq_macsec_init(self);
 #endif
 
-       mutex_lock(&self->fwreq_mutex);
-       err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
-       mutex_unlock(&self->fwreq_mutex);
-       if (err)
-               goto err_exit;
+       if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+               // If DT has none or an invalid one, ask device for MAC address
+               mutex_lock(&self->fwreq_mutex);
+               err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+               mutex_unlock(&self->fwreq_mutex);
 
-       eth_hw_addr_set(self->ndev, addr);
+               if (err)
+                       goto err_exit;
 
-       if (!is_valid_ether_addr(self->ndev->dev_addr) ||
-           !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
-               netdev_warn(self->ndev, "MAC is invalid, will use random.");
-               eth_hw_addr_random(self->ndev);
+               if (is_valid_ether_addr(addr) &&
+                   aq_nic_is_valid_ether_addr(addr)) {
+                       eth_hw_addr_set(self->ndev, addr);
+               } else {
+                       netdev_warn(self->ndev, "MAC is invalid, will use random.");
+                       eth_hw_addr_random(self->ndev);
+               }
        }
 
 #if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
        data[++i] = stats->mbtc;
        data[++i] = stats->bbrc;
        data[++i] = stats->bbtc;
-       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
-       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+       if (stats->brc)
+               data[++i] = stats->brc;
+       else
+               data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+       if (stats->btc)
+               data[++i] = stats->btc;
+       else
+               data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
        data[++i] = stats->dma_pkt_rc;
        data[++i] = stats->dma_pkt_tc;
        data[++i] = stats->dma_oct_rc;
index d4b1976ee69b934d4a5dffac2d13da5697d78066..797a95142d1f44dbc0ed454e2daf955ec8fc90ea 100644 (file)
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+       { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+       { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
 
        {}
 };
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_AQC113CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC114CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
-       { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+       { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+       { AQ_DEVICE_ID_AQC113CA,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+       { AQ_DEVICE_ID_AQC116C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
 };
 
 MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
index d281322d7dd29074185e2645aa0a17bb344ef404..f4774cf051c9780cfc434450673bb82d175e2736 100644 (file)
@@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
 {
        unsigned int count;
 
-       WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
-                 "Invalid tc %u (#rx=%u, #tx=%u)\n",
-                 tc, self->rx_rings, self->tx_rings);
        if (!aq_vec_is_valid_tc(self, tc))
                return 0;
 
index 3f1704cbe1cb96bc0cf80ab1480242e7e06c4463..7e88d7234b14588c1816b2808ab1421ecf23b4a8 100644 (file)
@@ -867,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
 int hw_atl_utils_update_stats(struct aq_hw_s *self)
 {
        struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
        struct hw_atl_utils_mbox mbox;
+       bool corrupted_stats = false;
 
        hw_atl_utils_mpi_read_stats(self, &mbox);
 
-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
-                       mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+               curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+       else \
+               corrupted_stats = true; \
+} while (0)
 
        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc);
@@ -892,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
                AQ_SDELTA(bbrc);
                AQ_SDELTA(bbtc);
                AQ_SDELTA(dpc);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
        }
 #undef AQ_SDELTA
 
index eac631c45c565a4c17802123e38c4120bfef848d..4d4cfbc91e19cf658fccd181d037c876a864e8f2 100644 (file)
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
        if (speed & AQ_NIC_RATE_5G)
                rate |= FW2X_RATE_5G;
 
-       if (speed & AQ_NIC_RATE_5GSR)
-               rate |= FW2X_RATE_5G;
-
        if (speed & AQ_NIC_RATE_2G5)
                rate |= FW2X_RATE_2G5;
 
index c98708bb044cad11597fba703882b761ec46f132..5dfc751572edc5d4817d3a0bab21ebddd376bded 100644 (file)
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
                          AQ_NIC_RATE_5G  |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G  |
-                         AQ_NIC_RATE_1G_HALF   |
                          AQ_NIC_RATE_100M      |
-                         AQ_NIC_RATE_100M_HALF |
-                         AQ_NIC_RATE_10M       |
-                         AQ_NIC_RATE_10M_HALF,
+                         AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+       DEFAULT_BOARD_BASIC_CAPABILITIES,
+       .media_type = AQ_HW_MEDIA_TYPE_TP,
+       .link_speed_msk = AQ_NIC_RATE_2G5 |
+                         AQ_NIC_RATE_1G  |
+                         AQ_NIC_RATE_100M      |
+                         AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+       DEFAULT_BOARD_BASIC_CAPABILITIES,
+       .media_type = AQ_HW_MEDIA_TYPE_TP,
+       .link_speed_msk = AQ_NIC_RATE_1G  |
+                         AQ_NIC_RATE_100M      |
+                         AQ_NIC_RATE_10M,
 };
 
 static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
index de8723f1c28a13f0e98214f9011ac771028f2eff..346f0dc9912e500014d253135ab431f67be8eb3c 100644 (file)
@@ -9,6 +9,8 @@
 #include "aq_common.h"
 
 extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
 extern const struct aq_hw_ops hw_atl2_ops;
 
 #endif /* HW_ATL2_H */
index b66fa346581ce30b94627f810cf013d02f63323c..6bad64c77b87c94a258b4bee5391ffa66a52c031 100644 (file)
@@ -239,7 +239,8 @@ struct version_s {
                u8 minor;
                u16 build;
        } phy;
-       u32 rsvd;
+       u32 drv_iface_ver:4;
+       u32 rsvd:28;
 };
 
 struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
        u16 rsvd2;
 };
 
-struct statistics_s {
+struct statistics_a0_s {
        struct {
                u32 link_up;
                u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
        u32 reserve_fw_gap;
 };
 
+struct __packed statistics_b0_s {
+       u64 rx_good_octets;
+       u64 rx_pause_frames;
+       u64 rx_good_frames;
+       u64 rx_errors;
+       u64 rx_unicast_frames;
+       u64 rx_multicast_frames;
+       u64 rx_broadcast_frames;
+
+       u64 tx_good_octets;
+       u64 tx_pause_frames;
+       u64 tx_good_frames;
+       u64 tx_errors;
+       u64 tx_unicast_frames;
+       u64 tx_multicast_frames;
+       u64 tx_broadcast_frames;
+
+       u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+       union __packed {
+               struct statistics_a0_s a0;
+               struct statistics_b0_s b0;
+       };
+};
+
 struct filter_caps_s {
        u8 l2_filters_base_index:6;
        u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
        u32 rsvd5;
 };
 
-struct fw_interface_out {
+struct __packed fw_interface_out {
        struct transaction_counter_s transaction_id;
        struct version_s version;
        struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
        struct core_dump_s core_dump;
        u32 rsvd11;
        struct statistics_s stats;
-       u32 rsvd12;
        struct filter_caps_s filter_caps;
        struct device_caps_s device_caps;
        u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
 #define  AQ_HOST_MODE_LOW_POWER    3U
 #define  AQ_HOST_MODE_SHUTDOWN     4U
 
+#define  AQ_A2_FW_INTERFACE_A0     0
+#define  AQ_A2_FW_INTERFACE_B0     1
+
 int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
 
 int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
index dd259c8f2f4f397adbed0b5ea284bd8881a48735..58d426dda3edbf13b4e852b3fab14e720d55d3b2 100644 (file)
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
                        if (cnt > AQ_A2_FW_READ_TRY_MAX)
                                return -ETIME;
                        if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
-                               udelay(1);
+                               mdelay(1);
                } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
 
                hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
 {
        link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
        link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
-       link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+       link_options->rate_N5G = link_options->rate_5G;
        link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
        link_options->rate_N2P5G = link_options->rate_2P5G;
        link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
                rate |= AQ_NIC_RATE_10G;
        if (lkp_link_caps->rate_5G)
                rate |= AQ_NIC_RATE_5G;
-       if (lkp_link_caps->rate_N5G)
-               rate |= AQ_NIC_RATE_5GSR;
        if (lkp_link_caps->rate_2P5G)
                rate |= AQ_NIC_RATE_2G5;
        if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
        return 0;
 }
 
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+                               struct statistics_s *stats)
 {
        struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
-       struct statistics_s stats;
-
-       hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
-                       stats.msm._F_ - priv->last_stats.msm._F_)
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
+       bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+               curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+       else \
+               corrupted_stats = true; \
+} while (0)
 
        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
                AQ_SDELTA(mbtc, tx_multicast_octets);
                AQ_SDELTA(bbrc, rx_broadcast_octets);
                AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
        }
 #undef AQ_SDELTA
-       self->curr_stats.dma_pkt_rc =
-               hw_atl_stats_rx_dma_good_pkt_counter_get(self);
-       self->curr_stats.dma_pkt_tc =
-               hw_atl_stats_tx_dma_good_pkt_counter_get(self);
-       self->curr_stats.dma_oct_rc =
-               hw_atl_stats_rx_dma_good_octet_counter_get(self);
-       self->curr_stats.dma_oct_tc =
-               hw_atl_stats_tx_dma_good_octet_counter_get(self);
-       self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+                               struct statistics_s *stats)
+{
+       struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
+       bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+               curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+       else \
+               corrupted_stats = true; \
+} while (0)
+
+       if (self->aq_link_status.mbps) {
+               AQ_SDELTA(uprc, rx_unicast_frames);
+               AQ_SDELTA(mprc, rx_multicast_frames);
+               AQ_SDELTA(bprc, rx_broadcast_frames);
+               AQ_SDELTA(erpr, rx_errors);
+               AQ_SDELTA(brc, rx_good_octets);
+
+               AQ_SDELTA(uptc, tx_unicast_frames);
+               AQ_SDELTA(mptc, tx_multicast_frames);
+               AQ_SDELTA(bptc, tx_broadcast_frames);
+               AQ_SDELTA(erpt, tx_errors);
+               AQ_SDELTA(btc, tx_good_octets);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
+       }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+       struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct statistics_s stats;
+       struct version_s version;
+       int err;
+
+       err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+       if (err)
+               return err;
+
+       err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+       if (err)
+               return err;
+
+       if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+               aq_a2_fill_a0_stats(self, &stats);
+       else
+               aq_a2_fill_b0_stats(self, &stats);
+
+       cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+       cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+       cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+       cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+       cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
 
        memcpy(&priv->last_stats, &stats, sizeof(stats));
 
@@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
        hw_atl2_shared_buffer_read_safe(self, version, &version);
 
        /* A2 FW version is stored in reverse order */
-       return version.mac.major << 24 |
-              version.mac.minor << 16 |
-              version.mac.build;
+       return version.bundle.major << 24 |
+              version.bundle.minor << 16 |
+              version.bundle.build;
 }
 
 int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
index 6451c8383639fcedbe61098ad3c8e244a4fd0a69..8e643567abce2a0673eb32eb3e55d3cc6c15b1bd 100644 (file)
@@ -4550,6 +4550,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 
        fsl_mc_portal_free(priv->mc_io);
 
+       destroy_workqueue(priv->dpaa2_ptp_wq);
+
        dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
 
        free_netdev(net_dev);
index 3cca51735421a7435f4c7f32fa3f5af9003f2d37..0bb3911dd014d08c3611ea209961fe4e6f7125d4 100644 (file)
@@ -628,17 +628,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
        old_buff_size = adapter->prev_rx_buf_sz;
        new_buff_size = adapter->cur_rx_buf_sz;
 
-       /* Require buff size to be exactly same for now */
-       if (old_buff_size != new_buff_size)
-               return false;
-
-       if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
-               return true;
-
-       if (old_num_pools < adapter->min_rx_queues ||
-           old_num_pools > adapter->max_rx_queues ||
-           old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
-           old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+       if (old_buff_size != new_buff_size ||
+           old_num_pools != new_num_pools ||
+           old_pool_size != new_pool_size)
                return false;
 
        return true;
@@ -874,17 +866,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
        old_mtu = adapter->prev_mtu;
        new_mtu = adapter->req_mtu;
 
-       /* Require MTU to be exactly same to reuse pools for now */
-       if (old_mtu != new_mtu)
-               return false;
-
-       if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
-               return true;
-
-       if (old_num_pools < adapter->min_tx_queues ||
-           old_num_pools > adapter->max_tx_queues ||
-           old_pool_size < adapter->min_tx_entries_per_subcrq ||
-           old_pool_size > adapter->max_tx_entries_per_subcrq)
+       if (old_mtu != new_mtu ||
+           old_num_pools != new_num_pools ||
+           old_pool_size != new_pool_size)
                return false;
 
        return true;
index ff55cb415b110fb9e63bd8adc6a401e1c78887ad..bb9a8084729888ab764cf9b6834edf2085068b79 100644 (file)
@@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        while (i--) {
                dma = xsk_buff_xdp_get_dma(*xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
+               rx_desc->wb.status_error0 = 0;
 
                rx_desc++;
                xdp++;
index ce486e16489c59609c6c07c14fe9f2f4f5981b13..6480696c979beb2e8e304f43c3a64d329127669f 100644 (file)
@@ -7458,7 +7458,7 @@ static int mvpp2_probe(struct platform_device *pdev)
 
        shared = num_present_cpus() - priv->nthreads;
        if (shared > 0)
-               bitmap_fill(&priv->lock_map,
+               bitmap_set(&priv->lock_map, 0,
                            min_t(int, shared, MVPP2_MAX_THREADS));
 
        for (i = 0; i < MVPP2_MAX_THREADS; i++) {
index cb56e171ddd4c27d420f8f38604f51c1275aa3db..3ca6b942ebe2539ab9fc2d10f8cbe48934157588 100644 (file)
@@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
                        goto free_regions;
                break;
        default:
-               return err;
+               goto free_regions;
        }
 
        mw->mbox_wq = alloc_workqueue(name,
index 066d79e4ecfc28d8bfc5fa7ced33f3192a339cb9..10238bedd694fe7752edb5734a2dc1aa4a5183b3 100644 (file)
@@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
                                       ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
-                                      ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+                                      ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
                                       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
                                       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
-                                      ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                      ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
-                                      ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                      ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
                                       ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
                                       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
index 3f6d5c38463723e23be94af802bdf707f9a7c4e2..f1c10f2bda780a1d4a9dabe3c1a126b80809e5e9 100644 (file)
@@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                bool carry_xdp_prog)
 {
        struct bpf_prog *xdp_prog;
-       int i, t;
+       int i, t, ret;
 
-       mlx4_en_copy_priv(tmp, priv, prof);
+       ret = mlx4_en_copy_priv(tmp, priv, prof);
+       if (ret) {
+               en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+                       __func__);
+               return ret;
+       }
 
        if (mlx4_en_alloc_resources(tmp)) {
                en_warn(priv,
index 8eaa24d865c55b0c50088db22abf9e2c0c271989..a46284ca517200dde86231bf3aa741c226774e48 100644 (file)
@@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_DEALLOC_SF:
        case MLX5_CMD_OP_DESTROY_UCTX:
        case MLX5_CMD_OP_DESTROY_UMEM:
+       case MLX5_CMD_OP_MODIFY_RQT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_CREATE_RQT:
-       case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:
 
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
index 14295384799606cc69cdc7e421ad835d9f32d67c..0015a81eb9a17b6550cb5038baa8b5271b7580de 100644 (file)
@@ -13,6 +13,9 @@ struct mlx5e_rx_res {
        unsigned int max_nch;
        u32 drop_rqn;
 
+       struct mlx5e_packet_merge_param pkt_merge_param;
+       struct rw_semaphore pkt_merge_param_sem;
+
        struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
        bool rss_active;
        u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
        if (err)
                goto out;
 
+       /* Separated from the channels RQs, does not share pkt_merge state with them */
        mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
                                    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
                                    inner_ft_support);
@@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
        res->max_nch = max_nch;
        res->drop_rqn = drop_rqn;
 
+       res->pkt_merge_param = *init_pkt_merge_param;
+       init_rwsem(&res->pkt_merge_param_sem);
+
        err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
        if (err)
                goto err_out;
@@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
        return mlx5e_tir_get_tirn(&res->ptp.tir);
 }
 
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
 {
        return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
 }
@@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
        if (!builder)
                return -ENOMEM;
 
+       down_write(&res->pkt_merge_param_sem);
+       res->pkt_merge_param = *pkt_merge_param;
+
        mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
 
        final_err = 0;
@@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
                }
        }
 
+       up_write(&res->pkt_merge_param_sem);
        mlx5e_tir_builder_free(builder);
        return final_err;
 }
@@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
 {
        return mlx5e_rss_get_hash(res->rss[0]);
 }
+
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+                               struct mlx5e_tir *tir)
+{
+       bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+       struct mlx5e_tir_builder *builder;
+       u32 rqtn;
+       int err;
+
+       builder = mlx5e_tir_builder_alloc(false);
+       if (!builder)
+               return -ENOMEM;
+
+       rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
+
+       mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
+                                   inner_ft_support);
+       mlx5e_tir_builder_build_direct(builder);
+       mlx5e_tir_builder_build_tls(builder);
+       down_read(&res->pkt_merge_param_sem);
+       mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+       err = mlx5e_tir_init(tir, builder, res->mdev, false);
+       up_read(&res->pkt_merge_param_sem);
+
+       mlx5e_tir_builder_free(builder);
+
+       return err;
+}
index d09f7d174a5180a18cb0db8948bf043323325e82..b39b20a720e0fa244b2047d2ee3ad969f18bfdc9 100644 (file)
@@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
 u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
 u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
 
-/* RQTN getters for modules that create their own TIRs */
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-
 /* Activate/deactivate API */
 void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
 void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
 /* Workaround for hairpin */
 struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
 
+/* Accel TIRs */
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+                               struct mlx5e_tir *tir);
 #endif /* __MLX5_EN_RX_RES_H__ */
index fb5397324aa4f2b597a1f1b0bcd355836e0c382f..2db9573a3fe69d9f175c663334cc6c6c14ad5ec0 100644 (file)
@@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                        eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
                        eseg->swp_inner_l4_offset =
                                (skb->csum_start + skb->head - skb->data) / 2;
-                       if (skb->protocol == htons(ETH_P_IPV6))
+                       if (inner_ip_hdr(skb)->version == 6)
                                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
                        break;
                default:
index a2a9f68579dd82d544b64a9e12a179552119e824..15711814d2d28d8498eec641230470706df14115 100644 (file)
@@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
        return resp_list;
 }
 
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
-{
-       struct mlx5e_tir_builder *builder;
-       int err;
-
-       builder = mlx5e_tir_builder_alloc(false);
-       if (!builder)
-               return -ENOMEM;
-
-       mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
-       mlx5e_tir_builder_build_direct(builder);
-       mlx5e_tir_builder_build_tls(builder);
-       err = mlx5e_tir_init(tir, builder, mdev, false);
-
-       mlx5e_tir_builder_free(builder);
-
-       return err;
-}
-
 static void accel_rule_handle_work(struct work_struct *work)
 {
        struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        struct mlx5_core_dev *mdev;
        struct mlx5e_priv *priv;
        int rxq, err;
-       u32 rqtn;
 
        tls_ctx = tls_get_ctx(sk);
        priv = netdev_priv(netdev);
@@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        priv_rx->sw_stats = &priv->tls->sw_stats;
        mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
-       rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
-
-       err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
+       err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
        if (err)
                goto err_create_tir;
 
index e58a9ec4255322538e6cb847f8dddecc6659754c..48895d79796a82634441668f2a0369a5e7b7e96d 100644 (file)
@@ -1080,6 +1080,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
+#ifdef CONFIG_MLX5_EN_IPSEC
+       &MLX5E_STATS_GRP(ipsec_sw),
+       &MLX5E_STATS_GRP(ipsec_hw),
+#endif
 };
 
 static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
index 96967b0a24418c5aa4018f5396a4a73f731a0c37..793511d5ee4cd969d15c16c7e20af5914161e8f7 100644 (file)
@@ -543,13 +543,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
                                     u16 klm_entries, u16 index)
 {
        struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-       u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+       u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
        u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
        struct page *page = shampo->last_page;
        u64 addr = shampo->last_addr;
        struct mlx5e_dma_info *dma_info;
        struct mlx5e_umr_wqe *umr_wqe;
-       int headroom;
+       int headroom, i;
 
        headroom = rq->buff.headroom;
        new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@@ -601,9 +601,7 @@ update_klm:
 
 err_unmap:
        while (--i >= 0) {
-               if (--index < 0)
-                       index = shampo->hd_per_wq - 1;
-               dma_info = &shampo->info[index];
+               dma_info = &shampo->info[--index];
                if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
                        dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
                        mlx5e_page_release(rq, dma_info, true);
index c6cc67cb4f6add88e0b1f24f00c6112fd8c521d1..d377ddc70fc70b072c441d8a512ce98781e17e71 100644 (file)
@@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
        /* If vports min rate divider is 0 but their group has bw_share configured, then
         * need to set bw_share for vports to minimal value.
         */
-       if (!group_level && !max_guarantee && group->bw_share)
+       if (!group_level && !max_guarantee && group && group->bw_share)
                return 1;
        return 0;
 }
@@ -423,7 +423,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
                return err;
 
        /* Recalculate bw share weights of old and new groups */
-       if (vport->qos.bw_share) {
+       if (vport->qos.bw_share || new_group->bw_share) {
                esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
                esw_qos_normalize_vports_min_rate(esw, new_group, extack);
        }
index a46455694f7ae79eec21c461f9668ff39401f540..32bc08a399256c5ce3bcfed8b9de181ec76045b2 100644 (file)
@@ -329,14 +329,25 @@ static bool
 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
 {
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+       bool result = false;
        int i;
 
-       for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+       /* Indirect table is supported only for flows with in_port uplink
+        * and the destination is vport on the same eswitch as the uplink,
+        * return false in case at least one of destinations doesn't meet
+        * this criteria.
+        */
+       for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
                if (esw_attr->dests[i].rep &&
                    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
-                                               esw_attr->dests[i].mdev))
-                       return true;
-       return false;
+                                               esw_attr->dests[i].mdev)) {
+                       result = true;
+               } else {
+                       result = false;
+                       break;
+               }
+       }
+       return result;
 }
 
 static int
@@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
        struct mlx5_eswitch *esw = master->priv.eswitch;
        struct mlx5_flow_table_attr ft_attr = {
                .max_fte = 1, .prio = 0, .level = 0,
+               .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
        };
        struct mlx5_flow_namespace *egress_ns;
        struct mlx5_flow_table *acl;
index 64f1abc4dc367fe1d05787a571742b9522cdfd93..3ca998874c50d583bbe3713d950bcaf7aa7f950b 100644 (file)
@@ -835,6 +835,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 
        health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
        add_timer(&health->timer);
+
+       if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+               queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 }
 
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -902,8 +905,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
        INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
        INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
        INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
-       if (mlx5_core_is_pf(dev))
-               queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 
        return 0;
 
index ad63dd45c8fb9dbf4d401e16c35f5343b41af857..a6592f9c3c05fc8d2c38f9ea491e3360e9527e80 100644 (file)
@@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
        if (port_sel->tunnel)
                mlx5_destroy_ttc_table(port_sel->inner.ttc);
        mlx5_lag_destroy_definers(ldev);
+       memset(port_sel, 0, sizeof(*port_sel));
 }
index 0dd96a6b140dddfd993ca1a57aa5ffd8acc7d2c0..c1df0d3595d87e283994af8d7a798d93da7842b6 100644 (file)
@@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
        dev->timeouts->to[type] = val;
 }
 
-static void tout_set_def_val(struct mlx5_core_dev *dev)
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
 {
        int i;
 
-       for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+       for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
                tout_set(dev, tout_def_sw_val[i], i);
 }
 
@@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
        if (!dev->timeouts)
                return -ENOMEM;
 
-       tout_set_def_val(dev);
        return 0;
 }
 
index 31faa5c17aa91c89d9e428bb82ef2074e4cfc19a..1c42ead782fa7f4470a3f58dc1d9b2cd4c57e990 100644 (file)
@@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
 void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
 void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
 int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
 u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
 
 #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
index a92a92a52346d8c33c2d272fb8a087bb81a07a22..7df9c7f8d9c8ad27fd624bffb98ddfd80ae75303 100644 (file)
@@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
        if (mlx5_core_is_pf(dev))
                pcie_print_link_status(dev->pdev);
 
-       err = mlx5_tout_init(dev);
-       if (err) {
-               mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
-               return err;
-       }
+       mlx5_tout_set_def_val(dev);
 
        /* wait for firmware to accept initialization segments configurations
         */
@@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
        if (err) {
                mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
                              mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
-               goto err_tout_cleanup;
+               return err;
        }
 
        err = mlx5_cmd_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
-               goto err_tout_cleanup;
+               return err;
        }
 
        mlx5_tout_query_iseg(dev);
@@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 
        mlx5_set_driver_version(dev);
 
-       mlx5_start_health_poll(dev);
-
        err = mlx5_query_hca_caps(dev);
        if (err) {
                mlx5_core_err(dev, "query hca failed\n");
-               goto stop_health;
+               goto reclaim_boot_pages;
        }
 
+       mlx5_start_health_poll(dev);
+
        return 0;
 
-stop_health:
-       mlx5_stop_health_poll(dev, boot);
 reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
 err_disable_hca:
@@ -1094,8 +1088,6 @@ err_disable_hca:
 err_cmd_cleanup:
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
-err_tout_cleanup:
-       mlx5_tout_cleanup(dev);
 
        return err;
 }
@@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
        mlx5_core_disable_hca(dev, 0);
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
-       mlx5_tout_cleanup(dev);
 
        return 0;
 }
@@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
                                            mlx5_debugfs_root);
        INIT_LIST_HEAD(&priv->traps);
 
+       err = mlx5_tout_init(dev);
+       if (err) {
+               mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+               goto err_timeout_init;
+       }
+
        err = mlx5_health_init(dev);
        if (err)
                goto err_health_init;
@@ -1501,6 +1498,8 @@ err_adev_init:
 err_pagealloc_init:
        mlx5_health_cleanup(dev);
 err_health_init:
+       mlx5_tout_cleanup(dev);
+err_timeout_init:
        debugfs_remove(dev->priv.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
@@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
        mlx5_adev_cleanup(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
+       mlx5_tout_cleanup(dev);
        debugfs_remove_recursive(dev->priv.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
index 409cde1e59c6f2caeb34848ca405cc873007c7ed..1e4ad953cffbc5fc066954c75f6b2bf9469142e9 100644 (file)
@@ -1563,8 +1563,10 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
        }
 
        err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
-       if (err)
+       if (err) {
+               mutex_unlock(&ocelot->ptp_lock);
                return err;
+       }
 
        if (l2 && l4)
                cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
index ca4686094701c60eff5a9991990575257f1310ca..0a02d8bd0a3e57ed996c798a5e4fa7875f422338 100644 (file)
@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
 };
 
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
 {
        unsigned int silicon_revision;
        struct sonic_local *lp = netdev_priv(dev);
index d51bac7ba5afadca6df37a8761838432fba08b1f..bd06076803295fb5a6a0946db8be6bf4ad901076 100644 (file)
@@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
        sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
        context_id = recv_ctx->context_id;
        num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
-       ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
-                                   QLCNIC_CMD_ADD_RCV_RINGS);
+       err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+                                       QLCNIC_CMD_ADD_RCV_RINGS);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to alloc mbx args %d\n", err);
+               return err;
+       }
+
        cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
 
        /* set up status rings, mbx 2-81 */
index 748195697e5a09bf09b7a3206ece0ec19807232c..da8306f6073027ac99d57423d1cc50bf426ae233 100644 (file)
@@ -5540,8 +5540,6 @@ static int stmmac_set_features(struct net_device *netdev,
                               netdev_features_t features)
 {
        struct stmmac_priv *priv = netdev_priv(netdev);
-       bool sph_en;
-       u32 chan;
 
        /* Keep the COE Type in case of csum is supporting */
        if (features & NETIF_F_RXCSUM)
@@ -5553,10 +5551,13 @@ static int stmmac_set_features(struct net_device *netdev,
         */
        stmmac_rx_ipc(priv, priv->hw);
 
-       sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+       if (priv->sph_cap) {
+               bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+               u32 chan;
 
-       for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
-               stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+               for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+                       stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+       }
 
        return 0;
 }
index f20376c1ef3fb1f3cc924a7e06a52801aac7696a..8cd265fc1fd9d4eb11b5196e7d831d2ec828f58c 100644 (file)
@@ -2228,7 +2228,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
        if (dev->domain_data.phyirq > 0)
                phydev->irq = dev->domain_data.phyirq;
        else
-               phydev->irq = 0;
+               phydev->irq = PHY_POLL;
        netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
 
        /* set to AUTOMDIX */
index ccf677015d5bc7f7a93d42f1c71571eb08b38e0f..131c745dc7010b1653b937e87c7f7f5a67e3460d 100644 (file)
@@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
        /* strip the ethernet header added for pass through VRF device */
        __skb_pull(skb, skb_network_offset(skb));
 
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
        ret = vrf_ip6_local_out(net, skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                dev->stats.tx_errors++;
@@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                               RT_SCOPE_LINK);
        }
 
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
        ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                vrf_dev->stats.tx_errors++;
index b7197e80f2264053d4e4e28bdf69a33038335294..9a4c8ff32d9dd9407ec50591a44008570f4e7411 100644 (file)
@@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
        return exact;
 }
 
-static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
 {
        node->parent_bit_packed = (unsigned long)parent | bit;
        rcu_assign_pointer(*parent, node);
index 551ddaaaf5400e6eb3e138853f0b4710e0a48901..a46067c38bf5def99accd4b885d3c3bd3095ad4b 100644 (file)
@@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev)
 {
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
+       struct sk_buff *skb;
 
        mutex_lock(&wg->device_update_lock);
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
@@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev)
                wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
        }
        mutex_unlock(&wg->device_update_lock);
-       skb_queue_purge(&wg->incoming_handshakes);
+       while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
+               kfree_skb(skb);
+       atomic_set(&wg->handshake_queue_len, 0);
        wg_socket_reinit(wg, NULL, NULL);
        return 0;
 }
@@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev)
        destroy_workqueue(wg->handshake_receive_wq);
        destroy_workqueue(wg->handshake_send_wq);
        destroy_workqueue(wg->packet_crypt_wq);
-       wg_packet_queue_free(&wg->decrypt_queue);
-       wg_packet_queue_free(&wg->encrypt_queue);
+       wg_packet_queue_free(&wg->handshake_queue, true);
+       wg_packet_queue_free(&wg->decrypt_queue, false);
+       wg_packet_queue_free(&wg->encrypt_queue, false);
        rcu_barrier(); /* Wait for all the peers to be actually freed. */
        wg_ratelimiter_uninit();
        memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
-       skb_queue_purge(&wg->incoming_handshakes);
        free_percpu(dev->tstats);
-       free_percpu(wg->incoming_handshakes_worker);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);
@@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
-       skb_queue_head_init(&wg->incoming_handshakes);
        wg_allowedips_init(&wg->peer_allowedips);
        wg_cookie_checker_init(&wg->cookie_checker, wg);
        INIT_LIST_HEAD(&wg->peer_list);
@@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        if (!dev->tstats)
                goto err_free_index_hashtable;
 
-       wg->incoming_handshakes_worker =
-               wg_packet_percpu_multicore_worker_alloc(
-                               wg_packet_handshake_receive_worker, wg);
-       if (!wg->incoming_handshakes_worker)
-               goto err_free_tstats;
-
        wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
                        WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_receive_wq)
-               goto err_free_incoming_handshakes;
+               goto err_free_tstats;
 
        wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
                        WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        if (ret < 0)
                goto err_free_encrypt_queue;
 
-       ret = wg_ratelimiter_init();
+       ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
+                                  MAX_QUEUED_INCOMING_HANDSHAKES);
        if (ret < 0)
                goto err_free_decrypt_queue;
 
+       ret = wg_ratelimiter_init();
+       if (ret < 0)
+               goto err_free_handshake_queue;
+
        ret = register_netdevice(dev);
        if (ret < 0)
                goto err_uninit_ratelimiter;
@@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 
 err_uninit_ratelimiter:
        wg_ratelimiter_uninit();
+err_free_handshake_queue:
+       wg_packet_queue_free(&wg->handshake_queue, false);
 err_free_decrypt_queue:
-       wg_packet_queue_free(&wg->decrypt_queue);
+       wg_packet_queue_free(&wg->decrypt_queue, false);
 err_free_encrypt_queue:
-       wg_packet_queue_free(&wg->encrypt_queue);
+       wg_packet_queue_free(&wg->encrypt_queue, false);
 err_destroy_packet_crypt:
        destroy_workqueue(wg->packet_crypt_wq);
 err_destroy_handshake_send:
        destroy_workqueue(wg->handshake_send_wq);
 err_destroy_handshake_receive:
        destroy_workqueue(wg->handshake_receive_wq);
-err_free_incoming_handshakes:
-       free_percpu(wg->incoming_handshakes_worker);
 err_free_tstats:
        free_percpu(dev->tstats);
 err_free_index_hashtable:
@@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
 static void wg_netns_pre_exit(struct net *net)
 {
        struct wg_device *wg;
+       struct wg_peer *peer;
 
        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
@@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net)
                        mutex_lock(&wg->device_update_lock);
                        rcu_assign_pointer(wg->creating_net, NULL);
                        wg_socket_reinit(wg, NULL, NULL);
+                       list_for_each_entry(peer, &wg->peer_list, peer_list)
+                               wg_socket_clear_peer_endpoint_src(peer);
                        mutex_unlock(&wg->device_update_lock);
                }
        }
index 854bc3d97150e1c1dab3befbe64966add4f65746..43c7cebbf50b08f2a1868f0017d0bee8aee700f8 100644 (file)
@@ -39,21 +39,18 @@ struct prev_queue {
 
 struct wg_device {
        struct net_device *dev;
-       struct crypt_queue encrypt_queue, decrypt_queue;
+       struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
        struct sock __rcu *sock4, *sock6;
        struct net __rcu *creating_net;
        struct noise_static_identity static_identity;
-       struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
-       struct workqueue_struct *packet_crypt_wq;
-       struct sk_buff_head incoming_handshakes;
-       int incoming_handshake_cpu;
-       struct multicore_worker __percpu *incoming_handshakes_worker;
+       struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq;
        struct cookie_checker cookie_checker;
        struct pubkey_hashtable *peer_hashtable;
        struct index_hashtable *index_hashtable;
        struct allowedips peer_allowedips;
        struct mutex device_update_lock, socket_update_lock;
        struct list_head device_list, peer_list;
+       atomic_t handshake_queue_len;
        unsigned int num_peers, device_update_gen;
        u32 fwmark;
        u16 incoming_port;
index 75dbe77b0b4b4aeacbc75d77524108248b37a2fa..ee4da9ab8013c3ad2721e0e1d4432b2fe007886b 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/genetlink.h>
 #include <net/rtnetlink.h>
 
-static int __init mod_init(void)
+static int __init wg_mod_init(void)
 {
        int ret;
 
@@ -60,7 +60,7 @@ err_allowedips:
        return ret;
 }
 
-static void __exit mod_exit(void)
+static void __exit wg_mod_exit(void)
 {
        wg_genetlink_uninit();
        wg_device_uninit();
@@ -68,8 +68,8 @@ static void __exit mod_exit(void)
        wg_allowedips_slab_uninit();
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(wg_mod_init);
+module_exit(wg_mod_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("WireGuard secure network tunnel");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index 48e7b982a30736bc147712ce52957517e3e862af..1de413b19e3424a2ace2edcbcf0d0d49c4be6167 100644 (file)
@@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
        return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
        free_percpu(queue->worker);
-       WARN_ON(!__ptr_ring_empty(&queue->ring));
-       ptr_ring_cleanup(&queue->ring, NULL);
+       WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+       ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
 }
 
 #define NEXT(skb) ((skb)->prev)
index 4ef2944a68bc906ebec5167d1e17e281ea67be61..e2388107f7fdc9c040841adfc164459e0c777d7d 100644 (file)
@@ -23,7 +23,7 @@ struct sk_buff;
 /* queueing.c APIs: */
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                         unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
 
index 3fedd1d21f5ee019917a7280294cfc7398e65b11..dd55e5c26f468f71518cc5956f4b84480af5d9c8 100644 (file)
@@ -176,12 +176,12 @@ int wg_ratelimiter_init(void)
                        (1U << 14) / sizeof(struct hlist_head)));
        max_entries = table_size * 8;
 
-       table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+       table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
        if (unlikely(!table_v4))
                goto err_kmemcache;
 
 #if IS_ENABLED(CONFIG_IPV6)
-       table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+       table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
        if (unlikely(!table_v6)) {
                kvfree(table_v4);
                goto err_kmemcache;
index 7dc84bcca26139991be00759c0228d3126df2671..7b8df406c7737398f0270361afcb196af4b6a76e 100644 (file)
@@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
                return;
        }
 
-       under_load = skb_queue_len(&wg->incoming_handshakes) >=
-                    MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+       under_load = atomic_read(&wg->handshake_queue_len) >=
+                       MAX_QUEUED_INCOMING_HANDSHAKES / 8;
        if (under_load) {
                last_under_load = ktime_get_coarse_boottime_ns();
        } else if (last_under_load) {
@@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
 
 void wg_packet_handshake_receive_worker(struct work_struct *work)
 {
-       struct wg_device *wg = container_of(work, struct multicore_worker,
-                                           work)->ptr;
+       struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+       struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+       while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                wg_receive_handshake_packet(wg, skb);
                dev_kfree_skb(skb);
+               atomic_dec(&wg->handshake_queue_len);
                cond_resched();
        }
 }
@@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
        case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
        case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
        case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
-               int cpu;
-
-               if (skb_queue_len(&wg->incoming_handshakes) >
-                           MAX_QUEUED_INCOMING_HANDSHAKES ||
-                   unlikely(!rng_is_initialized())) {
+               int cpu, ret = -EBUSY;
+
+               if (unlikely(!rng_is_initialized()))
+                       goto drop;
+               if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+                       if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+                               ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+                               spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+                       }
+               } else
+                       ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+               if (ret) {
+       drop:
                        net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
                                                wg->dev->name, skb);
                        goto err;
                }
-               skb_queue_tail(&wg->incoming_handshakes, skb);
-               /* Queues up a call to packet_process_queued_handshake_
-                * packets(skb):
-                */
-               cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+               atomic_inc(&wg->handshake_queue_len);
+               cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+               /* Queues up a call to packet_process_queued_handshake_packets(skb): */
                queue_work_on(cpu, wg->handshake_receive_wq,
-                       &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+                             &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
                break;
        }
        case cpu_to_le32(MESSAGE_DATA):
index 8c496b7471082eb6c093154d7a05662718f9999c..6f07b949cb81d037842934d6836f3c8b79e4d0ed 100644 (file)
@@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
 {
        write_lock_bh(&peer->endpoint_lock);
        memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
-       dst_cache_reset(&peer->endpoint_cache);
+       dst_cache_reset_now(&peer->endpoint_cache);
        write_unlock_bh(&peer->endpoint_lock);
 }
 
index c875bf35533ce4b8126f88e78ae9bb271d951129..009dd4be597b0c8c096582eeb876cbcb30ba658c 100644 (file)
@@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                if (len < tlv_len) {
                        IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
                                len, tlv_len);
+                       kfree(reduce_power_data);
                        reduce_power_data = ERR_PTR(-EINVAL);
                        goto out;
                }
@@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                                IWL_DEBUG_FW(trans,
                                             "Couldn't allocate (more) reduce_power_data\n");
 
+                               kfree(reduce_power_data);
                                reduce_power_data = ERR_PTR(-ENOMEM);
                                goto out;
                        }
@@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
 done:
        if (!size) {
                IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+               /* Better safe than sorry, but 'reduce_power_data' should
+                * always be NULL if !size.
+                */
+               kfree(reduce_power_data);
                reduce_power_data = ERR_PTR(-ENOENT);
                goto out;
        }
index 36196e07b1a04597d61b5cadca7f7d0d927fc432..5cec467b995bb665f340d95b6aa33f10436ab0e0 100644 (file)
@@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
        const struct iwl_op_mode_ops *ops = op->ops;
        struct dentry *dbgfs_dir = NULL;
        struct iwl_op_mode *op_mode = NULL;
+       int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
+
+       for (retry = 0; retry <= max_retry; retry++) {
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       drv->dbgfs_op_mode = debugfs_create_dir(op->name,
-                                               drv->dbgfs_drv);
-       dbgfs_dir = drv->dbgfs_op_mode;
+               drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+                                                       drv->dbgfs_drv);
+               dbgfs_dir = drv->dbgfs_op_mode;
 #endif
 
-       op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+               op_mode = ops->start(drv->trans, drv->trans->cfg,
+                                    &drv->fw, dbgfs_dir);
+
+               if (op_mode)
+                       return op_mode;
+
+               IWL_ERR(drv, "retry init count %d\n", retry);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (!op_mode) {
                debugfs_remove_recursive(drv->dbgfs_op_mode);
                drv->dbgfs_op_mode = NULL;
-       }
 #endif
+       }
 
-       return op_mode;
+       return NULL;
 }
 
 static void _iwl_op_mode_stop(struct iwl_drv *drv)
index 2e2d60a586925d8a03714ab5919344da50ae4438..0fd009e6d6857f5939e73356c9f73a46001fe4d5 100644 (file)
@@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
 #define IWL_EXPORT_SYMBOL(sym)
 #endif
 
+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
 #endif /* __iwl_drv_h__ */
index 9fb9c7dad314f1bcbbf6e4f67645befdb3d31d08..897e3b91ddb2fec9e3d2f85842fbcc85f44281ca 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/ieee80211_radiotap.h>
 #include <net/tcp.h>
 
+#include "iwl-drv.h"
 #include "iwl-op-mode.h"
 #include "iwl-io.h"
 #include "mvm.h"
@@ -1117,9 +1118,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
+       int retry, max_retry = 0;
 
        mutex_lock(&mvm->mutex);
-       ret = __iwl_mvm_mac_start(mvm);
+
+       /* we are starting the mac not in error flow, and restart is enabled */
+       if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+           iwlwifi_mod_params.fw_restart) {
+               max_retry = IWL_MAX_INIT_RETRY;
+               /*
+                * This will prevent mac80211 recovery flows to trigger during
+                * init failures
+                */
+               set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+       }
+
+       for (retry = 0; retry <= max_retry; retry++) {
+               ret = __iwl_mvm_mac_start(mvm);
+               if (!ret)
+                       break;
+
+               IWL_ERR(mvm, "mac start retry %d\n", retry);
+       }
+       clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+
        mutex_unlock(&mvm->mutex);
 
        return ret;
index 2b1dcd60e00f65598d77712b042493247b4a2b91..a72d85086fe331cb256a2e88c88293a5e4b55bf0 100644 (file)
@@ -1123,6 +1123,8 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
  * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
  * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_STARTING: starting mac,
+ *     used to disable restart flow while in STARTING state
  */
 enum iwl_mvm_status {
        IWL_MVM_STATUS_HW_RFKILL,
@@ -1134,6 +1136,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_FIRMWARE_RUNNING,
        IWL_MVM_STATUS_NEED_FLUSH_P2P,
        IWL_MVM_STATUS_IN_D3,
+       IWL_MVM_STATUS_STARTING,
 };
 
 /* Keep track of completed init configuration */
index 232ad531d612a2d2b4e85fc7a8558919b119ad8d..cd08e289cd9a0bf8c3746e1734ea72664277074b 100644 (file)
@@ -686,6 +686,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
        int ret;
 
        rtnl_lock();
+       wiphy_lock(mvm->hw->wiphy);
        mutex_lock(&mvm->mutex);
 
        ret = iwl_run_init_mvm_ucode(mvm);
@@ -701,6 +702,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
                iwl_mvm_stop_device(mvm);
 
        mutex_unlock(&mvm->mutex);
+       wiphy_unlock(mvm->hw->wiphy);
        rtnl_unlock();
 
        if (ret < 0)
@@ -1600,6 +1602,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         */
        if (!mvm->fw_restart && fw_error) {
                iwl_fw_error_collect(&mvm->fwrt, false);
+       } else if (test_bit(IWL_MVM_STATUS_STARTING,
+                           &mvm->status)) {
+               IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
        } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
 
index c574f041f0969268132a1fe3f68b347bea3b903a..5ce07f28e7c33e853768581f851e3b72e9bd715d 100644 (file)
@@ -1339,9 +1339,13 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
                      u16 mac_type, u8 mac_step,
                      u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
 {
+       int num_devices = ARRAY_SIZE(iwl_dev_info_table);
        int i;
 
-       for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) {
+       if (!num_devices)
+               return NULL;
+
+       for (i = num_devices - 1; i >= 0; i--) {
                const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
 
                if (dev_info->device != (u16)IWL_CFG_ANY &&
@@ -1442,8 +1446,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        if (iwl_trans->trans_cfg->rf_id &&
            iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
-           !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans))
+           !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) {
+               ret = -EINVAL;
                goto out_free_trans;
+       }
 
        dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
                                         CSR_HW_REV_TYPE(iwl_trans->hw_rev),
index 5ee52cd70a4b45a1be50b518015a8e0943b71879..d1806f198aed99e866aa9683d8114eba2e92be5c 100644 (file)
@@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (!wcid)
                wcid = &dev->mt76.global_wcid;
 
-       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
-
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
                struct mt7615_phy *phy = &dev->phy;
 
@@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (id < 0)
                return id;
 
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
        mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
                              pid, key, false);
 
index bd2939ebcbf4841a4265e162b0cfacdee11a7338..5a6d7829c6e04f7fd9c101e62fbc047d75486b6b 100644 (file)
@@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
 static void
 mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                           enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                          struct ieee80211_key_conf *key, int pid,
                           struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       __le32 *txwi;
-       int pid;
-
-       if (!wcid)
-               wcid = &dev->mt76.global_wcid;
-
-       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
 
-       txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
        memset(txwi, 0, MT_USB_TXD_SIZE);
        mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
        skb_push(skb, MT_USB_TXD_SIZE);
@@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        struct sk_buff *skb = tx_info->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
        struct mt7615_sta *msta;
-       int pad;
+       int pad, err, pktid;
 
        msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+       if (!wcid)
+               wcid = &dev->mt76.global_wcid;
+
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
            msta && !msta->rate_probe) {
                /* request to configure sampling rate */
@@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                spin_unlock_bh(&dev->mt76.lock);
        }
 
-       mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
        if (mt76_is_usb(mdev)) {
                u32 len = skb->len;
 
@@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                pad = round_up(skb->len, 4) - skb->len;
        }
 
-       return mt76_skb_adjust_pad(skb, pad);
+       err = mt76_skb_adjust_pad(skb, pad);
+       if (err)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pktid);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
 
index efd70ddc2fd109b754df8e8c12c71c5b2403f9d0..2c6c03809b20eb628b99a22bb851ff07f14be2a6 100644 (file)
@@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
        bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
        enum mt76_qsel qsel;
        u32 flags;
+       int err;
 
        mt76_insert_hdr_pad(tx_info->skb);
 
@@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
                ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
        }
 
-       return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+       err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+       if (err && wcid)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pid);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
 
index 5fcf35f2d9fbe42760c7ff28b53c3a96f68d62f8..809dc18e5083c21206447e4b8f4e811fd0c3c41f 100644 (file)
@@ -1151,8 +1151,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                }
        }
 
-       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+       t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+       t->skb = tx_info->skb;
+
+       id = mt76_token_consume(mdev, &t);
+       if (id < 0)
+               return id;
 
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
        mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
                              false);
 
@@ -1178,13 +1184,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                txp->bss_idx = mvif->idx;
        }
 
-       t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
-       t->skb = tx_info->skb;
-
-       id = mt76_token_consume(mdev, &t);
-       if (id < 0)
-               return id;
-
        txp->token = cpu_to_le16(id);
        if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
                txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
index 899957b9d0f19c6c11ccb3f0c52654d49e3dec50..852d5d97c70b1517902af6177b5f16a0c0a98698 100644 (file)
@@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
                if (ht_cap->ht_supported)
                        mode |= PHY_MODE_GN;
 
-               if (he_cap->has_he)
+               if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_24G;
        } else if (band == NL80211_BAND_5GHZ) {
                mode |= PHY_MODE_A;
@@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
                if (vht_cap->vht_supported)
                        mode |= PHY_MODE_AC;
 
-               if (he_cap->has_he)
+               if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_5G;
        }
 
index 137f86a6dbf875d3122c23b2a26e670843a16db4..bdec508b6b9ffa00a65875df4de56259ad5be02e 100644 (file)
@@ -142,15 +142,11 @@ out:
 static void
 mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
                   enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key, int pid,
                   struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       __le32 *txwi;
-       int pid;
+       __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
 
-       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
-       txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
        memset(txwi, 0, MT_SDIO_TXD_SIZE);
        mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
        skb_push(skb, MT_SDIO_TXD_SIZE);
@@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 {
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
        struct sk_buff *skb = tx_info->skb;
-       int pad;
+       int err, pad, pktid;
 
        if (unlikely(tx_info->skb->len <= ETH_HLEN))
                return -EINVAL;
@@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                }
        }
 
-       mt7921s_write_txwi(dev, wcid, qid, sta, skb);
+       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
 
        mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
        pad = round_up(skb->len, 4) - skb->len;
 
-       return mt76_skb_adjust_pad(skb, pad);
+       err = mt76_skb_adjust_pad(skb, pad);
+       if (err)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pktid);
+
+       return err;
 }
 
 void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
index 11719ef034d888482144f30dab21d0270e480ffc..6b8c9dc80542554f19a2232f4080bb5cd9b79da8 100644 (file)
@@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                        if (!(cb->flags & MT_TX_CB_DMA_DONE))
                                continue;
 
-                       if (!time_is_after_jiffies(cb->jiffies +
+                       if (time_is_after_jiffies(cb->jiffies +
                                                   MT_TX_STATUS_SKB_TIMEOUT))
                                continue;
                }
index e4473a5512415241d012f973862e12d122b3845e..74c3d8cb31002d0ec583fbbda66df4d9efcde6db 100644 (file)
@@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
        if (status == -ENODEV || status == -ENOENT)
                return true;
 
+       if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
+               return false;
+
        if (status == -EPROTO || status == -ETIMEDOUT)
                rt2x00dev->num_proto_errs++;
        else
index 212aaf577d3c5eca878c0793749f142720658a0f..65ef3dc9d061415acc9ce97cd11ebafb742b2b36 100644 (file)
@@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
        info->section_num = GET_FW_HDR_SEC_NUM(fw);
        info->hdr_len = RTW89_FW_HDR_SIZE +
                        info->section_num * RTW89_FW_SECTION_HDR_SIZE;
-       SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN);
 
        bin = fw + info->hdr_len;
 
@@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
        }
 
        skb_put_data(skb, fw, len);
+       SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
        rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
                                   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
                                   H2C_FUNC_MAC_FWHDR_DL, len);
index 7ee0d932331075359d0af2f14ba2048822e0d531..36e8d0da6c1e78a0af948c8e4d5caaf4dd7e48a8 100644 (file)
@@ -282,8 +282,10 @@ struct rtw89_h2creg_sch_tx_en {
        le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8))
 #define GET_FW_HDR_CMD_VERSERION(fwhdr)        \
        le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24))
-#define SET_FW_HDR_PART_SIZE(fwhdr, val)       \
-       le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0))
+static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
+{
+       le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
+}
 
 #define SET_CTRL_INFO_MACID(table, val) \
        le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0))
index b9fac786246ab5c076da2d6248bfeb136adbbc31..2a5c1829aab790f6b4ade183ec96e2eca84c357b 100644 (file)
@@ -463,17 +463,12 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
 
 static int __init init_dtpm(void)
 {
-       struct dtpm_descr *dtpm_descr;
-
        pct = powercap_register_control_type(NULL, "dtpm", NULL);
        if (IS_ERR(pct)) {
                pr_err("Failed to register control type\n");
                return PTR_ERR(pct);
        }
 
-       for_each_dtpm_table(dtpm_descr)
-               dtpm_descr->init();
-
        return 0;
 }
 late_initcall(init_dtpm);
index b940e0268f96fad37563e49782fafa7b240a0588..e83453bea2aee1eaf2957b5223f8b41685c7760c 100644 (file)
@@ -5095,14 +5095,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                /* NPort Recovery mode or node is just allocated */
                if (!lpfc_nlp_not_used(ndlp)) {
                        /* A LOGO is completing and the node is in NPR state.
-                        * If this a fabric node that cleared its transport
-                        * registration, release the rpi.
+                        * Just unregister the RPI because the node is still
+                        * required.
                         */
-                       spin_lock_irq(&ndlp->lock);
-                       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
-                               ndlp->nlp_flag |= NLP_RELEASE_RPI;
-                       spin_unlock_irq(&ndlp->lock);
                        lpfc_unreg_rpi(vport, ndlp);
                } else {
                        /* Indicate the node has already released, should
index 51424557810dab378a5178d2f7beda751fa12a94..f725248ba57f428f107a822880a4ebde605135ab 100644 (file)
@@ -421,6 +421,13 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
        return err;
 }
 
+static int ufs_intel_adl_init(struct ufs_hba *hba)
+{
+       hba->nop_out_timeout = 200;
+       hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+       return ufs_intel_common_init(hba);
+}
+
 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
@@ -449,6 +456,15 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
+static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+       .name                   = "intel-pci",
+       .init                   = ufs_intel_adl_init,
+       .exit                   = ufs_intel_common_exit,
+       .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
+       .device_reset           = ufs_intel_device_reset,
+};
+
 #ifdef CONFIG_PM_SLEEP
 static int ufshcd_pci_restore(struct device *dev)
 {
@@ -563,6 +579,8 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { }     /* terminate list */
 };
 
index 8239fe7129dd7a3b241813e215c04dd69df20d36..019351c0b52cfa250b4325fe9434e416fb4cd342 100644 (file)
@@ -434,6 +434,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1532, 0x0116), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
+       { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
        { USB_DEVICE(0x17ef, 0xa012), .driver_info =
                        USB_QUIRK_DISCONNECT_SUSPEND },
index 56cd551e0e04dfb5840cddc5a227cffdee1b904f..362f91ec884585c929b0f1cfb22913a6b9c94793 100644 (file)
@@ -98,7 +98,8 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
                        version = cpu_to_le16(0x0201);
 
                if (igd_opregion_shift_copy(buf, &off,
-                                           &version + (pos - OPREGION_VERSION),
+                                           (u8 *)&version +
+                                           (pos - OPREGION_VERSION),
                                            &pos, &remaining, bytes))
                        return -EFAULT;
        }
@@ -121,7 +122,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
                                          OPREGION_SIZE : 0);
 
                if (igd_opregion_shift_copy(buf, &off,
-                                           &rvda + (pos - OPREGION_RVDA),
+                                           (u8 *)&rvda + (pos - OPREGION_RVDA),
                                            &pos, &remaining, bytes))
                        return -EFAULT;
        }
index 82fb75464f923d47a225f8262595af0e51b51753..735d1d344af9d48277508e565221182c695858f0 100644 (file)
@@ -232,7 +232,7 @@ static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
 }
 #endif /* CONFIG_VFIO_NOIOMMU */
 
-/**
+/*
  * IOMMU driver registration
  */
 int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
@@ -285,7 +285,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data);
 static void vfio_group_get(struct vfio_group *group);
 
-/**
+/*
  * Container objects - containers are created when /dev/vfio/vfio is
  * opened, but their lifecycle extends until the last user is done, so
  * it's freed via kref.  Must support container/group/device being
@@ -309,7 +309,7 @@ static void vfio_container_put(struct vfio_container *container)
        kref_put(&container->kref, vfio_container_release);
 }
 
-/**
+/*
  * Group objects - create, release, get, put, search
  */
 static struct vfio_group *
@@ -488,7 +488,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
        return group;
 }
 
-/**
+/*
  * Device objects - create, release, get, put, search
  */
 /* Device reference always implies a group reference */
@@ -595,7 +595,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
        return ret;
 }
 
-/**
+/*
  * Async device support
  */
 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
@@ -689,7 +689,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-/**
+/*
  * VFIO driver API
  */
 void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -831,7 +831,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
 
-/**
+/*
  * Get a reference to the vfio_device for a device.  Even if the
  * caller thinks they own the device, they could be racing with a
  * release call path, so we can't trust drvdata for the shortcut.
@@ -965,7 +965,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
 
-/**
+/*
  * VFIO base fd, /dev/vfio/vfio
  */
 static long vfio_ioctl_check_extension(struct vfio_container *container,
@@ -1183,7 +1183,7 @@ static const struct file_operations vfio_fops = {
        .compat_ioctl   = compat_ptr_ioctl,
 };
 
-/**
+/*
  * VFIO Group fd, /dev/vfio/$GROUP
  */
 static void __vfio_group_unset_container(struct vfio_group *group)
@@ -1536,7 +1536,7 @@ static const struct file_operations vfio_group_fops = {
        .release        = vfio_group_fops_release,
 };
 
-/**
+/*
  * VFIO Device fd
  */
 static int vfio_device_fops_release(struct inode *inode, struct file *filep)
@@ -1611,7 +1611,7 @@ static const struct file_operations vfio_device_fops = {
        .mmap           = vfio_device_fops_mmap,
 };
 
-/**
+/*
  * External user API, exported by symbols to be linked dynamically.
  *
  * The protocol includes:
@@ -1659,7 +1659,7 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
 }
 EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
 
-/**
+/*
  * External user API, exported by symbols to be linked dynamically.
  * The external user passes in a device pointer
  * to verify that:
@@ -1725,7 +1725,7 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
 }
 EXPORT_SYMBOL_GPL(vfio_external_check_extension);
 
-/**
+/*
  * Sub-module support
  */
 /*
@@ -2272,7 +2272,7 @@ struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
 }
 EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
 
-/**
+/*
  * Module/class support
  */
 static char *vfio_devnode(struct device *dev, umode_t *mode)
index 6b705026da1a37d238348e0fefe06f022913bfe9..18448dbd762a89ce92b6c70e0b55eb54c1d0f74a 100644 (file)
@@ -1562,6 +1562,10 @@ smbd_connected:
        /* fscache server cookies are based on primary channel only */
        if (!CIFS_SERVER_IS_CHAN(tcp_ses))
                cifs_fscache_get_client_cookie(tcp_ses);
+#ifdef CONFIG_CIFS_FSCACHE
+       else
+               tcp_ses->fscache = tcp_ses->primary_server->fscache;
+#endif /* CONFIG_CIFS_FSCACHE */
 
        /* queue echo request delayed work */
        queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -3046,12 +3050,6 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
                                cifs_dbg(VFS, "read only mount of RW share\n");
                        /* no need to log a RW mount of a typical RW share */
                }
-               /*
-                * The cookie is initialized from volume info returned above.
-                * Inside cifs_fscache_get_super_cookie it checks
-                * that we do not get super cookie twice.
-                */
-               cifs_fscache_get_super_cookie(tcon);
        }
 
        /*
@@ -3426,6 +3424,7 @@ static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list
         */
        mount_put_conns(mnt_ctx);
        mount_get_dfs_conns(mnt_ctx);
+       set_root_ses(mnt_ctx);
 
        full_path = build_unc_path_to_root(ctx, cifs_sb, true);
        if (IS_ERR(full_path))
index 7e409a38a2d7c184abf3ab0c1afbea2ccbfe9f6f..003c5f1f4dfb1a058afee9a917818f6c43891d75 100644 (file)
  * Key layout of CIFS server cache index object
  */
 struct cifs_server_key {
-       struct {
-               uint16_t        family;         /* address family */
-               __be16          port;           /* IP port */
-       } hdr;
-       union {
-               struct in_addr  ipv4_addr;
-               struct in6_addr ipv6_addr;
-       };
+       __u64 conn_id;
 } __packed;
 
 /*
@@ -31,42 +24,23 @@ struct cifs_server_key {
  */
 void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
 {
-       const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
-       const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
-       const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
        struct cifs_server_key key;
-       uint16_t key_len = sizeof(key.hdr);
-
-       memset(&key, 0, sizeof(key));
 
        /*
-        * Should not be a problem as sin_family/sin6_family overlays
-        * sa_family field
+        * Check if cookie was already initialized so don't reinitialize it.
+        * In the future, as we integrate with newer fscache features,
+        * we may want to instead add a check if cookie has changed
         */
-       key.hdr.family = sa->sa_family;
-       switch (sa->sa_family) {
-       case AF_INET:
-               key.hdr.port = addr->sin_port;
-               key.ipv4_addr = addr->sin_addr;
-               key_len += sizeof(key.ipv4_addr);
-               break;
-
-       case AF_INET6:
-               key.hdr.port = addr6->sin6_port;
-               key.ipv6_addr = addr6->sin6_addr;
-               key_len += sizeof(key.ipv6_addr);
-               break;
-
-       default:
-               cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
-               server->fscache = NULL;
+       if (server->fscache)
                return;
-       }
+
+       memset(&key, 0, sizeof(key));
+       key.conn_id = server->conn_id;
 
        server->fscache =
                fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
                                       &cifs_fscache_server_index_def,
-                                      &key, key_len,
+                                      &key, sizeof(key),
                                       NULL, 0,
                                       server, 0, true);
        cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
@@ -92,7 +66,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
         * In the future, as we integrate with newer fscache features,
         * we may want to instead add a check if cookie has changed
         */
-       if (tcon->fscache == NULL)
+       if (tcon->fscache)
                return;
 
        sharename = extract_sharename(tcon->treeName);
index 82848412ad85208f08d1fad12bc2871297c8f252..96d083db173724901e3193b51ce4e1dda3bde64d 100644 (file)
@@ -1376,6 +1376,13 @@ iget_no_retry:
                inode = ERR_PTR(rc);
        }
 
+       /*
+        * The cookie is initialized from volume info returned above.
+        * Inside cifs_fscache_get_super_cookie it checks
+        * that we do not get super cookie twice.
+        */
+       cifs_fscache_get_super_cookie(tcon);
+
 out:
        kfree(path);
        free_xid(xid);
index 8627dacfc4246fb975ed38f2b2dc8090f7c7202c..ad4a8bf3cf109fd984449eedef5de6bf9fa342e8 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -858,6 +858,10 @@ loop:
                        file = NULL;
                else if (!get_file_rcu_many(file, refs))
                        goto loop;
+               else if (files_lookup_fd_raw(files, fd) != file) {
+                       fput_many(file, refs);
+                       goto loop;
+               }
        }
        rcu_read_unlock();
 
index 8dbd6fe664204b8a7bee7b8908fdbe6c3aa312b2..44a7a4288956b60d174340befd7ee3b19bc8c8e3 100644 (file)
@@ -1857,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 
 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 {
-       struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;
@@ -1890,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
         * keep the glock until the last strong holder is done with it.
         */
        if (!find_first_strong_holder(gl)) {
-               if (state == LM_ST_UNLOCKED)
-                       mock_gh.gh_state = LM_ST_EXCLUSIVE;
+               struct gfs2_holder mock_gh = {
+                       .gh_gl = gl,
+                       .gh_state = (state == LM_ST_UNLOCKED) ?
+                                   LM_ST_EXCLUSIVE : state,
+                       .gh_iflags = BIT(HIF_HOLDER)
+               };
+
                demote_incompat_holders(gl, &mock_gh);
        }
        handle_callback(gl, state, delay, true);
index 6424b903e88515f191bc21b7f4b72cd51c0a70f6..89905f4f29bb6de91e181373c4f168431f3e8e74 100644 (file)
@@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops;
 static const struct inode_operations gfs2_dir_iops;
 static const struct inode_operations gfs2_symlink_iops;
 
-static int iget_test(struct inode *inode, void *opaque)
-{
-       u64 no_addr = *(u64 *)opaque;
-
-       return GFS2_I(inode)->i_no_addr == no_addr;
-}
-
-static int iget_set(struct inode *inode, void *opaque)
-{
-       u64 no_addr = *(u64 *)opaque;
-
-       GFS2_I(inode)->i_no_addr = no_addr;
-       inode->i_ino = no_addr;
-       return 0;
-}
-
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
-{
-       struct inode *inode;
-
-repeat:
-       inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
-       if (!inode)
-               return inode;
-       if (is_bad_inode(inode)) {
-               iput(inode);
-               goto repeat;
-       }
-       return inode;
-}
-
 /**
  * gfs2_set_iop - Sets inode operations
  * @inode: The inode with correct i_mode filled in
@@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode)
        }
 }
 
+static int iget_test(struct inode *inode, void *opaque)
+{
+       u64 no_addr = *(u64 *)opaque;
+
+       return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+       u64 no_addr = *(u64 *)opaque;
+
+       GFS2_I(inode)->i_no_addr = no_addr;
+       inode->i_ino = no_addr;
+       return 0;
+}
+
 /**
  * gfs2_inode_lookup - Lookup an inode
  * @sb: The super block
@@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 {
        struct inode *inode;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl = NULL;
        struct gfs2_holder i_gh;
        int error;
 
        gfs2_holder_mark_uninitialized(&i_gh);
-       inode = gfs2_iget(sb, no_addr);
+       inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
@@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
        if (inode->i_state & I_NEW) {
                struct gfs2_sbd *sdp = GFS2_SB(inode);
+               struct gfs2_glock *io_gl;
 
                error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
                if (unlikely(error))
                        goto fail;
-               flush_delayed_work(&ip->i_gl->gl_work);
-
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-               if (unlikely(error))
-                       goto fail;
-               if (blktype != GFS2_BLKST_UNLINKED)
-                       gfs2_cancel_delete_work(io_gl);
 
                if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
                        /*
                         * The GL_SKIP flag indicates to skip reading the inode
-                        * block.  We read the inode with gfs2_inode_refresh
+                        * block.  We read the inode when instantiating it
                         * after possibly checking the block type.
                         */
                        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
@@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                        }
                }
 
-               glock_set_object(ip->i_gl, ip);
                set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
-               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                if (unlikely(error))
                        goto fail;
-               glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+               if (blktype != GFS2_BLKST_UNLINKED)
+                       gfs2_cancel_delete_work(io_gl);
+               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                gfs2_glock_put(io_gl);
-               io_gl = NULL;
+               if (unlikely(error))
+                       goto fail;
 
                /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
                inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
                inode->i_atime.tv_nsec = 0;
 
+               glock_set_object(ip->i_gl, ip);
+
                if (type == DT_UNKNOWN) {
                        /* Inode glock must be locked already */
                        error = gfs2_instantiate(&i_gh);
-                       if (error)
+                       if (error) {
+                               glock_clear_object(ip->i_gl, ip);
                                goto fail;
+                       }
                } else {
                        ip->i_no_formal_ino = no_formal_ino;
                        inode->i_mode = DT2IF(type);
@@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
                if (gfs2_holder_initialized(&i_gh))
                        gfs2_glock_dq_uninit(&i_gh);
+               glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 
                gfs2_set_iop(inode);
+               unlock_new_inode(inode);
        }
 
        if (no_formal_ino && ip->i_no_formal_ino &&
            no_formal_ino != ip->i_no_formal_ino) {
-               error = -ESTALE;
-               if (inode->i_state & I_NEW)
-                       goto fail;
                iput(inode);
-               return ERR_PTR(error);
+               return ERR_PTR(-ESTALE);
        }
 
-       if (inode->i_state & I_NEW)
-               unlock_new_inode(inode);
-
        return inode;
 
 fail:
-       if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-               glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+       if (gfs2_holder_initialized(&ip->i_iopen_gh))
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-       }
-       if (io_gl)
-               gfs2_glock_put(io_gl);
        if (gfs2_holder_initialized(&i_gh))
                gfs2_glock_dq_uninit(&i_gh);
        iget_failed(inode);
@@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
        if (error)
                goto fail_free_inode;
-       flush_delayed_work(&ip->i_gl->gl_work);
 
        error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
        if (error)
                goto fail_free_inode;
        gfs2_cancel_delete_work(io_gl);
 
+       error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+       BUG_ON(error);
+
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
        if (error)
                goto fail_gunlock2;
 
-       glock_set_object(ip->i_gl, ip);
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto fail_gunlock2;
@@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (error)
                goto fail_gunlock2;
 
+       glock_set_object(ip->i_gl, ip);
        glock_set_object(io_gl, ip);
        gfs2_set_iop(inode);
-       insert_inode_hash(inode);
 
        free_vfs_inode = 0; /* After this point, the inode is no longer
                               considered free. Any failures need to undo
@@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        gfs2_glock_dq_uninit(ghs + 1);
        gfs2_glock_put(io_gl);
        gfs2_qa_put(dip);
+       unlock_new_inode(inode);
        return error;
 
 fail_gunlock3:
+       glock_clear_object(ip->i_gl, ip);
        glock_clear_object(io_gl, ip);
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
-       glock_clear_object(io_gl, ip);
        gfs2_glock_put(io_gl);
 fail_free_inode:
        if (ip->i_gl) {
-               glock_clear_object(ip->i_gl, ip);
                if (free_vfs_inode) /* else evict will do the put for us */
                        gfs2_glock_put(ip->i_gl);
        }
@@ -829,7 +807,10 @@ fail_gunlock:
                        mark_inode_dirty(inode);
                set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
                        &GFS2_I(inode)->i_flags);
-               iput(inode);
+               if (inode->i_state & I_NEW)
+                       iget_failed(inode);
+               else
+                       iput(inode);
        }
        if (gfs2_holder_initialized(ghs + 1))
                gfs2_glock_dq_uninit(ghs + 1);
index 88202de519f6d1755103f435ae6b2eea14e55adb..50cf9f92da3613f22da54392dcbba54e3d20e8a3 100644 (file)
@@ -714,6 +714,13 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
 
 static inline bool io_should_retry_thread(long err)
 {
+       /*
+        * Prevent perpetual task_work retry, if the task (or its group) is
+        * exiting.
+        */
+       if (fatal_signal_pending(current))
+               return false;
+
        switch (err) {
        case -EAGAIN:
        case -ERESTARTSYS:
index 9320a42dfaf9737629045c4d81908f96993edd39..7046f9bdd8dcdef66052c3c8a1002770144455e1 100644 (file)
@@ -1008,8 +1008,8 @@ out:
 }
 EXPORT_SYMBOL(netfs_readpage);
 
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
+/*
+ * Prepare a folio for writing without reading first
  * @folio: The folio being prepared
  * @pos: starting position for the write
  * @len: length of write
index 64b9bf33480659fcbaa0a5c8c5ad259c9a44cd55..6771f357ad2cce9738c4bdbc9720763e1a3f5025 100644 (file)
@@ -3122,7 +3122,6 @@ xfs_rename(
         * appropriately.
         */
        if (flags & RENAME_WHITEOUT) {
-               ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
                error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
                if (error)
                        return error;
index e974caf39d3e3bfa7d09b577b488650edc5ceebf..8c8f7a4d93afb96518c18da10c7e8f5bee62ef2d 100644 (file)
@@ -153,6 +153,8 @@ struct kretprobe {
        struct kretprobe_holder *rph;
 };
 
+#define KRETPROBE_MAX_DATA_SIZE        4096
+
 struct kretprobe_instance {
        union {
                struct freelist_node freelist;
index 3636df90899a2431ecbf0acd66789a851db5a164..fbaab440a4846ecfab2ecaf7ae58f05a4dc974c3 100644 (file)
@@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits {
        u8         regs_84_to_68[0x11];
        u8         tracer_registers[0x4];
 
-       u8         regs_63_to_32[0x20];
+       u8         regs_63_to_46[0x12];
+       u8         mrtc[0x1];
+       u8         regs_44_to_32[0xd];
+
        u8         regs_31_to_0[0x20];
 };
 
index 3ec42495a43a56dbd51fecd166d572a9e586e3e4..be5cb3360b944ca1519f2da335b3b6053c18e443 100644 (file)
@@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
        spin_lock(&txq->_xmit_lock);
-       txq->xmit_lock_owner = cpu;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
        spin_lock_bh(&txq->_xmit_lock);
-       txq->xmit_lock_owner = smp_processor_id();
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
        bool ok = spin_trylock(&txq->_xmit_lock);
-       if (likely(ok))
-               txq->xmit_lock_owner = smp_processor_id();
+
+       if (likely(ok)) {
+               /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+               WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+       }
        return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-       txq->xmit_lock_owner = -1;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, -1);
        spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-       txq->xmit_lock_owner = -1;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, -1);
        spin_unlock_bh(&txq->_xmit_lock);
 }
 
index 6c9f19a33865ab857de84c576718ae95998d7be0..ce3c58286062c4d33e88e9fc1830621625a0bc4d 100644 (file)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
                         u64 *utime, u64 *stime);
 extern u64 task_gtime(struct task_struct *t);
 #else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
                                u64 *utime, u64 *stime)
 {
        *utime = t->utime;
        *stime = t->stime;
+       return false;
 }
 
 static inline u64 task_gtime(struct task_struct *t)
index bf21591a9e5e653585c26cb3f3f0857256c0eb89..0cda61855d90719e6175d0325598bbbe1b79254b 100644 (file)
@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
 }
 
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
 
 u64 siphash_1u64(const u64 a, const siphash_key_t *key);
 u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
 static inline u64 siphash(const void *data, size_t len,
                          const siphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-       if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
                return __siphash_unaligned(data, len, key);
-#endif
        return ___siphash_aligned(data, len, key);
 }
 
@@ -96,10 +93,8 @@ typedef struct {
 
 u32 __hsiphash_aligned(const void *data, size_t len,
                       const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key);
-#endif
 
 u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
 u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
 static inline u32 hsiphash(const void *data, size_t len,
                           const hsiphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-       if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
                return __hsiphash_unaligned(data, len, key);
-#endif
        return ___hsiphash_aligned(data, len, key);
 }
 
index 4202c609bb0b09345c0f1c5105adf409a3a89f74..7994455ec714610fbcb563d8c7a1411b02201e05 100644 (file)
@@ -133,7 +133,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
        if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
                WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
-       sk_rx_queue_set(sk, skb);
+       sk_rx_queue_update(sk, skb);
 }
 
 static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
index 67634675e9197cdbd8225e0e4aa1547d8f09f036..df6622a5fe98f0a9732617bb2a757ef9c9611797 100644 (file)
@@ -79,6 +79,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
        dst_cache->reset_ts = jiffies;
 }
 
+/**
+ *     dst_cache_reset_now - invalidate the cache contents immediately
+ *     @dst_cache: the cache
+ *
+ *     The caller must be sure there are no concurrent users, as this frees
+ *     all dst_cache users immediately, rather than waiting for the next
+ *     per-cpu usage like dst_cache_reset does. Most callers should use the
+ *     higher speed lazily-freed dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
 /**
  *     dst_cache_init - initialize the cache, allocating the required storage
  *     @dst_cache: the cache
index 4b10676c69d1917e4c30e086bf8f00b1e0f37ed4..bd07484ab9dd5f9de0321f63393941b521a0b5fa 100644 (file)
@@ -69,7 +69,7 @@ struct fib_rules_ops {
        int                     (*action)(struct fib_rule *,
                                          struct flowi *, int,
                                          struct fib_lookup_arg *);
-       bool                    (*suppress)(struct fib_rule *,
+       bool                    (*suppress)(struct fib_rule *, int,
                                            struct fib_lookup_arg *);
        int                     (*match)(struct fib_rule *,
                                         struct flowi *, int);
@@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
                            struct fib_lookup_arg *arg));
 
 INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg));
 INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg));
 #endif
index ab5348e57db1a627cbce2dededb2e9b754d1f2cd..3417ba2d27ad6a1b5612a8855d2788f10d9fdf25 100644 (file)
@@ -438,7 +438,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
-       return net->ipv4.fib_num_tclassid_users;
+       return atomic_read(&net->ipv4.fib_num_tclassid_users);
 }
 #else
 static inline int fib_num_tclassid_users(struct net *net)
index 2f65701a43c953bd3a9a9e3d491882cb7bb11859..6c5b2efc4f17d0d17be750d0c1a2e1d169ec063e 100644 (file)
@@ -65,7 +65,7 @@ struct netns_ipv4 {
        bool                    fib_has_custom_local_routes;
        bool                    fib_offload_disabled;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       int                     fib_num_tclassid_users;
+       atomic_t                fib_num_tclassid_users;
 #endif
        struct hlist_head       *fib_table_hash;
        struct sock             *fibnl;
index b32906e1ab55527b5418f203d3de05853863f166..bea21ff70e74d906216f4eaa2d5a712d12551216 100644 (file)
@@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk)
        return -1;
 }
 
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+                                    const struct sk_buff *skb,
+                                    bool force_set)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        if (skb_rx_queue_recorded(skb)) {
                u16 rx_queue = skb_get_rx_queue(skb);
 
-               if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+               if (force_set ||
+                   unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
                        WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
        }
 #endif
 }
 
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+       __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+       __sk_rx_queue_set(sk, skb, false);
+}
+
 static inline void sk_rx_queue_clear(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
@@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
  * @sk: socket
  *
  * Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
  * everything that's associated with %current.
  *
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
  *
  * Return: a per task page_frag if context allows that,
  * otherwise a per socket one.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       if (gfpflags_normal_context(sk->sk_allocation))
+       if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+           (__GFP_DIRECT_RECLAIM | __GFP_FS))
                return &current->task_frag;
 
        return &sk->sk_frag;
index 31f4c4f9aeea072bec17ef6b0f71a4a3186835ec..ac0893df9c76d4025046fbb1d0ce3f02fac322c2 100644 (file)
@@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr {
  */
 /* Descriptor for SST ASoC machine driver */
 struct snd_soc_acpi_mach {
-       const u8 id[ACPI_ID_LEN];
+       u8 id[ACPI_ID_LEN];
        const struct snd_soc_acpi_codecs *comp_ids;
        const u32 link_mask;
        const struct snd_soc_acpi_link_adr *links;
index a13e20cc66b45bf0c31e52d0e327b0ce624c6195..0512fde5e6978a83666bac4d92895d1a41b6ea1e 100644 (file)
@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init {
        __u64 ctx_set_params;
 };
 
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect.  The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
 #define DRM_IOCTL_VIRTGPU_MAP \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
index 5da4ee234e0b7e677d65b0e887c5ca26d3002dcb..c0c2f3ed57298e2fe843b99c02d0714fa12da1ec 100644 (file)
 #define ETH_P_IFE      0xED3E          /* ForCES inter-FE LFB type */
 #define ETH_P_AF_IUCV   0xFBFB         /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
 
-#define ETH_P_802_3_MIN        0x0600          /* If the value in the ethernet type is less than this value
+#define ETH_P_802_3_MIN        0x0600          /* If the value in the ethernet type is more than this value
                                         * then the frame is Ethernet II. Else it is 802.3 */
 
 /*
index e9db0c810554e2c6b209b15de9b04d504e4080d1..21eccc961bba31cad928b2bf6e9bfc325191e64e 100644 (file)
@@ -2086,6 +2086,9 @@ int register_kretprobe(struct kretprobe *rp)
                }
        }
 
+       if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
+               return -E2BIG;
+
        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
 
index 76f9deeaa942099798f4851fbd85feab7c55d244..77563109c0ea0111d9783a246585fd4d2b5e2531 100644 (file)
@@ -1918,7 +1918,7 @@ static void __init init_uclamp_rq(struct rq *rq)
                };
        }
 
-       rq->uclamp_flags = 0;
+       rq->uclamp_flags = UCLAMP_FLAG_IDLE;
 }
 
 static void __init init_uclamp(void)
@@ -6617,11 +6617,11 @@ static int __init setup_preempt_mode(char *str)
        int mode = sched_dynamic_mode(str);
        if (mode < 0) {
                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-               return 1;
+               return 0;
        }
 
        sched_dynamic_update(mode);
-       return 0;
+       return 1;
 }
 __setup("preempt=", setup_preempt_mode);
 
index 872e481d5098c84c6e604ab567cf5d4334d192da..9392aea1804e5d8512363f1a837cd27752ce8338 100644 (file)
@@ -615,7 +615,8 @@ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };
 
-       task_cputime(p, &cputime.utime, &cputime.stime);
+       if (task_cputime(p, &cputime.utime, &cputime.stime))
+               cputime.sum_exec_runtime = task_sched_runtime(p);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
@@ -828,19 +829,21 @@ u64 task_gtime(struct task_struct *t)
  * add up the pending nohz execution time since the last
  * cputime snapshot.
  */
-void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
+bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 {
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 delta;
+       int ret;
 
        if (!vtime_accounting_enabled()) {
                *utime = t->utime;
                *stime = t->stime;
-               return;
+               return false;
        }
 
        do {
+               ret = false;
                seq = read_seqcount_begin(&vtime->seqcount);
 
                *utime = t->utime;
@@ -850,6 +853,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                if (vtime->state < VTIME_SYS)
                        continue;
 
+               ret = true;
                delta = vtime_delta(vtime);
 
                /*
@@ -861,6 +865,8 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                else
                        *utime += vtime->utime + delta;
        } while (read_seqcount_retry(&vtime->seqcount, seq));
+
+       return ret;
 }
 
 static int vtime_state_fetch(struct vtime *vtime, int cpu)
index 9555b8e1d1e386b1699163648507532961f05dcd..319f9c8ca7e7d8d4da9ef77e2c5eab9c78c63568 100644 (file)
@@ -3757,7 +3757,7 @@ static int check_synth_field(struct synth_event *event,
 
        if (strcmp(field->type, hist_field->type) != 0) {
                if (field->size != hist_field->size ||
-                   field->is_signed != hist_field->is_signed)
+                   (!field->is_string && field->is_signed != hist_field->is_signed))
                        return -EINVAL;
        }
 
index 39bb56d2dcbef650f1309a8fb3098cd9aab37812..9628b557184688485b586b3d130c414cc09e6442 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
+#include <linux/kmemleak.h>
 
 #include "tracing_map.h"
 #include "trace.h"
@@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a)
        for (i = 0; i < a->n_pages; i++) {
                if (!a->pages[i])
                        break;
+               kmemleak_free(a->pages[i]);
                free_page((unsigned long)a->pages[i]);
        }
 
@@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
                a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
                if (!a->pages[i])
                        goto free;
+               kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
        }
  out:
        return a;
index a90112ee72a1fee70ddab19281d354734b9b1bec..72b9068ab57bffbb308e1c449218aa205f5a8ffd 100644 (file)
@@ -49,6 +49,7 @@
        SIPROUND; \
        return (v0 ^ v1) ^ (v2 ^ v3);
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
        POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
        POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_unaligned);
-#endif
 
 /**
  * siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
        HSIPROUND; \
        return (v0 ^ v1) ^ (v2 ^ v3);
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key)
 {
@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
 
 /**
  * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
        HSIPROUND; \
        return v1 ^ v3;
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u32));
@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key)
 {
@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
 
 /**
  * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
index 15ac064b5562d7b99f885610a3d12733b63aa325..2a352e668d103948121970350d344f18b98f4aba 100644 (file)
@@ -4210,7 +4210,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */
 
-               if (txq->xmit_lock_owner != cpu) {
+               /* Other cpus might concurrently change txq->xmit_lock_owner
+                * to -1 or to their cpu id, but not to our id.
+                */
+               if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
                        if (dev_xmit_recursion())
                                goto recursion_alert;
 
index be74ab4551c204a0ddf2cfc17c886d0f36f5be98..0ccfd5fa5cb9b5f608ab6715a718cd57ee4eecab 100644 (file)
@@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache)
        free_percpu(dst_cache->cache);
 }
 EXPORT_SYMBOL_GPL(dst_cache_destroy);
+
+void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+       int i;
+
+       if (!dst_cache->cache)
+               return;
+
+       dst_cache->reset_ts = jiffies;
+       for_each_possible_cpu(i) {
+               struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+               struct dst_entry *dst = idst->dst;
+
+               idst->cookie = 0;
+               idst->dst = NULL;
+               dst_release(dst);
+       }
+}
+EXPORT_SYMBOL_GPL(dst_cache_reset_now);
index 79df7cd9dbc16d5bd91394bce15ba5e3fd8244c9..1bb567a3b329cd06534f3e0fa27a463e06e538cc 100644 (file)
@@ -323,7 +323,7 @@ jumped:
                if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
                                                              fib6_rule_suppress,
                                                              fib4_rule_suppress,
-                                                             rule, arg))
+                                                             rule, flags, arg))
                        continue;
 
                if (err != -EAGAIN) {
index 9fe13e4f5d08a5cf9cd9ff15033b9f6e0dc9e492..4d61ddd8a0ecfc4cc47b4802eb5a573beb84ee44 100644 (file)
@@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net)
        int error;
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       net->ipv4.fib_num_tclassid_users = 0;
+       atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
 #endif
        error = ip_fib_net_init(net);
        if (error < 0)
index ce54a30c2ef1e8e79c8922be5eee35055fa51178..d279cb8ac1584487885f66819634b421c01bf819 100644 (file)
@@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule,
 }
 
 INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg)
 {
        struct fib_result *result = (struct fib_result *) arg->result;
@@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        if (tb[FRA_FLOW]) {
                rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
                if (rule4->tclassid)
-                       net->ipv4.fib_num_tclassid_users++;
+                       atomic_inc(&net->ipv4.fib_num_tclassid_users);
        }
 #endif
 
@@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (((struct fib4_rule *)rule)->tclassid)
-               net->ipv4.fib_num_tclassid_users--;
+               atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
        net->ipv4.fib_has_custom_rules = true;
 
index 3364cb9c67e018fea2b2e370046de5252581b996..fde7797b580694bb3924c5c6e9560cf04fd67387 100644 (file)
@@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
 {
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (fib_nh->nh_tclassid)
-               net->ipv4.fib_num_tclassid_users--;
+               atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
        fib_nh_common_release(&fib_nh->nh_common);
 }
@@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
 #ifdef CONFIG_IP_ROUTE_CLASSID
        nh->nh_tclassid = cfg->fc_flow;
        if (nh->nh_tclassid)
-               net->ipv4.fib_num_tclassid_users++;
+               atomic_inc(&net->ipv4.fib_num_tclassid_users);
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        nh->fib_nh_weight = nh_weight;
index 40f3e4f9f33a238ae9d748d278aea37769a83f57..dcedfe29d9d932a3a85491021557005228860ffe 100644 (file)
@@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule,
 }
 
 INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg)
 {
        struct fib6_result *res = arg->result;
@@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
        return false;
 
 suppress_route:
-       if (!(arg->flags & FIB_LOOKUP_NOREF))
-               ip6_rt_put(rt);
+       ip6_rt_put_flags(rt, flags);
        return true;
 }
 
index 1b9827ff8ccf48e61e233e39d671aa67c8fff0ab..1cbd49d5788dd4cfb1b3224bed49df90f75b5d20 100644 (file)
@@ -248,9 +248,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
                 * memcmp() alone below is sufficient, right?
                 */
                 if ((first_word & htonl(0xF00FFFFF)) ||
-                   !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
-                   !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
-                   *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+                    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+                    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+                    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
 not_same_flow:
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
index 46c44823edb7dd03a10804f737ae281ebb04b1ba..cdf09c2a7007a764f61c0cbc6ee977afe9bcd8e4 100644 (file)
@@ -952,7 +952,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
 }
 
 static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
-                            unsigned int daddr_extent)
+                            unsigned int daddr_extent, unsigned char type)
 {
        struct net *net = dev_net(mdev->dev);
        struct mctp_route *rt, *tmp;
@@ -969,7 +969,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
 
        list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
                if (rt->dev == mdev &&
-                   rt->min == daddr_start && rt->max == daddr_end) {
+                   rt->min == daddr_start && rt->max == daddr_end &&
+                   rt->type == type) {
                        list_del_rcu(&rt->list);
                        /* TODO: immediate RTM_DELROUTE */
                        mctp_route_release(rt);
@@ -987,7 +988,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
 
 int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
 {
-       return mctp_route_remove(mdev, addr, 0);
+       return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
 }
 
 /* removes all entries for a given device */
@@ -1195,7 +1196,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (rtm->rtm_type != RTN_UNICAST)
                return -EINVAL;
 
-       rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
+       rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
        return rc;
 }
 
index cc6b8803aa9d0a7bec28f01daef8d458bd90fe46..7b7918702592a738452ec34159f83f57c33b5f20 100644 (file)
@@ -12,7 +12,7 @@
 static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
                                    struct net_device *ndev)
 {
-       kfree(skb);
+       kfree_skb(skb);
        return NETDEV_TX_OK;
 }
 
index ffeb2df8be7ae8da3d00b71bfdc90694db7f7f29..0c7bde1c14a6a879a3263fae3390720c796a3610 100644 (file)
@@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
                goto err;
 
        /* Find the output device */
-       out_dev = rcu_dereference(nh->nh_dev);
+       out_dev = nh->nh_dev;
        if (!mpls_output_possible(out_dev))
                goto tx_err;
 
@@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
            (dev->addr_len != nh->nh_via_alen))
                goto errout;
 
-       RCU_INIT_POINTER(nh->nh_dev, dev);
+       nh->nh_dev = dev;
 
        if (!(dev->flags & IFF_UP)) {
                nh->nh_flags |= RTNH_F_DEAD;
@@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
        kfree(mdev);
 }
 
-static void mpls_ifdown(struct net_device *dev, int event)
+static int mpls_ifdown(struct net_device *dev, int event)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = dev_net(dev);
-       u8 alive, deleted;
        unsigned index;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
        for (index = 0; index < net->mpls.platform_labels; index++) {
                struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+               bool nh_del = false;
+               u8 alive = 0;
 
                if (!rt)
                        continue;
 
-               alive = 0;
-               deleted = 0;
+               if (event == NETDEV_UNREGISTER) {
+                       u8 deleted = 0;
+
+                       for_nexthops(rt) {
+                               if (!nh->nh_dev || nh->nh_dev == dev)
+                                       deleted++;
+                               if (nh->nh_dev == dev)
+                                       nh_del = true;
+                       } endfor_nexthops(rt);
+
+                       /* if there are no more nexthops, delete the route */
+                       if (deleted == rt->rt_nhn) {
+                               mpls_route_update(net, index, NULL, NULL);
+                               continue;
+                       }
+
+                       if (nh_del) {
+                               size_t size = sizeof(*rt) + rt->rt_nhn *
+                                       rt->rt_nh_size;
+                               struct mpls_route *orig = rt;
+
+                               rt = kmalloc(size, GFP_KERNEL);
+                               if (!rt)
+                                       return -ENOMEM;
+                               memcpy(rt, orig, size);
+                       }
+               }
+
                change_nexthops(rt) {
                        unsigned int nh_flags = nh->nh_flags;
 
-                       if (rtnl_dereference(nh->nh_dev) != dev)
+                       if (nh->nh_dev != dev)
                                goto next;
 
                        switch (event) {
@@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event)
                                break;
                        }
                        if (event == NETDEV_UNREGISTER)
-                               RCU_INIT_POINTER(nh->nh_dev, NULL);
+                               nh->nh_dev = NULL;
 
                        if (nh->nh_flags != nh_flags)
                                WRITE_ONCE(nh->nh_flags, nh_flags);
 next:
                        if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
                                alive++;
-                       if (!rtnl_dereference(nh->nh_dev))
-                               deleted++;
                } endfor_nexthops(rt);
 
                WRITE_ONCE(rt->rt_nhn_alive, alive);
 
-               /* if there are no more nexthops, delete the route */
-               if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
-                       mpls_route_update(net, index, NULL, NULL);
+               if (nh_del)
+                       mpls_route_update(net, index, rt, NULL);
        }
+
+       return 0;
 }
 
 static void mpls_ifup(struct net_device *dev, unsigned int flags)
@@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
                alive = 0;
                change_nexthops(rt) {
                        unsigned int nh_flags = nh->nh_flags;
-                       struct net_device *nh_dev =
-                               rtnl_dereference(nh->nh_dev);
 
                        if (!(nh_flags & flags)) {
                                alive++;
                                continue;
                        }
-                       if (nh_dev != dev)
+                       if (nh->nh_dev != dev)
                                continue;
                        alive++;
                        nh_flags &= ~flags;
@@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                return NOTIFY_OK;
 
        switch (event) {
+               int err;
+
        case NETDEV_DOWN:
-               mpls_ifdown(dev, event);
+               err = mpls_ifdown(dev, event);
+               if (err)
+                       return notifier_from_errno(err);
                break;
        case NETDEV_UP:
                flags = dev_get_flags(dev);
@@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                break;
        case NETDEV_CHANGE:
                flags = dev_get_flags(dev);
-               if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+               if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
                        mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
-               else
-                       mpls_ifdown(dev, event);
+               } else {
+                       err = mpls_ifdown(dev, event);
+                       if (err)
+                               return notifier_from_errno(err);
+               }
                break;
        case NETDEV_UNREGISTER:
-               mpls_ifdown(dev, event);
+               err = mpls_ifdown(dev, event);
+               if (err)
+                       return notifier_from_errno(err);
                mdev = mpls_dev_get(dev);
                if (mdev) {
                        mpls_dev_sysctl_unregister(dev, mdev);
@@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_CHANGENAME:
                mdev = mpls_dev_get(dev);
                if (mdev) {
-                       int err;
-
                        mpls_dev_sysctl_unregister(dev, mdev);
                        err = mpls_dev_sysctl_register(dev, mdev);
                        if (err)
@@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
                    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
                                nh->nh_via_alen))
                        goto nla_put_failure;
-               dev = rtnl_dereference(nh->nh_dev);
+               dev = nh->nh_dev;
                if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
                        goto nla_put_failure;
                if (nh->nh_flags & RTNH_F_LINKDOWN)
@@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
                        goto nla_put_failure;
 
                for_nexthops(rt) {
-                       dev = rtnl_dereference(nh->nh_dev);
+                       dev = nh->nh_dev;
                        if (!dev)
                                continue;
 
@@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 static bool mpls_rt_uses_dev(struct mpls_route *rt,
                             const struct net_device *dev)
 {
-       struct net_device *nh_dev;
-
        if (rt->rt_nhn == 1) {
                struct mpls_nh *nh = rt->rt_nh;
 
-               nh_dev = rtnl_dereference(nh->nh_dev);
-               if (dev == nh_dev)
+               if (nh->nh_dev == dev)
                        return true;
        } else {
                for_nexthops(rt) {
-                       nh_dev = rtnl_dereference(nh->nh_dev);
-                       if (nh_dev == dev)
+                       if (nh->nh_dev == dev)
                                return true;
                } endfor_nexthops(rt);
        }
@@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
                size_t nhsize = 0;
 
                for_nexthops(rt) {
-                       if (!rtnl_dereference(nh->nh_dev))
+                       if (!nh->nh_dev)
                                continue;
                        nhsize += nla_total_size(sizeof(struct rtnexthop));
                        /* RTA_VIA */
@@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
            nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
                        nh->nh_via_alen))
                goto nla_put_failure;
-       dev = rtnl_dereference(nh->nh_dev);
+       dev = nh->nh_dev;
        if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
                goto nla_put_failure;
 
@@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
                if (IS_ERR(rt0))
                        goto nort0;
-               RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
+               rt0->rt_nh->nh_dev = lo;
                rt0->rt_protocol = RTPROT_KERNEL;
                rt0->rt_payload_type = MPT_IPV4;
                rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
@@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
                if (IS_ERR(rt2))
                        goto nort2;
-               RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
+               rt2->rt_nh->nh_dev = lo;
                rt2->rt_protocol = RTPROT_KERNEL;
                rt2->rt_payload_type = MPT_IPV6;
                rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
index 838cdfc10e47d6efd51a7f89c674458f845e6a44..893df00b77b62ec8a6c5a85dd37679a2210fce73 100644 (file)
@@ -87,7 +87,7 @@ enum mpls_payload_type {
 };
 
 struct mpls_nh { /* next hop label forwarding entry */
-       struct net_device __rcu *nh_dev;
+       struct net_device       *nh_dev;
 
        /* nh_flags is accessed under RCU in the packet path; it is
         * modified handling netdev events with rtnl lock held
index 4c575324a98528bec4188acf27eecc2f98ae5e0a..9eba2e6483851db2313a093d3ac17deb4c45b7b6 100644 (file)
@@ -1852,6 +1852,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;
 
+       if (len == 0) {
+               pr_warn_once("Zero length message leads to an empty skb\n");
+               return -ENODATA;
+       }
+
        err = scm_send(sock, msg, &scm, true);
        if (err < 0)
                return err;
index abf19c0e3ba0bfcf0396df2d78b937e288b84ab0..5327d130c4b5691e788bbbcb990a349d714ad8d4 100644 (file)
@@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock)
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
-               sk->sk_sndbuf = rtn->rcvbuf_size;
+               sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
index dbea0bfee48e9e4df50fda68ade4624761f44d33..8120138dac01810854c8376acdec90e3c13dd4a8 100644 (file)
@@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
        return bundle;
 }
 
+static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
+{
+       rxrpc_put_peer(bundle->params.peer);
+       kfree(bundle);
+}
+
 void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
 {
        unsigned int d = bundle->debug_id;
        unsigned int u = atomic_dec_return(&bundle->usage);
 
        _debug("PUT B=%x %u", d, u);
-       if (u == 0) {
-               rxrpc_put_peer(bundle->params.peer);
-               kfree(bundle);
-       }
+       if (u == 0)
+               rxrpc_free_bundle(bundle);
 }
 
 /*
@@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        return candidate;
 
 found_bundle_free:
-       kfree(candidate);
+       rxrpc_free_bundle(candidate);
 found_bundle:
        rxrpc_get_bundle(bundle);
        spin_unlock(&local->client_bundles_lock);
index 68396d05205252177ea9525b43972f1598480e12..0298fe2ad6d323b377b46d5dc70c8d472d899966 100644 (file)
@@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
        return peer;
 }
 
+static void rxrpc_free_peer(struct rxrpc_peer *peer)
+{
+       rxrpc_put_local(peer->local);
+       kfree_rcu(peer, rcu);
+}
+
 /*
  * Set up a new incoming peer.  There shouldn't be any other matching peers
  * since we've already done a search in the list from the non-reentrant context
@@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
                spin_unlock_bh(&rxnet->peer_hash_lock);
 
                if (peer)
-                       kfree(candidate);
+                       rxrpc_free_peer(candidate);
                else
                        peer = candidate;
        }
@@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
        list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
-       rxrpc_put_local(peer->local);
-       kfree_rcu(peer, rcu);
+       rxrpc_free_peer(peer);
 }
 
 /*
@@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
        if (n == 0) {
                hash_del_rcu(&peer->hash_link);
                list_del_init(&peer->keepalive_link);
-               rxrpc_put_local(peer->local);
-               kfree_rcu(peer, rcu);
+               rxrpc_free_peer(peer);
        }
 }
 
index 3715d2f5ad555ce365738e73a99ed8c7ca6a2cfd..292e4d904ab6e4afbba2cef421bce27cae5af364 100644 (file)
@@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc)
        int old_state;
        long timeout;
        int rc = 0;
+       int rc1 = 0;
 
        timeout = current->flags & PF_EXITING ?
                  0 : sock_flag(sk, SOCK_LINGER) ?
@@ -232,8 +233,11 @@ again:
                        /* actively shutdown clcsock before peer close it,
                         * prevent peer from entering TIME_WAIT state.
                         */
-                       if (smc->clcsock && smc->clcsock->sk)
-                               rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+                       if (smc->clcsock && smc->clcsock->sk) {
+                               rc1 = kernel_sock_shutdown(smc->clcsock,
+                                                          SHUT_RDWR);
+                               rc = rc ? rc : rc1;
+                       }
                } else {
                        /* peer event has changed the state */
                        goto again;
index bb52c8b5f148af8536246953434a50a22cce2800..387d28b2f8dd055a3b3ceea6024ffb231f115877 100644 (file)
@@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
 void smc_lgr_cleanup_early(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
-       struct list_head *lgr_list;
        spinlock_t *lgr_lock;
 
        if (!lgr)
                return;
 
        smc_conn_free(conn);
-       lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+       smc_lgr_list_head(lgr, &lgr_lock);
        spin_lock_bh(lgr_lock);
        /* do not use this link group for new connections */
-       if (!list_empty(lgr_list))
-               list_del_init(lgr_list);
+       if (!list_empty(&lgr->list))
+               list_del_init(&lgr->list);
        spin_unlock_bh(lgr_lock);
        __smc_lgr_terminate(lgr, true);
 }
index d3e7ff90889e35da299bf4d9127b6f766845c6e1..dfe623a4e72f48bb12a68abcfb3081c45a7716f5 100644 (file)
@@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk,
        memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
               prot->iv_size + prot->salt_size);
 
-       xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
+       xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
 
        sge->offset += prot->prepend_size;
        sge->length -= prot->prepend_size;
@@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
        else
                memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
 
-       xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
+       xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
 
        /* Prepare AAD */
        tls_make_aad(aad, rxm->full_len - prot->overhead_size +
index 10a0bffc3cf6c205b4596d48728c442b286f9365..4208fa8a4db5bc10787d797b04ead71572b6f503 100644 (file)
@@ -252,6 +252,11 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x02c8,
        },
+       {
+               .flags = FLAG_SOF,
+               .device = 0x02c8,
+               .codec_hid = "ESSX8336",
+       },
 /* Cometlake-H */
        {
                .flags = FLAG_SOF,
@@ -276,6 +281,11 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x06c8,
        },
+               {
+               .flags = FLAG_SOF,
+               .device = 0x06c8,
+               .codec_hid = "ESSX8336",
+       },
 #endif
 
 /* Icelake */
index fe51163f2d82df9347be08a94a486503705779cd..1b46b599a5cff282dbdb02ea22202b941711d724 100644 (file)
@@ -335,7 +335,10 @@ enum {
                                        ((pci)->device == 0x0c0c) || \
                                        ((pci)->device == 0x0d0c) || \
                                        ((pci)->device == 0x160c) || \
-                                       ((pci)->device == 0x490d))
+                                       ((pci)->device == 0x490d) || \
+                                       ((pci)->device == 0x4f90) || \
+                                       ((pci)->device == 0x4f91) || \
+                                       ((pci)->device == 0x4f92))
 
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 
@@ -2473,6 +2476,13 @@ static const struct pci_device_id azx_ids[] = {
        /* DG1 */
        { PCI_DEVICE(0x8086, 0x490d),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* DG2 */
+       { PCI_DEVICE(0x8086, 0x4f90),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4f91),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4f92),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Alderlake-S */
        { PCI_DEVICE(0x8086, 0x7ad0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index ea8ab8b43337896ba3ebee6a89a94d63fedff2df..d22c96eb2f8fb7324f44335835dd36bd8576e234 100644 (file)
@@ -438,6 +438,15 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
 #define for_each_hda_codec_node(nid, codec) \
        for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++)
 
+/* Set the codec power_state flag to indicate to allow unsol event handling;
+ * see hda_codec_unsol_event() in hda_bind.c.  Calling this might confuse the
+ * state tracking, so use with care.
+ */
+static inline void snd_hda_codec_allow_unsol_events(struct hda_codec *codec)
+{
+       codec->core.dev.power.power_state = PMSG_ON;
+}
+
 /*
  * get widget capabilities
  */
index 31ff11ab868e1ac3c1aff15d20a52728884e2788..039b9f2f8e94700f097cc58dc3a02ace0fae2dcf 100644 (file)
@@ -750,6 +750,11 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
        if (cs42l42->full_scale_vol)
                cs8409_i2c_write(cs42l42, 0x2001, 0x01);
 
+       /* we have to explicitly allow unsol event handling even during the
+        * resume phase so that the jack event is processed properly
+        */
+       snd_hda_codec_allow_unsol_events(cs42l42->codec);
+
        cs42l42_enable_jack_detect(cs42l42);
 }
 
index 65d2c55399195a4c6b89afe3f62f1debbe8fbeed..415701bd10ac8ca23ba7a505dea0b0d7ccbbb0da 100644 (file)
@@ -4380,10 +4380,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",     patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",  patch_i915_tgl_hdmi),
-HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index 90a921f726c3bdb398ff15bd7e52a673d76b5d8d..3fa99741779af3d90216bf004349435f6b0d4c12 100644 (file)
@@ -42,34 +42,6 @@ static const struct spi_device_id cs35l41_id_spi[] = {
 
 MODULE_DEVICE_TABLE(spi, cs35l41_id_spi);
 
-static void cs35l41_spi_otp_setup(struct cs35l41_private *cs35l41,
-                                 bool is_pre_setup, unsigned int *freq)
-{
-       struct spi_device *spi;
-       u32 orig_spi_freq;
-
-       spi = to_spi_device(cs35l41->dev);
-
-       if (!spi) {
-               dev_err(cs35l41->dev, "%s: No SPI device\n", __func__);
-               return;
-       }
-
-       if (is_pre_setup) {
-               orig_spi_freq = spi->max_speed_hz;
-               if (orig_spi_freq > CS35L41_SPI_MAX_FREQ_OTP) {
-                       spi->max_speed_hz = CS35L41_SPI_MAX_FREQ_OTP;
-                       spi_setup(spi);
-               }
-               *freq = orig_spi_freq;
-       } else {
-               if (spi->max_speed_hz != *freq) {
-                       spi->max_speed_hz = *freq;
-                       spi_setup(spi);
-               }
-       }
-}
-
 static int cs35l41_spi_probe(struct spi_device *spi)
 {
        const struct regmap_config *regmap_config = &cs35l41_regmap_spi;
@@ -81,6 +53,9 @@ static int cs35l41_spi_probe(struct spi_device *spi)
        if (!cs35l41)
                return -ENOMEM;
 
+       spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;
+       spi_setup(spi);
+
        spi_set_drvdata(spi, cs35l41);
        cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config);
        if (IS_ERR(cs35l41->regmap)) {
@@ -91,7 +66,6 @@ static int cs35l41_spi_probe(struct spi_device *spi)
 
        cs35l41->dev = &spi->dev;
        cs35l41->irq = spi->irq;
-       cs35l41->otp_setup = cs35l41_spi_otp_setup;
 
        return cs35l41_probe(cs35l41, pdata);
 }
index 9d0530dde996728892a178d1dd6ba52b89557df3..9c4d481f7614c6a61edb065cfd45b7603afc9c0f 100644 (file)
@@ -302,7 +302,6 @@ static int cs35l41_otp_unpack(void *data)
        const struct cs35l41_otp_packed_element_t *otp_map;
        struct cs35l41_private *cs35l41 = data;
        int bit_offset, word_offset, ret, i;
-       unsigned int orig_spi_freq;
        unsigned int bit_sum = 8;
        u32 otp_val, otp_id_reg;
        u32 *otp_mem;
@@ -326,9 +325,6 @@ static int cs35l41_otp_unpack(void *data)
                goto err_otp_unpack;
        }
 
-       if (cs35l41->otp_setup)
-               cs35l41->otp_setup(cs35l41, true, &orig_spi_freq);
-
        ret = regmap_bulk_read(cs35l41->regmap, CS35L41_OTP_MEM0, otp_mem,
                               CS35L41_OTP_SIZE_WORDS);
        if (ret < 0) {
@@ -336,9 +332,6 @@ static int cs35l41_otp_unpack(void *data)
                goto err_otp_unpack;
        }
 
-       if (cs35l41->otp_setup)
-               cs35l41->otp_setup(cs35l41, false, &orig_spi_freq);
-
        otp_map = otp_map_match->map;
 
        bit_offset = otp_map_match->bit_offset;
index 6cffe8a55beb1b9789f16f06c0ef0f94250f401c..48485b08a6f1fdb5dde5c14d20fba978d90268a7 100644 (file)
 #define CS35L41_FS2_WINDOW_MASK                0x00FFF800
 #define CS35L41_FS2_WINDOW_SHIFT       12
 
-#define CS35L41_SPI_MAX_FREQ_OTP       4000000
+#define CS35L41_SPI_MAX_FREQ           4000000
 
 #define CS35L41_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 #define CS35L41_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
@@ -764,8 +764,6 @@ struct cs35l41_private {
        int irq;
        /* GPIO for /RST */
        struct gpio_desc *reset_gpio;
-       void (*otp_setup)(struct cs35l41_private *cs35l41, bool is_pre_setup,
-                         unsigned int *freq);
 };
 
 int cs35l41_probe(struct cs35l41_private *cs35l41,
index 943d7d933e81b3325e3ae09a25df7c1ada8e422d..03f24edfe4f6492b86fe8fc97a3ae362b6083e50 100644 (file)
@@ -539,3 +539,4 @@ module_platform_driver(rk817_codec_driver);
 MODULE_DESCRIPTION("ASoC RK817 codec driver");
 MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rk817-codec");
index b4eb0c97edf1c486e1608a54ae8e348823609c6d..4eebc79d4b486df60b07490e6f1cb19a8401f181 100644 (file)
@@ -81,6 +81,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
                .sof_fw_filename = "sof-cml.ri",
                .sof_tplg_filename = "sof-cml-da7219-max98390.tplg",
        },
+       {
+               .id = "ESSX8336",
+               .drv_name = "sof-essx8336",
+               .sof_fw_filename = "sof-cml.ri",
+               .sof_tplg_filename = "sof-cml-es8336.tplg",
+       },
        {},
 };
 EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
index 2ae99b49d3f5f6b72c5990f05432bf06c588d888..cbd7ea48837b246ddcface067b0eff746fec59da 100644 (file)
@@ -20,8 +20,10 @@ static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine)
 
        if (comp_ids) {
                for (i = 0; i < comp_ids->num_codecs; i++) {
-                       if (acpi_dev_present(comp_ids->codecs[i], NULL, -1))
+                       if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) {
+                               strscpy(machine->id, comp_ids->codecs[i], ACPI_ID_LEN);
                                return true;
+                       }
                }
        }
 
index 568d351b7a4e97cb5f9beb3a612c8cf709e56dae..2c0d4d06ab364125a4a3a987c1000385905a7042 100644 (file)
@@ -58,6 +58,13 @@ int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w)
                return -EINVAL;
        }
 
+       /* DAI already configured, reset it before reconfiguring it */
+       if (sof_dai->configured) {
+               ret = hda_ctrl_dai_widget_free(w);
+               if (ret < 0)
+                       return ret;
+       }
+
        config = &sof_dai->dai_config[sof_dai->current_config];
 
        /*
index 8ee9a77bd83d375832de31094be44878093ecfdd..a74c980ee77539cd53dd8006eaea1487b8ecc653 100644 (file)
@@ -26,51 +26,162 @@ static const struct reg_default tegra186_dspk_reg_defaults[] = {
        { TEGRA186_DSPK_CODEC_CTRL,  0x03000000 },
 };
 
-static int tegra186_dspk_get_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_get_fifo_th(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
        struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
 
-       if (strstr(kcontrol->id.name, "FIFO Threshold"))
-               ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               ucontrol->value.integer.value[0] = dspk->osr_val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               ucontrol->value.integer.value[0] = dspk->lrsel;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               ucontrol->value.integer.value[0] = dspk->ch_sel;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               ucontrol->value.integer.value[0] = dspk->mono_to_stereo;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               ucontrol->value.integer.value[0] = dspk->stereo_to_mono;
+       ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
 
        return 0;
 }
 
-static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_put_fifo_th(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
        struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
-       int val = ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "FIFO Threshold"))
-               dspk->rx_fifo_th = val;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               dspk->osr_val = val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               dspk->lrsel = val;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               dspk->ch_sel = val;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               dspk->mono_to_stereo = val;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               dspk->stereo_to_mono = val;
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == dspk->rx_fifo_th)
+               return 0;
+
+       dspk->rx_fifo_th = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_osr_val(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->osr_val;
 
        return 0;
 }
 
+static int tegra186_dspk_put_osr_val(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->osr_val)
+               return 0;
+
+       dspk->osr_val = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->lrsel;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->lrsel)
+               return 0;
+
+       dspk->lrsel = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_ch_sel(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->ch_sel;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_ch_sel(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->ch_sel)
+               return 0;
+
+       dspk->ch_sel = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->mono_to_stereo;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->mono_to_stereo)
+               return 0;
+
+       dspk->mono_to_stereo = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->stereo_to_mono;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->stereo_to_mono)
+               return 0;
+
+       dspk->stereo_to_mono = value;
+
+       return 1;
+}
+
 static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev)
 {
        struct tegra186_dspk *dspk = dev_get_drvdata(dev);
@@ -279,17 +390,19 @@ static const struct soc_enum tegra186_dspk_lrsel_enum =
 static const struct snd_kcontrol_new tegrat186_dspk_controls[] = {
        SOC_SINGLE_EXT("FIFO Threshold", SND_SOC_NOPM, 0,
                       TEGRA186_DSPK_RX_FIFO_DEPTH - 1, 0,
-                      tegra186_dspk_get_control, tegra186_dspk_put_control),
+                      tegra186_dspk_get_fifo_th, tegra186_dspk_put_fifo_th),
        SOC_ENUM_EXT("OSR Value", tegra186_dspk_osr_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_osr_val, tegra186_dspk_put_osr_val),
        SOC_ENUM_EXT("LR Polarity Select", tegra186_dspk_lrsel_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_pol_sel, tegra186_dspk_put_pol_sel),
        SOC_ENUM_EXT("Channel Select", tegra186_dspk_ch_sel_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_ch_sel, tegra186_dspk_put_ch_sel),
        SOC_ENUM_EXT("Mono To Stereo", tegra186_dspk_mono_conv_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_mono_to_stereo,
+                    tegra186_dspk_put_mono_to_stereo),
        SOC_ENUM_EXT("Stereo To Mono", tegra186_dspk_stereo_conv_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_stereo_to_mono,
+                    tegra186_dspk_put_stereo_to_mono),
 };
 
 static const struct snd_soc_component_driver tegra186_dspk_cmpnt = {
index bcccdf3ddc528b2d2ccea759390c0564c1ea6c86..1a2e868a6220932f48f6eab1150523e1c9ee37dd 100644 (file)
@@ -424,46 +424,122 @@ static const struct snd_soc_dai_ops tegra_admaif_dai_ops = {
        .trigger        = tegra_admaif_trigger,
 };
 
-static int tegra_admaif_get_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+       ucontrol->value.enumerated.item[0] =
+               admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
+
+       return 0;
+}
+
+static int tegra210_admaif_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg])
+               return 0;
+
+       admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+       ucontrol->value.enumerated.item[0] =
+               admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
+
+       return 0;
+}
+
+static int tegra210_admaif_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
        struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg])
+               return 0;
+
+       admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
-       long *uctl_val = &ucontrol->value.integer.value[0];
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
 
-       if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               *uctl_val = admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               *uctl_val = admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               *uctl_val = admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               *uctl_val = admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
+       ucontrol->value.enumerated.item[0] =
+               admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
 
        return 0;
 }
 
-static int tegra_admaif_put_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
        struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg])
+               return 0;
+
+       admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
-       int value = ucontrol->value.integer.value[0];
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
 
-       if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+       ucontrol->value.enumerated.item[0] =
+               admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
 
        return 0;
 }
 
+static int tegra210_admaif_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg])
+               return 0;
+
+       admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
 static int tegra_admaif_dai_probe(struct snd_soc_dai *dai)
 {
        struct tegra_admaif *admaif = snd_soc_dai_get_drvdata(dai);
@@ -559,17 +635,21 @@ static const char * const tegra_admaif_mono_conv_text[] = {
 }
 
 #define TEGRA_ADMAIF_CIF_CTRL(reg)                                            \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,\
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,     \
+                       tegra210_admaif_pget_mono_to_stereo,                   \
+                       tegra210_admaif_pput_mono_to_stereo,                   \
                        tegra_admaif_mono_conv_text),                          \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,\
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,     \
+                       tegra210_admaif_pget_stereo_to_mono,                   \
+                       tegra210_admaif_pput_stereo_to_mono,                   \
                        tegra_admaif_stereo_conv_text),                        \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1,      \
+                       tegra210_admaif_cget_mono_to_stereo,                   \
+                       tegra210_admaif_cput_mono_to_stereo,                   \
                        tegra_admaif_mono_conv_text),                          \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1,      \
+                       tegra210_admaif_cget_stereo_to_mono,                   \
+                       tegra210_admaif_cput_stereo_to_mono,                   \
                        tegra_admaif_stereo_conv_text)
 
 static struct snd_kcontrol_new tegra210_admaif_controls[] = {
index d7c7849c2f92c999b8463b7715284e6555fb74b7..933c4503fe50c2e9a617d663997797e57c7b1358 100644 (file)
@@ -193,6 +193,9 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol,
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;;
 
+       if (value == bytes_map[mc->reg])
+               return 0;
+
        if (value >= 0 && value <= 255) {
                /* update byte map and enable slot */
                bytes_map[mc->reg] = value;
index a1989eae2b525ad818245c56c588b8da0aa55997..388b815443c7d1dff33617667a592e91ab176153 100644 (file)
@@ -62,6 +62,7 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
        unsigned int *item = uctl->value.enumerated.item;
        unsigned int value = e->values[item[0]];
        unsigned int i, bit_pos, reg_idx = 0, reg_val = 0;
+       int change = 0;
 
        if (item[0] >= e->items)
                return -EINVAL;
@@ -86,12 +87,14 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
 
                /* Update widget power if state has changed */
                if (snd_soc_component_test_bits(cmpnt, update[i].reg,
-                                               update[i].mask, update[i].val))
-                       snd_soc_dapm_mux_update_power(dapm, kctl, item[0], e,
-                                                     &update[i]);
+                                               update[i].mask,
+                                               update[i].val))
+                       change |= snd_soc_dapm_mux_update_power(dapm, kctl,
+                                                               item[0], e,
+                                                               &update[i]);
        }
 
-       return 0;
+       return change;
 }
 
 static struct snd_soc_dai_driver tegra210_ahub_dais[] = {
index af9bddfc312073d70ded76d3deffb2d5bf637fe7..689576302ede1aee1f2c8976f642df7a9a1a1fe3 100644 (file)
@@ -222,6 +222,9 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol,
        int reg = mc->reg;
        int value = ucontrol->value.integer.value[0];
 
+       if (value == bytes_map[reg])
+               return 0;
+
        if (value >= 0 && value <= 255) {
                /* Update byte map and enable slot */
                bytes_map[reg] = value;
index b096478cd2ef0f65eb30239b1e781c0831ec8fad..db95794530f4678b322dabc1ab13dedb552acfea 100644 (file)
@@ -156,51 +156,162 @@ static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int tegra210_dmic_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.integer.value[0] = dmic->boost_gain;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == dmic->boost_gain)
+               return 0;
+
+       dmic->boost_gain = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->ch_select;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->ch_select)
+               return 0;
+
+       dmic->ch_select = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->mono_to_stereo)
+               return 0;
+
+       dmic->mono_to_stereo = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->stereo_to_mono)
+               return 0;
+
+       dmic->stereo_to_mono = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
 
-       if (strstr(kcontrol->id.name, "Boost Gain Volume"))
-               ucontrol->value.integer.value[0] = dmic->boost_gain;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               ucontrol->value.integer.value[0] = dmic->ch_select;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               ucontrol->value.integer.value[0] = dmic->mono_to_stereo;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               ucontrol->value.integer.value[0] = dmic->stereo_to_mono;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               ucontrol->value.integer.value[0] = dmic->osr_val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               ucontrol->value.integer.value[0] = dmic->lrsel;
+       ucontrol->value.enumerated.item[0] = dmic->osr_val;
 
        return 0;
 }
 
-static int tegra210_dmic_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
-       int value = ucontrol->value.integer.value[0];
+       unsigned int value = ucontrol->value.enumerated.item[0];
 
-       if (strstr(kcontrol->id.name, "Boost Gain Volume"))
-               dmic->boost_gain = value;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               dmic->ch_select = ucontrol->value.integer.value[0];
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               dmic->mono_to_stereo = value;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               dmic->stereo_to_mono = value;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               dmic->osr_val = value;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               dmic->lrsel = value;
+       if (value == dmic->osr_val)
+               return 0;
+
+       dmic->osr_val = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->lrsel;
 
        return 0;
 }
 
+static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->lrsel)
+               return 0;
+
+       dmic->lrsel = value;
+
+       return 1;
+}
+
 static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = {
        .hw_params      = tegra210_dmic_hw_params,
 };
@@ -287,19 +398,22 @@ static const struct soc_enum tegra210_dmic_lrsel_enum =
 
 static const struct snd_kcontrol_new tegra210_dmic_controls[] = {
        SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0,
-                      tegra210_dmic_get_control, tegra210_dmic_put_control),
+                      tegra210_dmic_get_boost_gain,
+                      tegra210_dmic_put_boost_gain),
        SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select),
        SOC_ENUM_EXT("Mono To Stereo",
-                    tegra210_dmic_mono_conv_enum, tegra210_dmic_get_control,
-                    tegra210_dmic_put_control),
+                    tegra210_dmic_mono_conv_enum,
+                    tegra210_dmic_get_mono_to_stereo,
+                    tegra210_dmic_put_mono_to_stereo),
        SOC_ENUM_EXT("Stereo To Mono",
-                    tegra210_dmic_stereo_conv_enum, tegra210_dmic_get_control,
-                    tegra210_dmic_put_control),
+                    tegra210_dmic_stereo_conv_enum,
+                    tegra210_dmic_get_stereo_to_mono,
+                    tegra210_dmic_put_stereo_to_mono),
        SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val),
        SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel),
 };
 
 static const struct snd_soc_component_driver tegra210_dmic_compnt = {
index 45f31ccb49d89ee2fc195e64e1e89a180c45f862..9552bbb939dd1e9343ad989a5bafb036a75db0e0 100644 (file)
@@ -302,85 +302,235 @@ static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai,
        return 0;
 }
 
-static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
-                                          unsigned int ratio)
+static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
 {
-       struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
 
-       i2s->bclk_ratio = ratio;
+       ucontrol->value.integer.value[0] = i2s->loopback;
 
        return 0;
 }
 
-static int tegra210_i2s_get_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->loopback)
+               return 0;
+
+       i2s->loopback = value;
+
+       regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK,
+                          i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+
+       return 1;
+}
+
+static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
-       long *uctl_val = &ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "Loopback"))
-               *uctl_val = i2s->loopback;
-       else if (strstr(kcontrol->id.name, "FSYNC Width"))
-               *uctl_val = i2s->fsync_width;
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               *uctl_val = i2s->stereo_to_mono[I2S_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               *uctl_val = i2s->mono_to_stereo[I2S_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               *uctl_val = i2s->stereo_to_mono[I2S_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               *uctl_val = i2s->mono_to_stereo[I2S_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback FIFO Threshold"))
-               *uctl_val = i2s->rx_fifo_th;
-       else if (strstr(kcontrol->id.name, "BCLK Ratio"))
-               *uctl_val = i2s->bclk_ratio;
+
+       ucontrol->value.integer.value[0] = i2s->fsync_width;
 
        return 0;
 }
 
-static int tegra210_i2s_put_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
        int value = ucontrol->value.integer.value[0];
 
-       if (strstr(kcontrol->id.name, "Loopback")) {
-               i2s->loopback = value;
+       if (value == i2s->fsync_width)
+               return 0;
 
-               regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
-                                  I2S_CTRL_LPBK_MASK,
-                                  i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+       i2s->fsync_width = value;
 
-       } else if (strstr(kcontrol->id.name, "FSYNC Width")) {
-               /*
-                * Frame sync width is used only for FSYNC modes and not
-                * applicable for LRCK modes. Reset value for this field is "0",
-                * which means the width is one bit clock wide.
-                * The width requirement may depend on the codec and in such
-                * cases mixer control is used to update custom values. A value
-                * of "N" here means, width is "N + 1" bit clock wide.
-                */
-               i2s->fsync_width = value;
-
-               regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
-                                  I2S_CTRL_FSYNC_WIDTH_MASK,
-                                  i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
-
-       } else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) {
-               i2s->stereo_to_mono[I2S_TX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) {
-               i2s->mono_to_stereo[I2S_TX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) {
-               i2s->stereo_to_mono[I2S_RX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) {
-               i2s->mono_to_stereo[I2S_RX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) {
-               i2s->rx_fifo_th = value;
-       } else if (strstr(kcontrol->id.name, "BCLK Ratio")) {
-               i2s->bclk_ratio = value;
-       }
+       /*
+        * Frame sync width is used only for FSYNC modes and not
+        * applicable for LRCK modes. Reset value for this field is "0",
+        * which means the width is one bit clock wide.
+        * The width requirement may depend on the codec and in such
+        * cases mixer control is used to update custom values. A value
+        * of "N" here means, width is "N + 1" bit clock wide.
+        */
+       regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
+                          I2S_CTRL_FSYNC_WIDTH_MASK,
+                          i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
+
+       return 1;
+}
+
+static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->stereo_to_mono[I2S_TX_PATH])
+               return 0;
+
+       i2s->stereo_to_mono[I2S_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->mono_to_stereo[I2S_TX_PATH])
+               return 0;
+
+       i2s->mono_to_stereo[I2S_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->stereo_to_mono[I2S_RX_PATH])
+               return 0;
+
+       i2s->stereo_to_mono[I2S_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->mono_to_stereo[I2S_RX_PATH])
+               return 0;
+
+       i2s->mono_to_stereo[I2S_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.integer.value[0] = i2s->rx_fifo_th;
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->rx_fifo_th)
+               return 0;
+
+       i2s->rx_fifo_th = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.integer.value[0] = i2s->bclk_ratio;
+
+       return 0;
+}
+
+static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->bclk_ratio)
+               return 0;
+
+       i2s->bclk_ratio = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
+                                          unsigned int ratio)
+{
+       struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+       i2s->bclk_ratio = ratio;
 
        return 0;
 }
@@ -598,22 +748,28 @@ static const struct soc_enum tegra210_i2s_stereo_conv_enum =
                        tegra210_i2s_stereo_conv_text);
 
 static const struct snd_kcontrol_new tegra210_i2s_controls[] = {
-       SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
-       SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
+       SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback,
+                      tegra210_i2s_put_loopback),
+       SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0,
+                      tegra210_i2s_get_fsync_width,
+                      tegra210_i2s_put_fsync_width),
        SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_cget_stereo_to_mono,
+                    tegra210_i2s_cput_stereo_to_mono),
        SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_cget_mono_to_stereo,
+                    tegra210_i2s_cput_mono_to_stereo),
        SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_pget_mono_to_stereo,
+                    tegra210_i2s_pput_mono_to_stereo),
        SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_pget_stereo_to_mono,
+                    tegra210_i2s_pput_stereo_to_mono),
        SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1,
-                      0, tegra210_i2s_get_control, tegra210_i2s_put_control),
-       SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
+                      0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th),
+       SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0,
+                      tegra210_i2s_get_bclk_ratio,
+                      tegra210_i2s_put_bclk_ratio),
 };
 
 static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = {
index 55e61776c565ad68cb24f86e59953e4bb804c69c..51d375573cfa3a05d302f70f66de0427b78e1238 100644 (file)
@@ -192,24 +192,24 @@ static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
-static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
-                                  struct snd_ctl_elem_value *ucontrol)
+static int tegra210_mixer_apply_gain(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol,
+                                    bool instant_gain)
 {
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt);
        unsigned int reg = mc->reg, id;
-       bool instant_gain = false;
        int err;
 
-       if (strstr(kcontrol->id.name, "Instant Gain Volume"))
-               instant_gain = true;
-
        /* Save gain value for specific MIXER input */
        id = (reg - TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_0) /
             TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_STRIDE;
 
+       if (mixer->gain_value[id] == ucontrol->value.integer.value[0])
+               return 0;
+
        mixer->gain_value[id] = ucontrol->value.integer.value[0];
 
        err = tegra210_mixer_configure_gain(cmpnt, id, instant_gain);
@@ -221,6 +221,18 @@ static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
        return 1;
 }
 
+static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
+                                  struct snd_ctl_elem_value *ucontrol)
+{
+       return tegra210_mixer_apply_gain(kcontrol, ucontrol, false);
+}
+
+static int tegra210_mixer_put_instant_gain(struct snd_kcontrol *kcontrol,
+                                          struct snd_ctl_elem_value *ucontrol)
+{
+       return tegra210_mixer_apply_gain(kcontrol, ucontrol, true);
+}
+
 static int tegra210_mixer_set_audio_cif(struct tegra210_mixer *mixer,
                                        struct snd_pcm_hw_params *params,
                                        unsigned int reg,
@@ -388,7 +400,7 @@ ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG);
        SOC_SINGLE_EXT("RX" #id " Instant Gain Volume",         \
                       MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0,    \
                       0x20000, 0, tegra210_mixer_get_gain,     \
-                      tegra210_mixer_put_gain),
+                      tegra210_mixer_put_instant_gain),
 
 /* Volume controls for all MIXER inputs */
 static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = {
index 7b9c7006e4197e5fc3537b448de461285b019a2a..85b155887ec2144ac7c8c26a6da6c7dd228da844 100644 (file)
@@ -136,7 +136,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
        unsigned int value;
-       u8 mute_mask;
+       u8 new_mask, old_mask;
        int err;
 
        pm_runtime_get_sync(cmpnt->dev);
@@ -148,11 +148,19 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
        if (err < 0)
                goto end;
 
-       mute_mask = ucontrol->value.integer.value[0];
+       regmap_read(mvc->regmap, TEGRA210_MVC_CTRL, &value);
+
+       old_mask = (value >> TEGRA210_MVC_MUTE_SHIFT) & TEGRA210_MUTE_MASK_EN;
+       new_mask = ucontrol->value.integer.value[0];
+
+       if (new_mask == old_mask) {
+               err = 0;
+               goto end;
+       }
 
        err = regmap_update_bits(mvc->regmap, mc->reg,
                                 TEGRA210_MVC_MUTE_MASK,
-                                mute_mask << TEGRA210_MVC_MUTE_SHIFT);
+                                new_mask << TEGRA210_MVC_MUTE_SHIFT);
        if (err < 0)
                goto end;
 
@@ -195,7 +203,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
        unsigned int reg = mc->reg;
        unsigned int value;
        u8 chan;
-       int err;
+       int err, old_volume;
 
        pm_runtime_get_sync(cmpnt->dev);
 
@@ -207,10 +215,16 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
                goto end;
 
        chan = (reg - TEGRA210_MVC_TARGET_VOL) / REG_SIZE;
+       old_volume = mvc->volume[chan];
 
        tegra210_mvc_conv_vol(mvc, chan,
                              ucontrol->value.integer.value[0]);
 
+       if (mvc->volume[chan] == old_volume) {
+               err = 0;
+               goto end;
+       }
+
        /* Configure init volume same as target volume */
        regmap_write(mvc->regmap,
                TEGRA210_MVC_REG_OFFSET(TEGRA210_MVC_INIT_VOL, chan),
@@ -275,7 +289,7 @@ static int tegra210_mvc_get_curve_type(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
 
-       ucontrol->value.integer.value[0] = mvc->curve_type;
+       ucontrol->value.enumerated.item[0] = mvc->curve_type;
 
        return 0;
 }
@@ -285,7 +299,7 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
-       int value;
+       unsigned int value;
 
        regmap_read(mvc->regmap, TEGRA210_MVC_ENABLE, &value);
        if (value & TEGRA210_MVC_EN) {
@@ -294,10 +308,10 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
                return -EINVAL;
        }
 
-       if (mvc->curve_type == ucontrol->value.integer.value[0])
+       if (mvc->curve_type == ucontrol->value.enumerated.item[0])
                return 0;
 
-       mvc->curve_type = ucontrol->value.integer.value[0];
+       mvc->curve_type = ucontrol->value.enumerated.item[0];
 
        tegra210_mvc_reset_vol_settings(mvc, cmpnt->dev);
 
index dc477ee1b82cd06041bed5ca4940a21d856f1398..7a2227ed3df6b4b7177cf41c151efb039771d852 100644 (file)
@@ -3244,46 +3244,107 @@ static int tegra210_sfc_init(struct snd_soc_dapm_widget *w,
        return tegra210_sfc_write_coeff_ram(cmpnt);
 }
 
-static int tegra210_sfc_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iget_stereo_to_mono(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
 
-       if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
-               ucontrol->value.integer.value[0] =
-                       sfc->stereo_to_mono[SFC_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
-               ucontrol->value.integer.value[0] =
-                       sfc->mono_to_stereo[SFC_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
-               ucontrol->value.integer.value[0] =
-                       sfc->stereo_to_mono[SFC_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
-               ucontrol->value.integer.value[0] =
-                       sfc->mono_to_stereo[SFC_TX_PATH];
+       ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_RX_PATH];
 
        return 0;
 }
 
-static int tegra210_sfc_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iput_stereo_to_mono(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
-       int value = ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
-               sfc->stereo_to_mono[SFC_RX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
-               sfc->mono_to_stereo[SFC_RX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
-               sfc->stereo_to_mono[SFC_TX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
-               sfc->mono_to_stereo[SFC_TX_PATH] = value;
-       else
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->stereo_to_mono[SFC_RX_PATH])
+               return 0;
+
+       sfc->stereo_to_mono[SFC_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_iget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_iput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->mono_to_stereo[SFC_RX_PATH])
                return 0;
 
+       sfc->mono_to_stereo[SFC_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_oget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_oput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->stereo_to_mono[SFC_TX_PATH])
+               return 0;
+
+       sfc->stereo_to_mono[SFC_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_oget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_oput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->mono_to_stereo[SFC_TX_PATH])
+               return 0;
+
+       sfc->mono_to_stereo[SFC_TX_PATH] = value;
+
        return 1;
 }
 
@@ -3384,13 +3445,17 @@ static const struct soc_enum tegra210_sfc_mono_conv_enum =
 
 static const struct snd_kcontrol_new tegra210_sfc_controls[] = {
        SOC_ENUM_EXT("Input Stereo To Mono", tegra210_sfc_stereo_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_iget_stereo_to_mono,
+                    tegra210_sfc_iput_stereo_to_mono),
        SOC_ENUM_EXT("Input Mono To Stereo", tegra210_sfc_mono_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_iget_mono_to_stereo,
+                    tegra210_sfc_iput_mono_to_stereo),
        SOC_ENUM_EXT("Output Stereo To Mono", tegra210_sfc_stereo_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_oget_stereo_to_mono,
+                    tegra210_sfc_oput_stereo_to_mono),
        SOC_ENUM_EXT("Output Mono To Stereo", tegra210_sfc_mono_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_oget_mono_to_stereo,
+                    tegra210_sfc_oput_mono_to_stereo),
 };
 
 static const struct snd_soc_component_driver tegra210_sfc_cmpnt = {
index a7e54a08fb54c41b7b5da231119caab66d5431cc..3e8df500cfbd41d4139906348dfb259fb4641fab 100644 (file)
@@ -7,6 +7,7 @@
 #include <assert.h>
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
+#include <linux/math.h>
 #include <endian.h>
 #include <byteswap.h>
 
@@ -14,8 +15,6 @@
 #define UINT_MAX       (~0U)
 #endif
 
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-
 #define PERF_ALIGN(x, a)       __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
 #define __PERF_ALIGN_MASK(x, mask)     (((x)+(mask))&~(mask))
 
        _min1 < _min2 ? _min1 : _min2; })
 #endif
 
-#ifndef roundup
-#define roundup(x, y) (                                \
-{                                                      \
-       const typeof(y) __y = y;                       \
-       (((x) + (__y - 1)) / __y) * __y;               \
-}                                                      \
-)
-#endif
-
 #ifndef BUG_ON
 #ifdef NDEBUG
 #define BUG_ON(cond) do { if (cond) {} } while (0)
@@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
 #define current_gfp_context(k) 0
 #define synchronize_rcu()
 
diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h
new file mode 100644 (file)
index 0000000..4e7af99
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _TOOLS_MATH_H
+#define _TOOLS_MATH_H
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#ifndef roundup
+#define roundup(x, y) (                                \
+{                                                      \
+       const typeof(y) __y = y;                       \
+       (((x) + (__y - 1)) / __y) * __y;               \
+}                                                      \
+)
+#endif
+
+#endif
index 81a4c543ff7ea5b966b4de08195dea3f9068b1c6..4b384c907027eb4e21cf0997c1cf2c0f5710986c 100644 (file)
@@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf)
                        return -1;
                }
                memset(sym, 0, sizeof(*sym));
+               INIT_LIST_HEAD(&sym->pv_target);
                sym->alias = sym;
 
                sym->idx = i;
index c90c7084e45a9c68b022846189c5155600626551..bdf699f6552bed6432765c7ce37abbd1d520395c 100644 (file)
@@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
            !strcmp(func->name, "_paravirt_ident_64"))
                return;
 
+       /* already added this function */
+       if (!list_empty(&func->pv_target))
+               return;
+
        list_add(&func->pv_target, &f->pv_ops[idx].targets);
        f->pv_ops[idx].clean = false;
 }
index 565fccdfe6e954a4bec7d11bebb53a108623be56..016cff473cfc483963cd1b4586243fa3e73378e7 100644 (file)
@@ -1,5 +1,8 @@
 #ifndef _LINUX_LOCKDEP_H
 #define _LINUX_LOCKDEP_H
+
+#include <linux/spinlock.h>
+
 struct lock_class_key {
        unsigned int a;
 };
index f968dfd4ee88929d523824c948ff8a1447120ed9..aed9dc3ca1e9eeb45246d643e48da4817f4c542c 100644 (file)
@@ -12,6 +12,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/resource.h>
 
 #include "test_util.h"
 
@@ -40,10 +41,39 @@ int main(int argc, char *argv[])
 {
        int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
        int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+       /*
+        * Number of file descriptors reqired, KVM_CAP_MAX_VCPUS for vCPU fds +
+        * an arbitrary number for everything else.
+        */
+       int nr_fds_wanted = kvm_max_vcpus + 100;
+       struct rlimit rl;
 
        pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
        pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
 
+       /*
+        * Check that we're allowed to open nr_fds_wanted file descriptors and
+        * try raising the limits if needed.
+        */
+       TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
+
+       if (rl.rlim_cur < nr_fds_wanted) {
+               rl.rlim_cur = nr_fds_wanted;
+               if (rl.rlim_max < nr_fds_wanted) {
+                       int old_rlim_max = rl.rlim_max;
+                       rl.rlim_max = nr_fds_wanted;
+
+                       int r = setrlimit(RLIMIT_NOFILE, &rl);
+                       if (r < 0) {
+                               printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+                                      old_rlim_max, nr_fds_wanted);
+                               exit(KSFT_SKIP);
+                       }
+               } else {
+                       TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
+               }
+       }
+
        /*
         * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
         * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
index 3836322add00ce894f11e72233ee34198d6a66e2..ba1fdc3dcf4a90319f1a9d7cd8dd9bbeaaa5f5b4 100644 (file)
@@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 #ifdef __s390x__
        alignment = max(0x100000, alignment);
 #endif
-       guest_test_phys_mem = align_down(guest_test_virt_mem, alignment);
+       guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
 
        /* Set up the shared data structure test_args */
        test_args.vm = vm;
index 91d88aaa989928723be6aa575bc967082b16ca07..672915ce73d8f6a363b5e2ea690f3fc44e7c9b01 100644 (file)
@@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
        vcpu_set_cpuid(vm, VCPU_ID, cpuid);
 }
 
-static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
-                                  struct kvm_cpuid2 *best)
+static void guest_test_msrs_access(void)
 {
        struct kvm_run *run;
+       struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0, r;
        struct kvm_cpuid_entry2 feat = {
@@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
        struct kvm_cpuid_entry2 dbg = {
                .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
        };
-       struct kvm_enable_cap cap = {0};
-
-       run = vcpu_state(vm, VCPU_ID);
+       struct kvm_cpuid2 *best;
+       vm_vaddr_t msr_gva;
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+               .args = {1}
+       };
+       struct msr_data *msr;
 
        while (true) {
+               vm = vm_create_default(VCPU_ID, 0, guest_msr);
+
+               msr_gva = vm_vaddr_alloc_page(vm);
+               memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
+               msr = addr_gva2hva(vm, msr_gva);
+
+               vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
+               vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+               vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+               best = kvm_get_supported_hv_cpuid();
+
+               vm_init_descriptor_tables(vm);
+               vcpu_init_descriptor_tables(vm, VCPU_ID);
+               vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+
+               run = vcpu_state(vm, VCPU_ID);
+
                switch (stage) {
                case 0:
                        /*
@@ -315,6 +338,7 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
                         * capability enabled and guest visible CPUID bit unset.
                         */
                        cap.cap = KVM_CAP_HYPERV_SYNIC2;
+                       cap.args[0] = 0;
                        vcpu_enable_cap(vm, VCPU_ID, &cap);
                        break;
                case 22:
@@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
 
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_SYNC:
-                       TEST_ASSERT(uc.args[1] == stage,
-                                   "Unexpected stage: %ld (%d expected)\n",
-                                   uc.args[1], stage);
+                       TEST_ASSERT(uc.args[1] == 0,
+                                   "Unexpected stage: %ld (0 expected)\n",
+                                   uc.args[1]);
                        break;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
                }
 
                stage++;
+               kvm_vm_free(vm);
        }
 }
 
-static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
-                                    void *input, void *output, struct kvm_cpuid2 *best)
+static void guest_test_hcalls_access(void)
 {
        struct kvm_run *run;
+       struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0, r;
        struct kvm_cpuid_entry2 feat = {
@@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
        struct kvm_cpuid_entry2 dbg = {
                .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
        };
-
-       run = vcpu_state(vm, VCPU_ID);
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+               .args = {1}
+       };
+       vm_vaddr_t hcall_page, hcall_params;
+       struct hcall_data *hcall;
+       struct kvm_cpuid2 *best;
 
        while (true) {
+               vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+
+               vm_init_descriptor_tables(vm);
+               vcpu_init_descriptor_tables(vm, VCPU_ID);
+               vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
+               /* Hypercall input/output */
+               hcall_page = vm_vaddr_alloc_pages(vm, 2);
+               hcall = addr_gva2hva(vm, hcall_page);
+               memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
+
+               hcall_params = vm_vaddr_alloc_page(vm);
+               memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
+
+               vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+               vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+               vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+               best = kvm_get_supported_hv_cpuid();
+
+               run = vcpu_state(vm, VCPU_ID);
+
                switch (stage) {
                case 0:
                        hcall->control = 0xdeadbeef;
@@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
 
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_SYNC:
-                       TEST_ASSERT(uc.args[1] == stage,
-                                   "Unexpected stage: %ld (%d expected)\n",
-                                   uc.args[1], stage);
+                       TEST_ASSERT(uc.args[1] == 0,
+                                   "Unexpected stage: %ld (0 expected)\n",
+                                   uc.args[1]);
                        break;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
                }
 
                stage++;
+               kvm_vm_free(vm);
        }
 }
 
 int main(void)
 {
-       struct kvm_cpuid2 *best;
-       struct kvm_vm *vm;
-       vm_vaddr_t msr_gva, hcall_page, hcall_params;
-       struct kvm_enable_cap cap = {
-               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
-               .args = {1}
-       };
-
-       /* Test MSRs */
-       vm = vm_create_default(VCPU_ID, 0, guest_msr);
-
-       msr_gva = vm_vaddr_alloc_page(vm);
-       memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
-       vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
-       vcpu_enable_cap(vm, VCPU_ID, &cap);
-
-       vcpu_set_hv_cpuid(vm, VCPU_ID);
-
-       best = kvm_get_supported_hv_cpuid();
-
-       vm_init_descriptor_tables(vm);
-       vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
-
        pr_info("Testing access to Hyper-V specific MSRs\n");
-       guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
-                              best);
-       kvm_vm_free(vm);
-
-       /* Test hypercalls */
-       vm = vm_create_default(VCPU_ID, 0, guest_hcall);
-
-       vm_init_descriptor_tables(vm);
-       vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
-
-       /* Hypercall input/output */
-       hcall_page = vm_vaddr_alloc_pages(vm, 2);
-       memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
-
-       hcall_params = vm_vaddr_alloc_page(vm);
-       memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
-
-       vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
-       vcpu_enable_cap(vm, VCPU_ID, &cap);
-
-       vcpu_set_hv_cpuid(vm, VCPU_ID);
-
-       best = kvm_get_supported_hv_cpuid();
+       guest_test_msrs_access();
 
        pr_info("Testing access to Hyper-V hypercalls\n");
-       guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
-                                addr_gva2hva(vm, hcall_page),
-                                addr_gva2hva(vm, hcall_page) + getpagesize(),
-                                best);
-
-       kvm_vm_free(vm);
+       guest_test_hcalls_access();
 }
index 5ba325cd64bfd80c02dd3ff441bb5330070700b2..29b18d565cf4ce1cfbbce5b29e4298dc781e99c1 100644 (file)
@@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es)
        return vm;
 }
 
-static struct kvm_vm *__vm_create(void)
+static struct kvm_vm *aux_vm_create(bool with_vcpus)
 {
        struct kvm_vm *vm;
        int i;
 
        vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+       if (!with_vcpus)
+               return vm;
+
        for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
                vm_vcpu_add(vm, i);
 
@@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es)
 {
        struct kvm_vm *src_vm;
        struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
-       int i;
+       int i, ret;
 
        src_vm = sev_vm_create(es);
        for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
-               dst_vms[i] = __vm_create();
+               dst_vms[i] = aux_vm_create(true);
 
        /* Initial migration from the src to the first dst. */
        sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
@@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es)
                sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
 
        /* Migrate the guest back to the original VM. */
-       sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+       ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+       TEST_ASSERT(ret == -1 && errno == EIO,
+                   "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
+                   errno);
 
        kvm_vm_free(src_vm);
        for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
@@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void)
 
        for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
                pthread_join(pt[i], NULL);
+       for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
+               kvm_vm_free(input[i].vm);
 }
 
 static void test_sev_migrate_parameters(void)
@@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void)
        sev_vm = sev_vm_create(/* es= */ false);
        sev_es_vm = sev_vm_create(/* es= */ true);
        vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
-       vm_no_sev = __vm_create();
+       vm_no_sev = aux_vm_create(true);
        sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
        sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
        vm_vcpu_add(sev_es_vm_no_vmsa, 1);
 
-
        ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
        TEST_ASSERT(
                ret == -1 && errno == EINVAL,
@@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void)
        TEST_ASSERT(ret == -1 && errno == EINVAL,
                    "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
                    errno);
+
+       kvm_vm_free(sev_vm);
+       kvm_vm_free(sev_es_vm);
+       kvm_vm_free(sev_es_vm_no_vmsa);
+       kvm_vm_free(vm_no_vcpu);
+       kvm_vm_free(vm_no_sev);
+}
+
+static int __sev_mirror_create(int dst_fd, int src_fd)
+{
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
+               .args = { src_fd }
+       };
+
+       return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+}
+
+
+static void sev_mirror_create(int dst_fd, int src_fd)
+{
+       int ret;
+
+       ret = __sev_mirror_create(dst_fd, src_fd);
+       TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
+}
+
+static void test_sev_mirror(bool es)
+{
+       struct kvm_vm *src_vm, *dst_vm;
+       struct kvm_sev_launch_start start = {
+               .policy = es ? SEV_POLICY_ES : 0
+       };
+       int i;
+
+       src_vm = sev_vm_create(es);
+       dst_vm = aux_vm_create(false);
+
+       sev_mirror_create(dst_vm->fd, src_vm->fd);
+
+       /* Check that we can complete creation of the mirror VM.  */
+       for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
+               vm_vcpu_add(dst_vm, i);
+       sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start);
+       if (es)
+               sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+
+       kvm_vm_free(src_vm);
+       kvm_vm_free(dst_vm);
+}
+
+static void test_sev_mirror_parameters(void)
+{
+       struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
+       int ret;
+
+       sev_vm = sev_vm_create(/* es= */ false);
+       sev_es_vm = sev_vm_create(/* es= */ true);
+       vm_with_vcpu = aux_vm_create(true);
+       vm_no_vcpu = aux_vm_create(false);
+
+       ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to self. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
+       TEST_ASSERT(ret == -1 && errno == EINVAL,
+                   "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
+                   errno);
+
+       ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
+               ret, errno);
+
+       kvm_vm_free(sev_vm);
+       kvm_vm_free(sev_es_vm);
+       kvm_vm_free(vm_with_vcpu);
+       kvm_vm_free(vm_no_vcpu);
+}
+
+static void test_sev_move_copy(void)
+{
+       struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
+       int ret;
+
+       sev_vm = sev_vm_create(/* es= */ false);
+       dst_vm = aux_vm_create(true);
+       mirror_vm = aux_vm_create(false);
+       dst_mirror_vm = aux_vm_create(false);
+
+       sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+       ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+       TEST_ASSERT(ret == -1 && errno == EBUSY,
+                   "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+                   errno);
+
+       /* The mirror itself can be migrated.  */
+       sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
+       ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+       TEST_ASSERT(ret == -1 && errno == EBUSY,
+                   "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+                   errno);
+
+       /*
+        * mirror_vm is not a mirror anymore, dst_mirror_vm is.  Thus,
+        * the owner can be copied as soon as dst_mirror_vm is gone.
+        */
+       kvm_vm_free(dst_mirror_vm);
+       sev_migrate_from(dst_vm->fd, sev_vm->fd);
+
+       kvm_vm_free(mirror_vm);
+       kvm_vm_free(dst_vm);
+       kvm_vm_free(sev_vm);
 }
 
 int main(int argc, char *argv[])
 {
-       test_sev_migrate_from(/* es= */ false);
-       test_sev_migrate_from(/* es= */ true);
-       test_sev_migrate_locking();
-       test_sev_migrate_parameters();
+       if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
+               test_sev_migrate_from(/* es= */ false);
+               test_sev_migrate_from(/* es= */ true);
+               test_sev_migrate_locking();
+               test_sev_migrate_parameters();
+               if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
+                       test_sev_move_copy();
+       }
+       if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
+               test_sev_mirror(/* es= */ false);
+               test_sev_mirror(/* es= */ true);
+               test_sev_mirror_parameters();
+       }
        return 0;
 }
index 3313566ce9062e285a9872ac2ce6c2ab3c1bc652..7f5b265fcb90551b167759a770b0302eb028cf13 100755 (executable)
@@ -4002,8 +4002,8 @@ EOF
 ################################################################################
 # main
 
-TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter"
-TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter"
+TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
+TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
 TESTS_OTHER="use_cases"
 
 PAUSE_ON_FAIL=no
index ebc4ee0fe179ff1c135602b4cb332c05293dd18b..8a9461aa0878a0b6ea74fc9ec48f370846e39397 100755 (executable)
@@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2
 n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
 ip2 link del wg0
 ip2 link del wg1
-! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
+sleep 1
+read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
+(( tx_bytes_after - tx_bytes_before < 70000 ))
 
 ip0 link del wg1
 ip1 link del wg0
@@ -609,6 +613,28 @@ ip0 link set wg0 up
 kill $ncat_pid
 ip0 link del wg0
 
+# Ensure that dst_cache references don't outlive netns lifetime
+ip1 link add dev wg0 type wireguard
+ip2 link add dev wg0 type wireguard
+configure_peers
+ip1 link add veth1 type veth peer name veth2
+ip1 link set veth2 netns $netns2
+ip1 addr add fd00:aa::1/64 dev veth1
+ip2 addr add fd00:aa::2/64 dev veth2
+ip1 link set veth1 up
+ip2 link set veth2 up
+waitiface $netns1 veth1
+waitiface $netns2 veth2
+ip1 -6 route add default dev veth1 via fd00:aa::2
+ip2 -6 route add default dev veth2 via fd00:aa::1
+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
+n1 ping6 -c 1 fd00::2
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
 # Ensure there aren't circular reference loops
 ip1 link add wg1 type wireguard
 ip2 link add wg2 type wireguard
@@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
 done < /dev/kmsg
 alldeleted=1
 for object in "${!objects[@]}"; do
-       if [[ ${objects["$object"]} != *createddestroyed ]]; then
+       if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
                echo "Error: $object: merely ${objects["$object"]}" >&3
                alldeleted=0
        fi
index fe07d97df9fa89044d6493452226b0d31c3534c1..2b321b8a96cf3cc67e15e75461fd5c747307055a 100644 (file)
@@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_TRACE_IRQFLAGS=y
 CONFIG_DEBUG_BUGVERBOSE=y
 CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
+CONFIG_DEBUG_PLIST=y
 CONFIG_PROVE_RCU=y
 CONFIG_SPARSE_RCU_POINTER=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=21
index 74db83a0aedd8b67be991e0f856c53ec6ec7c9cd..a9b5a520a1d22e7de62729bf27f746993049d595 100644 (file)
@@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_LOG_BUF_SHIFT=18
 CONFIG_PRINTK_TIME=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_LEGACY_VSYSCALL_NONE=y
index 9646bb9112c101ea0398f7d447bd563d2008de50..72c4e6b393896aa9f4a7fa3531151ee8df3d1268 100644 (file)
@@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 
 static int kvm_set_memslot(struct kvm *kvm,
                           const struct kvm_userspace_memory_region *mem,
-                          struct kvm_memory_slot *old,
                           struct kvm_memory_slot *new, int as_id,
                           enum kvm_mr_change change)
 {
-       struct kvm_memory_slot *slot;
+       struct kvm_memory_slot *slot, old;
        struct kvm_memslots *slots;
        int r;
 
@@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
                 * Note, the INVALID flag needs to be in the appropriate entry
                 * in the freshly allocated memslots, not in @old or @new.
                 */
-               slot = id_to_memslot(slots, old->id);
+               slot = id_to_memslot(slots, new->id);
                slot->flags |= KVM_MEMSLOT_INVALID;
 
                /*
@@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
                kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
        }
 
+       /*
+        * Make a full copy of the old memslot, the pointer will become stale
+        * when the memslots are re-sorted by update_memslots(), and the old
+        * memslot needs to be referenced after calling update_memslots(), e.g.
+        * to free its resources and for arch specific behavior.  This needs to
+        * happen *after* (re)acquiring slots_arch_lock.
+        */
+       slot = id_to_memslot(slots, new->id);
+       if (slot) {
+               old = *slot;
+       } else {
+               WARN_ON_ONCE(change != KVM_MR_CREATE);
+               memset(&old, 0, sizeof(old));
+               old.id = new->id;
+               old.as_id = as_id;
+       }
+
+       /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+       memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
        r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
        if (r)
                goto out_slots;
@@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
        update_memslots(slots, new, change);
        slots = install_new_memslots(kvm, as_id, slots);
 
-       kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+       kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+       /* Free the old memslot's metadata.  Note, this is the full copy!!! */
+       if (change == KVM_MR_DELETE)
+               kvm_free_memslot(kvm, &old);
 
        kvfree(slots);
        return 0;
 
 out_slots:
        if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-               slot = id_to_memslot(slots, old->id);
+               slot = id_to_memslot(slots, new->id);
                slot->flags &= ~KVM_MEMSLOT_INVALID;
                slots = install_new_memslots(kvm, as_id, slots);
        } else {
@@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
                              struct kvm_memory_slot *old, int as_id)
 {
        struct kvm_memory_slot new;
-       int r;
 
        if (!old->npages)
                return -EINVAL;
@@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
         */
        new.as_id = as_id;
 
-       r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
-       if (r)
-               return r;
-
-       kvm_free_memslot(kvm, old);
-       return 0;
+       return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
 }
 
 /*
@@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
        id = (u16)mem->slot;
 
        /* General sanity checks */
-       if (mem->memory_size & (PAGE_SIZE - 1))
+       if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+           (mem->memory_size != (unsigned long)mem->memory_size))
                return -EINVAL;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                return -EINVAL;
@@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        if (!old.npages) {
                change = KVM_MR_CREATE;
                new.dirty_bitmap = NULL;
-               memset(&new.arch, 0, sizeof(new.arch));
        } else { /* Modify an existing slot. */
                if ((new.userspace_addr != old.userspace_addr) ||
                    (new.npages != old.npages) ||
@@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
                else /* Nothing to change. */
                        return 0;
 
-               /* Copy dirty_bitmap and arch from the current memslot. */
+               /* Copy dirty_bitmap from the current memslot. */
                new.dirty_bitmap = old.dirty_bitmap;
-               memcpy(&new.arch, &old.arch, sizeof(new.arch));
        }
 
        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        bitmap_set(new.dirty_bitmap, 0, new.npages);
        }
 
-       r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+       r = kvm_set_memslot(kvm, mem, &new, as_id, change);
        if (r)
                goto out_bitmap;
 
@@ -2915,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
-       BUG_ON(len + offset > ghc->len);
+       if (WARN_ON_ONCE(len + offset > ghc->len))
+               return -EINVAL;
 
        if (slots->generation != ghc->generation) {
                if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -2952,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
-       BUG_ON(len + offset > ghc->len);
+       if (WARN_ON_ONCE(len + offset > ghc->len))
+               return -EINVAL;
 
        if (slots->generation != ghc->generation) {
                if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))