- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
+ - 'drm_for_each_crtc_reverse'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_member'
- - 'for_each_mem_region'
- - 'for_each_memblock_type'
- 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range'
- '__for_each_mem_range'
- 'for_each_mem_range'
- '__for_each_mem_range_rev'
- 'for_each_mem_range_rev'
+ - 'for_each_mem_region'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- - 'for_each_rtd_codec_dais_rollback'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- - 'for_each_rtd_cpu_dais_rollback'
- 'for_each_rtd_dais'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_unicast_dest_pgid'
+ - 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
+ - 'hlist_for_each_entry_srcu'
- '__hlist_for_each_rcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
+ - 'list_for_each_entry_srcu'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_safe'
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
+ - 'pcl_for_each_chunk'
+ - 'pcl_for_each_segment'
- 'pcm_for_each_format'
- 'ping_portaddr_for_each_entry'
- 'plist_for_each'
#
# Please keep this list dictionary sorted.
#
-# This comment is parsed by git-shortlog:
-# repo-abbrev: /pub/scm/linux/kernel/git/
-#
Aaron Durbin <adurbin@google.com>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Mark Brown <broonie@sirena.org.uk>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
+Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/userspace-api/media $2 && \
PYTHONDONTWRITEBYTECODE=1 \
BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
- $(PYTHON) $(srctree)/scripts/jobserver-exec \
+ $(PYTHON3) $(srctree)/scripts/jobserver-exec \
$(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
$(SPHINXBUILD) \
-b $2 \
and RK3399 SoCs. The driver is located under drivers/staging/media/rkisp1
and uses the Media-Controller API.
+Revisions
+=========
+
+Several smaller revisions of this ISP were introduced in later SoCs. The known
+revisions are listed in the enum :c:type:`rkisp1_cif_isp_version` in the UAPI,
+and the revision of the ISP inside the running SoC can be read from the
+hw_revision field of struct media_device_info as returned by the
+MEDIA_IOC_DEVICE_INFO ioctl.
+
+Versions in use are:
+
+- RKISP1_V10: used at least in rk3288 and rk3399
+- RKISP1_V11: declared in the original vendor code, but not used
+- RKISP1_V12: used at least in rk3326 and px30
+- RKISP1_V13: used at least in rk1808
+
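For instance, userspace can read this field with the MEDIA_IOC_DEVICE_INFO
ioctl; a minimal sketch, assuming the ISP is registered as /dev/media0::

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/media.h>

  int main(void)
  {
          struct media_device_info info = { 0 };
          int fd = open("/dev/media0", O_RDONLY);      /* assumed node path */

          if (fd < 0 || ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) < 0) {
                  perror("MEDIA_IOC_DEVICE_INFO");
                  return 1;
          }
          /* On rkisp1 this holds one of the RKISP1_V* values listed above. */
          printf("ISP hw_revision: 0x%x\n", info.hw_revision);
          return 0;
  }
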
Topology
========
.. _rkisp1_topology_graph:
~~~~~~~~~~~~~~~~~~
This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.
+``objtool`` requires that all code be contained in an ELF symbol. Symbol
+names with a ``.L`` prefix do not emit symbol table entries. ``.L``-prefixed
+symbols can be used within a code region, but should be avoided for denoting
+a range of code via ``SYM_*_START/END`` annotations.
+
* ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the
most frequent markings**. They are used for functions with standard calling
conventions -- global and local. Like in C, they both align the functions to
by this cpu (see ./idle-states.yaml).
capacity-dmips-mhz:
- $ref: '/schemas/types.yaml#/definitions/uint32'
description:
u32 value representing CPU capacity (see ./cpu-capacity.txt) in
DMIPS/MHz, relative to highest capacity-dmips-mhz
documents on how to describe the way the sii902x device is
connected to the rest of the audio system:
Documentation/devicetree/bindings/sound/simple-card.yaml
- Documentation/devicetree/bindings/sound/audio-graph-card.txt
+ Documentation/devicetree/bindings/sound/audio-graph-card.yaml
Note: In case of the audio-graph-card binding the used port
index should be 3.
For a description of the display interface sink function blocks, see
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt and
-Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
+Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml.
Required properties (all function blocks):
- compatible: "mediatek,<chip>-disp-<function>", one of
"mediatek,<chip>-disp-wdma"
the supported chips are mt2701, mt8167 and mt8173.
- larb: Should contain a phandle pointing to the local arbiter device as defined
- in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
- iommus: Should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
for details.
wlf,micd-timeout-ms:
description:
Timeout for microphone detection, specified in milliseconds.
- $ref: "/schemas/types.yaml#/definitions/uint32"
wlf,micd-force-micbias:
description:
description:
This property controls the Accumulation Dead band which allows to set the
level of current below which no accumulation takes place.
- $ref: /schemas/types.yaml#/definitions/uint32
maximum: 255
default: 0
description: |
Temperature sensor trimming factor. It can be used to manually adjust the
temperature measurements within 7.130 degrees Celsius.
- maxItems: 1
- items:
- default: 0
- minimum: 0
- maximum: 7130
+ default: 0
+ minimum: 0
+ maximum: 7130
additionalProperties: false
ti,bus-range-microvolt:
description: |
This is the operating range of the bus voltage in microvolt
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [16000000, 32000000]
default: 32000000
i2c-gpio,delay-us:
description: delay between GPIO operations (may depend on each platform)
- $ref: /schemas/types.yaml#/definitions/uint32
i2c-gpio,timeout-ms:
description: timeout to get data
- $ref: /schemas/types.yaml#/definitions/uint32
# Deprecated properties, do not use in new device tree sources:
gpios:
default: 400000
i2c-sda-hold-time-ns:
- maxItems: 1
description: |
The property should contain the SDA hold time in nanoseconds. This option
is only supported in hardware blocks version 1.11a or newer or on
Microsemi SoCs.
i2c-scl-falling-time-ns:
- maxItems: 1
description: |
The property should contain the SCL falling time in nanoseconds.
This value is used to compute the tLOW period.
default: 300
i2c-sda-falling-time-ns:
- maxItems: 1
description: |
The property should contain the SDA falling time in nanoseconds.
This value is used to compute the tHIGH period.
type: boolean
bipolar:
- description: see Documentation/devicetree/bindings/iio/adc/adc.txt
+ description: see Documentation/devicetree/bindings/iio/adc/adc.yaml
type: boolean
required:
maxItems: 1
shunt-resistor-micro-ohms:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
Value in micro Ohms of the shunt resistor connected between the RS+ and
RS- inputs, across which the current is measured. Value needed to compute
Resolution (bits) to use for conversions:
- can be 6, 8, 10 or 12 on stm32f4
- can be 8, 10, 12, 14 or 16 on stm32h7 and stm32mp1
- $ref: /schemas/types.yaml#/definitions/uint32
st,adc-channels:
description: |
const: 1
ti,channel0-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Channel 0 current in uA.
enum:
- 0
- 20
ti,channel3-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Channel 3 current in uA.
enum:
- 0
two properties must be present:
adi,range-microvolt:
- $ref: /schemas/types.yaml#/definitions/int32-array
description: |
Voltage output range specified as <minimum, maximum>
- enum:
- - [[0, 5000000]]
- - [[0, 10000000]]
- - [[-5000000, 5000000]]
- - [[-10000000, 10000000]]
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [5000000, 10000000]
+ - items:
+ - const: -5000000
+ - const: 5000000
+ - items:
+ - const: -10000000
+ - const: 10000000
adi,range-microamp:
- $ref: /schemas/types.yaml#/definitions/int32-array
description: |
Current output range specified as <minimum, maximum>
- enum:
- - [[0, 20000]]
- - [[0, 24000]]
- - [[4, 24000]]
- - [[-20000, 20000]]
- - [[-24000, 24000]]
- - [[-1000, 22000]]
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [20000, 24000]
+ - items:
+ - const: 4
+ - const: 24000
+ - items:
+ - const: -20000
+ - const: 20000
+ - items:
+ - const: -24000
+ - const: 24000
+ - items:
+ - const: -1000
+ - const: 22000
reset-gpios: true
adi,dc-dc-ilim-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [150000, 200000, 250000, 300000, 350000, 400000]
description: |
The dc-to-dc converter current limit.
description: Connected to ADC_RDY pin.
maxim,led-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
description: |
- compatible: "adc-keys"
- io-channels: Phandle to an ADC channel
- io-channel-names = "buttons";
- - keyup-threshold-microvolt: Voltage at which all the keys are considered up.
+ - keyup-threshold-microvolt: Voltage above or equal to which all the keys are
+ considered up.
Optional properties:
- poll-interval: Poll interval time in milliseconds
Required subnode-properties:
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
- - press-threshold-microvolt: Voltage ADC input when this key is pressed.
+ - press-threshold-microvolt: voltage above or equal to which this key is
+ considered pressed.
+
+No two values of press-threshold-microvolt may be the same.
+All values of press-threshold-microvolt must be less than
+keyup-threshold-microvolt.
Example:
press-threshold-microvolt = <500000>;
};
};
+
++--------------------------------+------------------------+
+| 2.000.000 <= value | no key pressed |
++--------------------------------+------------------------+
+| 1.500.000 <= value < 2.000.000 | KEY_VOLUMEUP pressed |
++--------------------------------+------------------------+
+| 1.000.000 <= value < 1.500.000 | KEY_VOLUMEDOWN pressed |
++--------------------------------+------------------------+
+| 500.000 <= value < 1.000.000 | KEY_ENTER pressed |
++--------------------------------+------------------------+
+| value < 500.000 | no key pressed |
++--------------------------------+------------------------+
- goodix,gt927
- goodix,gt9271
- goodix,gt928
+ - goodix,gt9286
- goodix,gt967
reg:
touchscreen-x-mm:
description: horizontal length in mm of the touchscreen
- $ref: /schemas/types.yaml#/definitions/uint32
touchscreen-y-mm:
description: vertical length in mm of the touchscreen
- $ref: /schemas/types.yaml#/definitions/uint32
dependencies:
touchscreen-size-x: [ touchscreen-size-y ]
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/richtek,rt8515.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT8515 1.5A dual channel LED driver
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The Richtek RT8515 is a dual channel (two mode) LED driver that
+ supports driving a white LED in flash or torch mode. The maximum
+ current for each mode is defined in hardware using two resistors
+ RFS and RTS.
+
+properties:
+ compatible:
+ const: richtek,rt8515
+
+ enf-gpios:
+ maxItems: 1
+ description: A connection to the 'ENF' (enable flash) pin.
+
+ ent-gpios:
+ maxItems: 1
+ description: A connection to the 'ENT' (enable torch) pin.
+
+ richtek,rfs-ohms:
+ minimum: 7680
+ maximum: 367000
+    description: The resistance value of the RFS resistor. This
+      resistor limits the maximum flash current. It must be set for
+      the property flash-max-microamp to work; the RFS resistor
+      defines the range of the dimmer setting (brightness) of the
+      flash LED.
+
+ richtek,rts-ohms:
+ minimum: 7680
+ maximum: 367000
+    description: The resistance value of the RTS resistor. This
+      resistor limits the maximum torch current. It must be set for
+      the property torch-max-microamp to work; the RTS resistor
+      defines the range of the dimmer setting (brightness) of the
+      torch LED.
+
+ led:
+ type: object
+ $ref: common.yaml#
+ properties:
+ function: true
+ color: true
+ flash-max-timeout-us: true
+
+ flash-max-microamp:
+ maximum: 700000
+ description: The maximum current for flash mode
+ is hardwired to the component using the RFS resistor to
+ ground. The maximum hardware current setting is calculated
+ according to the formula Imax = 5500 / RFS. The lowest
+ allowed resistance value is 7.86 kOhm giving an absolute
+ maximum current of 700mA. By setting this attribute in
+ the device tree, you can further restrict the maximum
+ current below the hardware limit. This requires the RFS
+          to be defined, as it determines the maximum range.
+
+ led-max-microamp:
+ maximum: 700000
+ description: The maximum current for torch mode
+ is hardwired to the component using the RTS resistor to
+ ground. The maximum hardware current setting is calculated
+ according to the formula Imax = 5500 / RTS. The lowest
+ allowed resistance value is 7.86 kOhm giving an absolute
+ maximum current of 700mA. By setting this attribute in
+ the device tree, you can further restrict the maximum
+ current below the hardware limit. This requires the RTS
+          to be defined, as it determines the maximum range.
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - ent-gpios
+ - enf-gpios
+ - led
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "richtek,rt8515";
+ enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ richtek,rfs-ohms = <16000>;
+ richtek,rts-ohms = <100000>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ flash-max-microamp = <150000>;
+ led-max-microamp = <25000>;
+ };
+ };
+
+...
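
As a rough illustration of the Imax = 5500 / R formula above (a sketch, not
part of the binding), the limits implied by the resistor values in the example
can be computed as:

  #include <stdio.h>

  /* Maximum current in microamps allowed by an RFS/RTS resistor. */
  static unsigned long long max_current_ua(unsigned long resistor_ohms)
  {
          return 5500000000ULL / resistor_ohms;   /* 5500 / R, scaled to uA */
  }

  int main(void)
  {
          /* Resistor values from the example above. */
          printf("flash limit: %llu uA\n", max_current_ua(16000));   /* 343750 */
          printf("torch limit: %llu uA\n", max_current_ua(100000));  /* 55000 */
          return 0;
  }

The flash-max-microamp and led-max-microamp values in the example stay well
below these hardware limits.
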
- power-domains: a phandle to the power domain, see
Documentation/devicetree/bindings/power/power_domain.txt for details.
- mediatek,larb: must contain the local arbiters in the current SoCs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
- power-domains: a phandle to the power domain, see
Documentation/devicetree/bindings/power/power_domain.txt for details.
- mediatek,larb: must contain the local arbiters in the current SoCs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
for details.
- mediatek,larb: must contain the local arbiters in the current SoCs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
Example:
waiting for I/O signalling and card power supply to be stable,
regardless of whether pwrseq-simple is used. Default to 10ms if
no available.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 10
supports-cqe:
description:
Delay in ms after powering the card and de-asserting the
reset-gpios (if any).
- $ref: /schemas/types.yaml#/definitions/uint32
power-off-delay-us:
description:
Delay in us after asserting the reset-gpios (if any)
during power off of the card.
- $ref: /schemas/types.yaml#/definitions/uint32
required:
- compatible
such as flow control thresholds.
rx-internal-delay-ps:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
RGMII Receive Clock Delay defined in pico seconds.
This is used for controllers that have configurable RX internal delays.
is used for components that can have configurable fifo sizes.
tx-internal-delay-ps:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
RGMII Transmit Clock Delay defined in pico seconds.
This is used for controllers that have configurable TX internal delays.
Triplet of delays. The 1st cell is reset pre-delay in micro
seconds. The 2nd cell is reset pulse in micro seconds. The 3rd
cell is reset post-delay in micro seconds.
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 3
maxItems: 3
for each of the battery capacity lookup table.
operating-range-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: operating temperature range of a battery
items:
- description: minimum temperature at which battery can operate
- description: maximum temperature at which battery can operate
ambient-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: safe range of ambient temperature
items:
- description: alert when ambient temperature is lower than this value
- description: alert when ambient temperature is higher than this value
alert-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: safe range of battery temperature
items:
- description: alert when battery temperature is lower than this value
maxItems: 1
input-current-limit-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Maximum input current in micro Amps.
minimum: 50000
maximum: 500000
description: IRQ line information.
dlg,irq-polling-delay-passive-ms:
- $ref: "/schemas/types.yaml#/definitions/uint32"
minimum: 1000
maximum: 10000
description: |
startup-delay-us:
description: startup time in microseconds
- $ref: /schemas/types.yaml#/definitions/uint32
off-on-delay-us:
description: off delay time in microseconds
- $ref: /schemas/types.yaml#/definitions/uint32
enable-active-high:
description:
1: chargeable
quartz-load-femtofarads:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
The capacitive load of the quartz(x-tal), expressed in femto
Farad (fF). The default value shall be listed (if optional),
deprecated: true
trickle-resistor-ohms:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
Selected resistor for trickle charger. Should be given
if trickle charger should be enabled.
description:
Rate at which poll occurs when auto-poll is set.
default 100ms.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 100
poll-timeout-ms:
description:
Poll timeout when auto-poll is set, default
3000ms.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 3000
required:
title: Mediatek MT8192 with MT6359, RT1015 and RT5682 ASoC sound card driver
maintainers:
- - Jiaxin Yu <jiaxin.yu@mediatek.com>
- - Shane Chien <shane.chien@mediatek.com>
+ - Jiaxin Yu <jiaxin.yu@mediatek.com>
+ - Shane Chien <shane.chien@mediatek.com>
description:
This binding describes the MT8192 sound card.
values of 2k, 4k or 8k. If set to 0 it will be off. If this node is not
mentioned or if the value is unknown, then micbias resistor is set to
4k.
- $ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 0, 2, 4, 8 ]
micbias-voltage-m-volts:
description: The bias voltage to be used in mVolts. The voltage can take
values from 1.25V to 3V by 250mV steps. If this node is not mentioned
or the value is unknown, then the value is set to 1.25V.
- $ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000 ]
lrclk-strength:
reg:
description: module registers
+ ranges: true
+
power-domains:
description:
PM domain provider node and an args specifier containing
'#size-cells':
const: 2
+ dma-coherent: true
+
patternProperties:
"^usb@":
type: object
pattern: "^watchdog(@.*|-[0-9a-f])?$"
timeout-sec:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
Contains the watchdog timeout in seconds.
The advantage of mounting with the "volatile" option is that all forms of
sync calls to the upper filesystem are omitted.
+In order to avoid giving a false sense of safety, the syncfs (and fsync)
+semantics of volatile mounts are slightly different from those of the rest of
+the VFS. If any writeback error occurs on the upperdir's filesystem after a
+volatile mount takes place, all sync functions will return an error. Once this
+condition is reached, the filesystem will not recover, and every subsequent sync
+call will return an error, even if the upperdir has not experienced a new error
+since the last sync call.
+
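As an illustration, here is a minimal sketch of how an application would
observe this behaviour, assuming an overlay is already mounted with the
"volatile" option and its mount point is passed as the first argument::

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(int argc, char **argv)
  {
          int fd;

          if (argc < 2)
                  return 1;
          fd = open(argv[1], O_RDONLY | O_DIRECTORY);
          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          /*
           * Once a writeback error has hit the upperdir, every syncfs()
           * on the volatile overlay keeps returning an error, even if no
           * new error has occurred since the previous call.
           */
          if (syncfs(fd) < 0)
                  perror("syncfs");
          return 0;
  }
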
When overlay is mounted with "volatile" option, the directory
"$workdir/work/incompat/volatile" is created. During next mount, overlay
checks for this directory and refuses to mount if present. This is a strong
We can analyse, change and add further code during compilation via
callbacks [2]_, GIMPLE [3]_, IPA [4]_ and RTL passes [5]_.
-The GCC plugin infrastructure of the kernel supports all gcc versions from
-4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
-separate directory.
-Plugin source files have to be compilable by both a C and a C++ compiler as well
-because gcc versions 4.5 and 4.6 are compiled by a C compiler,
-gcc-4.7 can be compiled by a C or a C++ compiler,
-and versions 4.8+ can only be compiled by a C++ compiler.
+The GCC plugin infrastructure of the kernel supports building out-of-tree
+modules, cross-compilation and building in a separate directory.
+Plugin source files have to be compilable by a C++ compiler.
-Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
-powerpc architectures.
+Currently the GCC plugin infrastructure supports only some architectures.
+Grep "select HAVE_GCC_PLUGINS" to find out which architectures support
+GCC plugins.
This infrastructure was ported from grsecurity [6]_ and PaX [7]_.
This is a compatibility header for GCC plugins.
It should be always included instead of individual gcc headers.
-**$(src)/scripts/gcc-plugin.sh**
-
- This script checks the availability of the included headers in
- gcc-common.h and chooses the proper host compiler to build the plugins
- (gcc-4.7 can be built by either gcc or g++).
-
**$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h**
These headers automatically generate the registration structures for
- GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
- from 4.5 to 6.0.
+ GIMPLE, SIMPLE_IPA, IPA and RTL passes.
They should be preferred to creating the structures by hand.
=====
You must install the gcc plugin headers for your gcc version,
-e.g., on Ubuntu for gcc-4.9::
+e.g., on Ubuntu for gcc-10::
- apt-get install gcc-4.9-plugin-dev
+ apt-get install gcc-10-plugin-dev
Or on Fedora::
dnf install gcc-plugin-devel
-Enable a GCC plugin based feature in the kernel config::
+Enable the GCC plugin infrastructure and some plugin(s) you want to use
+in the kernel config::
- CONFIG_GCC_PLUGIN_CYC_COMPLEXITY = y
+ CONFIG_GCC_PLUGINS=y
+ CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
+ CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y
+ ...
-To compile only the plugin(s)::
+To compile the minimum tool set including the plugin(s)::
- make gcc-plugins
+ make scripts
or just run the kernel make and compile the whole kernel with
the cyclomatic complexity GCC plugin.
4. How to add a new GCC plugin
==============================
-The GCC plugins are in $(src)/scripts/gcc-plugins/. You can use a file or a directory
-here. It must be added to $(src)/scripts/gcc-plugins/Makefile,
-$(src)/scripts/Makefile.gcc-plugins and $(src)/arch/Kconfig.
+The GCC plugins are in scripts/gcc-plugins/. Plugin source files must be put
+directly under scripts/gcc-plugins/; creating subdirectories is not supported.
+A new plugin must be added to scripts/gcc-plugins/Makefile,
+scripts/Makefile.gcc-plugins and a relevant Kconfig file.
See the cyc_complexity_plugin.c (CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) GCC plugin.
Currently, the integrated assembler is disabled by default. You can pass
``LLVM_IAS=1`` to enable it.
+Supported Architectures
+-----------------------
+
+LLVM does not target all of the architectures that Linux supports and
+just because a target is supported in LLVM does not mean that the kernel
+will build or work without any issues. Below is a general summary of
+architectures that currently work with ``CC=clang`` or ``LLVM=1``. Level
+of support corresponds to "S" values in the MAINTAINERS files. If an
+architecture is not present, it either means that LLVM does not target
+it or there are known issues. Using the latest stable version of LLVM or
+even the development tree will generally yield the best results.
+An architecture's ``defconfig`` is generally expected to work well, though
+certain configurations may have problems that have not been uncovered
+yet. Bug reports are always welcome at the issue tracker below!
+
+.. list-table::
+ :widths: 10 10 10
+ :header-rows: 1
+
+ * - Architecture
+ - Level of support
+ - ``make`` command
+ * - arm
+ - Supported
+ - ``LLVM=1``
+ * - arm64
+ - Supported
+ - ``LLVM=1``
+ * - mips
+ - Maintained
+ - ``CC=clang``
+ * - powerpc
+ - Maintained
+ - ``CC=clang``
+ * - riscv
+ - Maintained
+ - ``CC=clang``
+ * - s390
+ - Maintained
+ - ``CC=clang``
+ * - x86
+ - Supported
+ - ``LLVM=1``
+
Getting Help
------------
bits on the scripts nonetheless.
Kbuild provides variables $(CONFIG_SHELL), $(AWK), $(PERL),
- $(PYTHON) and $(PYTHON3) to refer to interpreters for the respective
+ and $(PYTHON3) to refer to interpreters for the respective
scripts.
Example::
If non-zero, the message will be sent with the primary address of
the interface that received the packet that caused the icmp error.
- This is the behaviour network many administrators will expect from
+ This is the behaviour many network administrators will expect from
a router. And it can make debugging complicated network layouts
much easier.
``conf/default/*``:
Change the interface-specific default settings.
+	These settings are used when creating new interfaces.
+
``conf/all/*``:
Change all the interface-specific settings.
[XXX: Other special features than forwarding?]
+conf/all/disable_ipv6 - BOOLEAN
+	Changing this value is the same as changing the
+	``conf/default/disable_ipv6`` setting and all per-interface
+	``disable_ipv6`` settings to the same value.
+
+	Reading this value does not have any particular meaning. It does not say
+	whether IPv6 support is enabled or disabled. The returned value can be 1
+	even when some interface has ``disable_ipv6`` set to 0 and has IPv6
+	addresses configured.
+
conf/all/forwarding - BOOLEAN
Enable global IPv6 forwarding between all interfaces.
memory slot. Ensure the entire structure is cleared to avoid padding
issues.
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field
+specify the address space for which you want to return the dirty bitmap. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
The bits in the dirty bitmap are cleared before the ioctl returns, unless
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
the entire memory slot size. Any object may back this memory, including
anonymous memory, ordinary files, and hugetlbfs.
+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
be identical. This allows large pages in the guest to be backed by large
pages in the host.
:Capability: KVM_CAP_ENABLE_CAP_VM
:Architectures: all
-:Type: vcpu ioctl
+:Type: vm ioctl
:Parameters: struct kvm_enable_cap (in)
:Returns: 0 on success; -1 on error
:Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
:Architectures: x86, arm, arm64, mips
:Type: vm ioctl
-:Parameters: struct kvm_dirty_log (in)
+:Parameters: struct kvm_clear_dirty_log (in)
:Returns: 0 on success, -1 on error
::
(for example via write-protection, or by clearing the dirty bit in
a page table entry).
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field
+specify the address space for which you want to clear the dirty status. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
is enabled; for more information, see the description of the capability.
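
As a sketch of how userspace selects an address space when clearing the dirty
log (the address space id and slot number below are only examples)::

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Clear dirty bits of memslot 0 in address space 1 (e.g. x86 SMM). */
  static int clear_dirty(int vm_fd, void *bitmap, __u64 first_page,
                         __u32 num_pages)
  {
          struct kvm_clear_dirty_log log;

          memset(&log, 0, sizeof(log));
          log.slot = (1 << 16) | 0;       /* bits 16-31: address space id */
          log.first_page = first_page;
          log.num_pages = num_pages;
          log.dirty_bitmap = bitmap;

          return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
  }
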
Running nested VMX
------------------
-The nested VMX feature is disabled by default. It can be enabled by giving
-the "nested=1" option to the kvm-intel module.
+The nested VMX feature has been enabled by default since Linux kernel v4.20.
+For older kernels, it can be enabled by giving the "nested=1" option to the
+kvm-intel module.
+
No modifications are required to user space (qemu). However, qemu's default
emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be
Enabling "nested" (x86)
-----------------------
-From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
+From Linux kernel v4.20 onwards, the ``nested`` KVM parameter is enabled
by default for Intel and AMD. (Though your Linux distribution might
override this default.)
F: drivers/power/reset/keystone-reset.c
ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
-M: Tero Kristo <t-kristo@ti.com>
M: Nishanth Menon <nm@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/arm/ti/k3.yaml
S: Supported
W: http://sourceforge.net/projects/bonding/
F: drivers/net/bonding/
+F: include/net/bonding.h
F: include/uapi/linux/if_bonding.h
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
F: drivers/pci/controller/pcie-brcmstb.c
F: drivers/staging/vc04_services
N: bcm2711
-N: bcm2835
+N: bcm283*
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
F: .clang-format
CLANG/LLVM BUILD SUPPORT
-M: Nathan Chancellor <natechancellor@gmail.com>
+M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
L: clang-built-linux@googlegroups.com
S: Supported
F: drivers/edac/skx_*.[ch]
EDAC-TI
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-edac@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/edac/ti_edac.c
EDIROL UA-101/UA-1000 DRIVER
F: include/linux/i3c/
IA64 (Itanium) PLATFORM
-M: Tony Luck <tony.luck@intel.com>
-M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-ia64@vger.kernel.org
-S: Odd Fixes
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
+S: Orphan
F: Documentation/ia64/
F: arch/ia64/
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
M: Nishanth Menon <nm@ti.com>
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: drivers/clk/clk-cdce706.c
TI CLOCK DRIVER
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-omap@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/clk/ti/
F: include/linux/clk/ti.h
VERSION = 5
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
INSTALLKERNEL := installkernel
DEPMOD = depmod
PERL = perl
-PYTHON = python
PYTHON3 = python3
CHECK = sparse
BASH = bash
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
-export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
endif
+DEBUG_CFLAGS :=
+
# Workaround for GCC versions < 5.0
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS := $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
+DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
endif
ifdef CONFIG_DEBUG_INFO
{
int offset = fdt_path_offset(fdt, node_path);
if (offset == -FDT_ERR_NOTFOUND)
- offset = fdt_add_subnode(fdt, 0, node_path);
+ /* Add the node to root if not found, dropping the leading '/' */
+ offset = fdt_add_subnode(fdt, 0, node_path + 1);
return offset;
}
stdout-path = &uart1;
};
+ aliases {
+ mmc0 = &usdhc2;
+ mmc1 = &usdhc3;
+ mmc2 = &usdhc4;
+ /delete-property/ mmc3;
+ };
+
memory@10000000 {
device_type = "memory";
reg = <0x10000000 0x80000000>;
/* VDD_AUD_1P8: Audio codec */
reg_aud_1p8v: ldo3 {
- regulator-name = "vdd1p8";
+ regulator-name = "vdd1p8a";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
lcd_backlight: lcd-backlight {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
pwm-names = "LCD_BKLT_PWM";
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
- status = "disabld";
+ status = "disabled";
};
i2c_cam: i2c-gpio-cam {
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
- status = "disabld";
+ status = "disabled";
};
};
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
- phy-handle = <&phy>;
phy-mode = "rgmii-id";
phy-reset-duration = <2>;
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
#address-cells = <1>;
#size-cells = <0>;
- phy: ethernet-phy@0 {
+ /*
+ * The PHY can appear at either address 0 or 4 due to the
+ * configuration (LED) pin not being pulled sufficiently.
+ */
+ ethernet-phy@0 {
reg = <0>;
qca,clk-out-frequency = <125000000>;
};
+
+ ethernet-phy@4 {
+ reg = <4>;
+ qca,clk-out-frequency = <125000000>;
+ };
};
};
compatible = "nxp,pcf2127";
reg = <0>;
spi-max-frequency = <2000000>;
+ reset-source;
};
};
clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
-
- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
- assigned-clock-rates = <208000000>;
};
};
gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;
/* lcd panel */
spi-max-frequency = <100000>;
spi-cpol;
spi-cpha;
- spi-cs-high;
backlight= <&backlight>;
label = "lcd";
debounce-interval = <10>;
};
+ /*
+ * We use pad 0x4a100116 abe_dmic_din3.gpio_122 as the irq instead
+ * of the gpio interrupt to avoid lost events in deeper idle states.
+ */
slider {
label = "Keypad Slide";
+ interrupts-extended = <&omap4_pmx_core 0xd6>;
gpios = <&gpio4 26 GPIO_ACTIVE_HIGH>; /* gpio122 */
linux,input-type = <EV_SW>;
linux,code = <SW_KEYPAD_SLIDE>;
200000 0>;
};
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Modem trace memory */
+ ram@06000000 {
+ reg = <0x06000000 0x00f00000>;
+ no-map;
+ };
+
+ /* Modem shared memory */
+ ram@06f00000 {
+ reg = <0x06f00000 0x00100000>;
+ no-map;
+ };
+
+ /* Modem private memory */
+ ram@07000000 {
+ reg = <0x07000000 0x01000000>;
+ no-map;
+ };
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
};
200000 0>;
};
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Modem trace memory */
+ ram@06000000 {
+ reg = <0x06000000 0x00f00000>;
+ no-map;
+ };
+
+ /* Modem shared memory */
+ ram@06f00000 {
+ reg = <0x06f00000 0x00100000>;
+ no-map;
+ };
+
+ /* Modem private memory */
+ ram@07000000 {
+ reg = <0x07000000 0x01000000>;
+ no-map;
+ };
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+ cpus {
+ cpu@300 {
+ /* cpufreq controls */
+ operating-points = <1152000 0
+ 800000 0
+ 400000 0
+ 200000 0>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
+};
*/
/dts-v1/;
-#include "ste-db8500.dtsi"
+#include "ste-db9500.dtsi"
#include "ste-href-ab8500.dtsi"
#include "ste-href-family-pinctrl.dtsi"
* during TX anyway and that it only controls drive enable DE
* line. Hence, the RX is always enabled here.
*/
- rs485-rx-en {
+ rs485-rx-en-hog {
gpio-hog;
- gpios = <8 GPIO_ACTIVE_HIGH>;
+ gpios = <8 0>;
output-low;
line-name = "rs485-rx-en";
};
* order to reset the Hub when USB bus is powered down, but
* so far there is no such functionality.
*/
- usb-hub {
+ usb-hub-hog {
gpio-hog;
- gpios = <2 GPIO_ACTIVE_HIGH>;
+ gpios = <2 0>;
output-high;
line-name = "usb-hub-reset";
};
};
};
+&i2c4 {
+ touchscreen@49 {
+ status = "disabled";
+ };
+};
+
&i2c5 { /* TP7/TP8 */
pinctrl-names = "default";
pinctrl-0 = <&i2c5_pins_a>;
* are used for on-board microSD slot instead.
*/
/delete-property/broken-cd;
- cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
disable-wp;
};
* in order to turn on port power when USB bus is powered up, but so
* far there is no such functionality.
*/
- usb-port-power {
+ usb-port-power-hog {
gpio-hog;
- gpios = <13 GPIO_ACTIVE_LOW>;
+ gpios = <13 0>;
output-low;
line-name = "usb-port-power";
};
pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
- broken-cd;
+ cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
st,sig-dir;
st,neg-edge;
st,use-ckin;
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <®_gmac_3v3>;
status = "okay";
};
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
.align
99: .word .
+#if defined(ZIMAGE)
+ .word . + 4
+/*
+ * Storage for the state maintained by the macro.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the storage right here, since common.c
+ * isn't included in the decompressor build. This storage data gets put in
+ * .text even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+ /* Debug UART initialization required */
+ .word 1
+ /* Debug UART physical address */
+ .word 0
+ /* Debug UART virtual address */
+ .word 0
+#else
.word tegra_uart_config
+#endif
.ltorg
/* Load previously selected UART address */
.macro waituarttxrdy,rd,rx
.endm
-
-/*
- * Storage for the state maintained by the macros above.
- *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
- * That's because this header is included from multiple files, and we only
- * want a single copy of the data. In particular, the UART probing code above
- * assumes it's running using physical addresses. This is true when this file
- * is included from head.o, but not when included from debug.o. So we need
- * to share the probe results between the two copies, rather than having
- * to re-run the probing again later.
- *
- * In the decompressor, we put the symbol/storage right here, since common.c
- * isn't included in the decompressor build. This symbol gets put in .text
- * even though it's really data, since .data is discarded from the
- * decompressor. Luckily, .text is writeable in the decompressor, unless
- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
- */
-#if defined(ZIMAGE)
-tegra_uart_config:
- /* Debug UART initialization required */
- .word 1
- /* Debug UART physical address */
- .word 0
- /* Debug UART virtual address */
- .word 0
-#endif
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
+ DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+ DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+ DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+ DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}
#include <linux/of_fdt.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
static atomic_t waiting_for_crash_ipi;
/*
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
+ struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- set_kernel_text_rw();
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->arch.kernel_r2;
-
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
+ data = reboot_code_buffer + relocate_new_kernel_size;
+ data->kexec_start_address = image->start;
+ data->kexec_indirection_page = page_list;
+ data->kexec_mach_type = machine_arch_type;
+ data->kexec_r2 = image->arch.kernel_r2;
+
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
- ldr r0,kexec_indirection_page
- ldr r1,kexec_start_address
+ adr r7, relocate_new_kernel_end
+ ldr r0, [r7, #KEXEC_INDIR_PAGE]
+ ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
2:
/* Jump to relocated kernel */
- mov lr,r1
- mov r0,#0
- ldr r1,kexec_mach_type
- ldr r2,kexec_boot_atags
- ARM( ret lr )
- THUMB( bx lr )
-
- .align
-
- .globl kexec_start_address
-kexec_start_address:
- .long 0x0
-
- .globl kexec_indirection_page
-kexec_indirection_page:
- .long 0x0
-
- .globl kexec_mach_type
-kexec_mach_type:
- .long 0x0
-
- /* phy addr of the atags for the new kernel */
- .globl kexec_boot_atags
-kexec_boot_atags:
- .long 0x0
+ mov lr, r1
+ mov r0, #0
+ ldr r1, [r7, #KEXEC_MACH_TYPE]
+ ldr r2, [r7, #KEXEC_R2]
+ ARM( ret lr )
+ THUMB( bx lr )
ENDPROC(relocate_new_kernel)
+ .align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
if (addr)
switch (size) {
case 1:
- asm("ldrb %0, [%1, %2]"
+ asm volatile("ldrb %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 2:
- asm("ldrh %0, [%1, %2]"
+ asm volatile("ldrh %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 4:
- asm("ldr %0, [%1, %2]"
+ asm volatile("ldr %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
}
if (addr)
switch (size) {
case 1:
- asm("strb %0, [%1, %2]"
+ asm volatile("strb %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 2:
- asm("strh %0, [%1, %2]"
+ asm volatile("strh %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 4:
- asm("str %0, [%1, %2]"
+ asm volatile("str %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
#define MX6Q_CCM_CCR 0x0
.align 3
+ .arm
.macro sync_l2_cache
*/
gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
+ /* Free the GPIO again as the driver will request it */
+ gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
/* Set GPIO 2 high so LED D3 is off by default */
tps65010_set_gpio_out_value(GPIO2, HIGH);
bool "TI OMAP3"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select OMAP_HWMOD
select OMAP_INTERCONNECT
- select PM_OPP if PM
- select PM if CPU_IDLE
+ select PM_OPP
select SOC_HAS_OMAP2_SDRC
select ARM_ERRATA_430973
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_ERRATA_720789
select ARM_GIC
select HAVE_ARM_SCU if SMP
select OMAP_INTERCONNECT_BARRIER
select PL310_ERRATA_588369 if CACHE_L2X0
select PL310_ERRATA_727915 if CACHE_L2X0
- select PM_OPP if PM
+ select PM_OPP
select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
bool "TI OMAP5"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
- select PM_OPP if PM
+ select PM_OPP
select ZONE_DMA if ARM_LPAE
config SOC_AM33XX
bool "TI AM33XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
config SOC_AM43XX
bool "TI AM43x"
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select OMAP_INTERCONNECT
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
config SOC_DRA7XX
bool "TI DRA7XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
- select PM_OPP if PM
+ select PM_OPP
select ZONE_DMA if ARM_LPAE
select PINCTRL_TI_IODELAY if OF && PINCTRL
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
- select PM_GENERIC_DOMAINS if PM
- select PM_GENERIC_DOMAINS_OF if PM
+ select PM
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
select RESET_CONTROLLER
+ select SIMPLE_PM_BUS
select SOC_BUS
select TI_SYSC
select OMAP_IRQCHIP
select I2C_OMAP
select MENELAUS if ARCH_OMAP2
select NEON if CPU_V7
- select PM
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
(cx->mpu_logic_state == PWRDM_POWER_OFF);
/* Enter broadcast mode for periodic timers */
- tick_broadcast_enable();
+ RCU_NONIDLE(tick_broadcast_enable());
/* Enter broadcast mode for one-shot timers */
- tick_broadcast_enter();
+ RCU_NONIDLE(tick_broadcast_enter());
/*
* Call idle CPU PM enter notifier chain so that
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
/*
* Call idle CPU cluster PM enter notifier chain
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
mpuss_can_lose_context = 0;
}
}
mpuss_can_lose_context)
gic_dist_disable();
- clkdm_deny_idle(cpu_clkdm[1]);
- omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
- clkdm_allow_idle(cpu_clkdm[1]);
+ RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
+ RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
+ RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
mpuss_can_lose_context) {
cpu_pm_exit();
cpu_pm_out:
- tick_broadcast_exit();
+ RCU_NONIDLE(tick_broadcast_exit());
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
&dra7_ipu1_dsp_iommu_pdata),
#endif
/* Common auxdata */
+ OF_DEV_AUXDATA("simple-pm-bus", 0, NULL, omap_auxdata_lookup),
OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata),
OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
OF_DEV_AUXDATA("ti,omap-prm-inst", 0, NULL, &ti_prm_pdata),
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_AXG_ETHERNET_MEM_ID>;
status = "disabled";
};
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
status = "disabled";
mdio0: mdio {
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,dis_u2_susphy_quirk;
- snps,quirk-frame-length-adjustment;
+ snps,quirk-frame-length-adjustment = <0x20>;
snps,parkmode-disable-ss-quirk;
};
};
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/meson-gxbb-power.h>
-#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-names = "macirq";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
status = "disabled";
};
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>;
enable-active-high;
regulator-always-on;
};
*/
usb {
compatible = "simple-bus";
- dma-ranges;
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
+ /*
+ * Internally, USB bus to the interconnect can only address up
+ * to 40-bit
+ */
+ dma-ranges = <0 0 0 0 0x100 0x0>;
+
usbphy0: usb-phy@0 {
compatible = "brcm,sr-usb-combo-phy";
reg = <0x0 0x00000000 0x0 0x100>;
reboot {
compatible ="syscon-reboot";
regmap = <&rst>;
- offset = <0xb0>;
+ offset = <0>;
mask = <0x02>;
};
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1046a-dcfg", "syscon";
- reg = <0x0 0x1ee0000 0x0 0x10000>;
+ reg = <0x0 0x1ee0000 0x0 0x1000>;
big-endian;
};
#size-cells = <1>;
ranges;
- spba: bus@30000000 {
+ spba: spba-bus@30000000 {
compatible = "fsl,spba-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
+ gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
};
gpio4: gpio@30230000 {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
&i2c3 {
status = "okay";
clock-frequency = <400000>;
+ /* Overwrite pinctrl-0 from sdm845.dtsi */
+ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
tsel: hid@15 {
compatible = "hid-over-i2c";
hid-descr-addr = <0x1>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_hid_active>;
};
tsc2: hid@2c {
hid-descr-addr = <0x20>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_hid_active>;
-
- status = "disabled";
};
};
vopl_mmu: iommu@ff470f00 {
compatible = "rockchip,iommu";
reg = <0x0 0xff470f00 0x0 0x100>;
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
clock-names = "aclk", "iface";
cpu-supply = <&vdd_arm>;
};
+&display_subsystem {
+ status = "disabled";
+};
+
&gmac2io {
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
&pcie0 {
bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
- max-link-speed = <2>;
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
- linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
compatible = "rockchip,rk3399-vdec";
reg = <0x0 0xff660000 0x0 0x400>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "vdpu";
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
clock-names = "axi", "ahb", "cabac", "core";
CONFIG_ARCH_TEGRA_186_SOC=y
CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_TEGRA_234_SOC=y
-CONFIG_ARCH_K3_AM6_SOC=y
-CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
CONFIG_INTERCONNECT_QCOM_OSM_L3=m
-CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SM8150=m
CONFIG_INTERCONNECT_QCOM_SM8250=m
CONFIG_EXT2_FS=y
/*
- * The linear kernel range starts at the bottom of the virtual address space.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
*/
-#define __is_lm_address(addr) (((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
+#define __is_lm_address(addr) (((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
-#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr) (((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
#define virt_addr_valid(addr) ({ \
- __typeof__(addr) __addr = addr; \
+ __typeof__(addr) __addr = __tag_reset(addr); \
__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
})
* Calculate the raw per-cpu offset without a translation from the
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
+ * Also drop the KASAN tag which gets in the way...
*/
- params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+ params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
(unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
params->mair_el2 = read_sysreg(mair_el1);
b .
/*
+ * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
+ *
* x0: SMCCC function ID
* x1: struct kvm_nvhe_init_params PA
*/
eret
1: mov x0, x1
- mov x4, lr
- bl ___kvm_hyp_init
- mov lr, x4
+ mov x3, lr
+ bl ___kvm_hyp_init // Clobbers x0..x2
+ mov lr, x3
/* Hello, World! */
mov x0, #SMCCC_RET_SUCCESS
/*
* Initialize the hypervisor in EL2.
*
- * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers
- * and leave x4 for the caller.
+ * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
+ * and leave x3 for the caller.
*
* x0: struct kvm_nvhe_init_params PA
*/
/*
* Set the PS bits in TCR_EL2.
*/
- ldr x1, [x0, #NVHE_INIT_TCR_EL2]
- tcr_compute_pa_size x1, #TCR_EL2_PS_SHIFT, x2, x3
- msr tcr_el2, x1
+ ldr x0, [x0, #NVHE_INIT_TCR_EL2]
+ tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
+ msr tcr_el2, x0
isb
/* Enable MMU, set vectors and stack. */
mov x0, x28
- bl ___kvm_hyp_init // Clobbers x0..x3
+ bl ___kvm_hyp_init // Clobbers x0..x2
/* Leave idmap. */
mov x0, x29
cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
- psci_forward(host_ctxt);
- hyp_panic(); /* unreachable */
-}
-
static unsigned int find_cpu_id(u64 mpidr)
{
unsigned int i;
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
return psci_forward(host_ctxt);
+ /*
+ * SYSTEM_OFF/RESET should not return according to the spec.
+ * Allow it so as to stay robust to broken firmware.
+ */
case PSCI_0_2_FN_SYSTEM_OFF:
case PSCI_0_2_FN_SYSTEM_RESET:
- psci_forward_noreturn(host_ctxt);
- unreachable();
+ return psci_forward(host_ctxt);
case PSCI_0_2_FN64_CPU_SUSPEND:
return psci_cpu_suspend(func_id, host_ctxt);
case PSCI_0_2_FN64_CPU_ON:
{
unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
u64 val, mask = 0;
- int base, i;
+ int base, i, nr_events;
if (!pmceid1) {
val = read_sysreg(pmceid0_el0);
if (!bmap)
return val;
+ nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
for (i = 0; i < 32; i += 8) {
u64 byte;
byte = bitmap_get_value8(bmap, base + i);
mask |= byte << i;
- byte = bitmap_get_value8(bmap, 0x4000 + base + i);
- mask |= byte << (32 + i);
+ if (nr_events >= (0x4000 + base + 32)) {
+ byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+ mask |= byte << (32 + i);
+ }
}
return val & mask;
* 64bit interface.
*/
+#define reg_to_encoding(x) \
+ sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
const struct sys_reg_desc *r)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
- u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 sr = reg_to_encoding(r);
if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
kvm_inject_undefined(vcpu);
vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
- bool enabled = kvm_vcpu_has_pmu(vcpu);
+ bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
- enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
if (!enabled)
kvm_inject_undefined(vcpu);
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- if (!kvm_vcpu_has_pmu(vcpu)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
-
if (p->is_write) {
if (!vcpu_mode_priv(vcpu)) {
kvm_inject_undefined(vcpu);
return true;
}
-#define reg_to_encoding(x) \
- sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
+#define PMU_SYS_REG(r) \
+ SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
- { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
- access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
+ .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
- { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
- access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
+ .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
if (id == SYS_ID_AA64PFR0_EL1) {
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
switch (id) {
case SYS_ID_AA64ZFR0_EL1:
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
- { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
- { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
- { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
- { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
- { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
- { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
- { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
- { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+ { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+ .reset = reset_pmcr, .reg = PMCR_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMSWINC_EL0),
+ .access = access_pmswinc, .reg = PMSWINC_EL0 },
+ { PMU_SYS_REG(SYS_PMSELR_EL0),
+ .access = access_pmselr, .reg = PMSELR_EL0 },
+ { PMU_SYS_REG(SYS_PMCEID0_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCEID1_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+ .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+ { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+ .access = access_pmu_evtyper, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+ .access = access_pmu_evcntr, .reset = NULL },
/*
* PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
- { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+ .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+ { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+ { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+ .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
phys_addr_t __virt_to_phys(unsigned long x)
{
- WARN(!__is_lm_address(x),
+ WARN(!__is_lm_address(__tag_reset(x)),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x,
(void *)x);
$(call if_changed,objcopy)
unwcheck: vmlinux
- -$(Q)READELF=$(READELF) $(PYTHON) $(srctree)/arch/ia64/scripts/unwcheck.py $<
+ -$(Q)READELF=$(READELF) $(PYTHON3) $(srctree)/arch/ia64/scripts/unwcheck.py $<
archclean:
})
#define xchg(ptr, x) \
-((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
- unsigned long cur_itm, new_itm, ticks;
+ unsigned long new_itm;
if (cpu_is_offline(smp_processor_id())) {
return IRQ_HANDLED;
}
new_itm = local_cpu_data->itm_next;
- cur_itm = ia64_get_itc();
- if (!time_after(cur_itm, new_itm)) {
+ if (!time_after(ia64_get_itc(), new_itm))
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
- cur_itm, new_itm);
- ticks = 1;
- } else {
- ticks = DIV_ROUND_UP(cur_itm - new_itm,
- local_cpu_data->itm_delta);
- new_itm += ticks * local_cpu_data->itm_delta;
- }
+ ia64_get_itc(), new_itm);
+
+ while (1) {
+ new_itm += local_cpu_data->itm_delta;
+
+ legacy_timer_tick(smp_processor_id() == time_keeper_id);
- if (smp_processor_id() != time_keeper_id)
- ticks = 0;
+ local_cpu_data->itm_next = new_itm;
- legacy_timer_tick(ticks);
+ if (time_after(new_itm, ia64_get_itc()))
+ break;
+
+ /*
+ * Allow IPIs to interrupt the timer loop.
+ */
+ local_irq_enable();
+ local_irq_disable();
+ }
do {
/*
-#!/usr/bin/env python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE
depends on PA8X00 || PA7200
config MLONGCALLS
- bool "Enable the -mlong-calls compiler option for big kernels"
- default y if !MODULES || UBSAN || FTRACE
- default n
+ def_bool y if !MODULES || UBSAN || FTRACE
+ bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
-/* soft power switch support (power.c) */
-extern struct tasklet_struct power_tasklet;
-
#endif /* _ASM_PARISC_IRQ_H */
bb,<,n %r20, 31 - PSW_SM_I, intr_restore
nop
+ /* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+ ldil L%intr_restore, %r2
+ load32 preempt_schedule_irq, %r1
+ bv %r0(%r1)
+ ldo R%intr_restore(%r2), %r2
+#else
+ ldil L%intr_restore, %r1
BL preempt_schedule_irq, %r2
- nop
-
- b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
+ ldo R%intr_restore(%r1), %r2
+#endif
#endif /* CONFIG_PREEMPTION */
/*
obj-$(CONFIG_PPC64) += setup_64.o \
paca.o nvram_64.o note.o syscall_64.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
-obj-$(CONFIG_VDSO32) += vdso32/
+obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_DAWR) += dawr.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
-obj-$(CONFIG_PPC64) += vdso64/
+obj-$(CONFIG_PPC64) += vdso64_wrapper.o
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o
procfs-y := proc_powerpc.o
void replay_soft_interrupts(void)
{
+ struct pt_regs regs;
+
/*
- * We use local_paca rather than get_paca() to avoid all
- * the debug_smp_processor_id() business in this low level
- * function
+ * Be careful here, calling these interrupt handlers can cause
+ * softirqs to be raised, which they may run when calling irq_exit,
+ * which will cause local_irq_enable() to be run, which can then
+ * recurse into this function. Don't keep any state across
+ * interrupt handler calls which may change underneath us.
+ *
+ * We use local_paca rather than get_paca() to avoid all the
+ * debug_smp_processor_id() business in this low level function.
*/
- unsigned char happened = local_paca->irq_happened;
- struct pt_regs regs;
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
* This is a higher priority interrupt than the others, so
* replay it first.
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
local_paca->irq_happened &= ~PACA_IRQ_HMI;
regs.trap = 0xe60;
handle_hmi_exception(&regs);
hard_irq_disable();
}
- if (happened & PACA_IRQ_DEC) {
+ if (local_paca->irq_happened & PACA_IRQ_DEC) {
local_paca->irq_happened &= ~PACA_IRQ_DEC;
regs.trap = 0x900;
timer_interrupt(&regs);
hard_irq_disable();
}
- if (happened & PACA_IRQ_EE) {
+ if (local_paca->irq_happened & PACA_IRQ_EE) {
local_paca->irq_happened &= ~PACA_IRQ_EE;
regs.trap = 0x500;
do_IRQ(&regs);
hard_irq_disable();
}
- if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
+ if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
if (IS_ENABLED(CONFIG_PPC_BOOK3E))
regs.trap = 0x280;
}
/* Book3E does not support soft-masking PMI interrupts */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
local_paca->irq_happened &= ~PACA_IRQ_PMI;
regs.trap = 0xf00;
performance_monitor_exception(&regs);
hard_irq_disable();
}
- happened = local_paca->irq_happened;
- if (happened & ~PACA_IRQ_HARD_DIS) {
+ if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
/*
* We are responding to the next interrupt, so interrupt-off
* latencies should be reset here.
KBUILD_CFLAGS := $(filter-out -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc,$(KBUILD_CFLAGS))
endif
-targets := $(obj-vdso32) vdso32.so.dbg
+targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday.o
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
GCOV_PROFILE := n
targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -Upowerpc
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so.dbg
-
# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso32ld_and_check)
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .globl vdso32_start, vdso32_end
- .balign PAGE_SIZE
-vdso32_start:
- .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
- .balign PAGE_SIZE
-vdso32_end:
-
- .previous
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso32_start, vdso32_end
+ .balign PAGE_SIZE
+vdso32_start:
+ .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
+ .balign PAGE_SIZE
+vdso32_end:
+
+ .previous
# Build rules
-targets := $(obj-vdso64) vdso64.so.dbg
+targets := $(obj-vdso64) vdso64.so.dbg vgettimeofday.o
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
GCOV_PROFILE := n
-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
asflags-y := -D__VDSO64__ -s
-obj-y += vdso64_wrapper.o
targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-$(obj)/vgettimeofday.o: %.o: %.c FORCE
-
-# Force dependency (incbin is bad)
-$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so.dbg
-
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso64ld_and_check)
.text
+/*
+ * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together
+ * are one function split in two parts. The kernel jumps to the former
+ * and the signal handler indirectly (by blr) returns to the latter.
+ * __kernel_sigtramp_rt64 needs to point to the return address so
+ * glibc can correctly identify the trampoline stack frame.
+ */
.balign 8
.balign IFETCH_ALIGN_BYTES
-V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
.Lsigrt_start:
bctrl /* call the handler */
+V_FUNCTION_END(__kernel_start_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
addi r1, r1, __SIGNAL_FRAMESIZE
li r0,__NR_rt_sigreturn
sc
/*
* Make the sigreturn code visible to the kernel.
*/
-VDSO_sigtramp_rt64 = __kernel_sigtramp_rt64;
+VDSO_sigtramp_rt64 = __kernel_start_sigtramp_rt64;
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .globl vdso64_start, vdso64_end
- .balign PAGE_SIZE
-vdso64_start:
- .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
- .balign PAGE_SIZE
-vdso64_end:
-
- .previous
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso64_start, vdso64_end
+ .balign PAGE_SIZE
+vdso64_start:
+ .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
+ .balign PAGE_SIZE
+vdso64_end:
+
+ .previous
break;
if (rev) {
/* reverse 32 bytes */
- buf.d[0] = byterev_8(reg->d[3]);
- buf.d[1] = byterev_8(reg->d[2]);
- buf.d[2] = byterev_8(reg->d[1]);
- buf.d[3] = byterev_8(reg->d[0]);
- reg = &buf;
+ union vsx_reg buf32[2];
+ buf32[0].d[0] = byterev_8(reg[1].d[1]);
+ buf32[0].d[1] = byterev_8(reg[1].d[0]);
+ buf32[1].d[0] = byterev_8(reg[0].d[1]);
+ buf32[1].d[1] = byterev_8(reg[0].d[0]);
+ memcpy(mem, buf32, size);
+ } else {
+ memcpy(mem, reg, size);
}
- memcpy(mem, reg, size);
break;
case 16:
/* stxv, stxvx, stxvl, stxvll */
default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
config MAXPHYSMEM_1GB
+ depends on 32BIT
bool "1GiB"
config MAXPHYSMEM_2GB
+ depends on 64BIT && CMODEL_MEDLOW
bool "2GiB"
config MAXPHYSMEM_128GB
depends on 64BIT && CMODEL_MEDANY
#endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+#ifdef CONFIG_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
#define SECTION_ALIGN (1 << 22)
#endif
-#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#else /* !CONFIG_STRICT_KERNEL_RWX */
#define SECTION_ALIGN L1_CACHE_BYTES
-#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#endif /* CONFIG_STRICT_KERNEL_RWX */
#endif /* _ASM_RISCV_SET_MEMORY_H */
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
- set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+
free_initmem_default(POISON_FREE_INITMEM);
}
max_pfn = PFN_DOWN(dram_end);
max_low_pfn = max_pfn;
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
- set_max_mapnr(max_low_pfn);
+ set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
- uv_info.max_guest_cpus = uvcb.max_guest_cpus;
+ uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
u32 max_num_sec_conf;
u64 max_guest_stor_addr;
u8 reserved88[158 - 136];
- u16 max_guest_cpus;
+ u16 max_guest_cpu_id;
u8 reserveda0[200 - 160];
} __packed __aligned(8);
unsigned long guest_cpu_stor_len;
unsigned long max_sec_stor_addr;
unsigned int max_num_sec_conf;
- unsigned short max_guest_cpus;
+ unsigned short max_guest_cpu_id;
};
extern struct uv_info uv_info;
struct kobj_attribute *attr, char *page)
{
return scnprintf(page, PAGE_SIZE, "%d\n",
- uv_info.max_guest_cpus);
+ uv_info.max_guest_cpu_id + 1);
}
static struct kobj_attribute uv_query_max_guest_cpus_attr =
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
select NO_DMA
- select ARCH_HAS_SET_MEMORY
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select HAVE_GCC_PLUGINS
file = NULL;
backing_file = strsep(&str, ",:");
- if (*backing_file == '\0')
+ if (backing_file && *backing_file == '\0')
backing_file = NULL;
serial = strsep(&str, ",:");
- if (*serial == '\0')
+ if (serial && *serial == '\0')
serial = NULL;
if (backing_file && ubd_dev->no_cow) {
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use anyway the io thread. */
}
- stack = alloc_stack(0);
+ stack = alloc_stack(0, 0);
io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
&thread_fd);
if(io_pid < 0){
}
os_close_file(vu_dev->sock);
+ kfree(vu_dev);
}
/* Platform device */
if (!pdata)
return -EINVAL;
- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
+ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
if (!vu_dev)
return -ENOMEM;
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
- return (void __iomem *)(unsigned long)offset;
+ return NULL;
}
#define iounmap iounmap
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
/*
* The i386 can't do page protection for execute, and considers that the same
+++ /dev/null
-#include <asm-generic/set_memory.h>
#define UML_ROUND_UP(addr) \
((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
-extern unsigned long alloc_stack(int atomic);
+extern unsigned long alloc_stack(int order, int atomic);
extern void free_stack(unsigned long stack, int order);
struct pt_regs;
// SPDX-License-Identifier: GPL-2.0
#include <linux/kmsg_dump.h>
#include <linux/console.h>
+#include <linux/string.h>
#include <shared/init.h>
#include <shared/kern.h>
#include <os.h>
if (!console_trylock())
return;
- for_each_console(con)
- break;
+ for_each_console(con) {
+ if(strcmp(con->name, "tty") == 0 &&
+ (con->flags & (CON_ENABLED | CON_CONSDEV)) != 0) {
+ break;
+ }
+ }
console_unlock();
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>
-#include <asm/set_memory.h>
/*
* This is a per-cpu array. A processor only modifies its entry and it only
free_pages(stack, order);
}
-unsigned long alloc_stack(int atomic)
+unsigned long alloc_stack(int order, int atomic)
{
- unsigned long addr;
+ unsigned long page;
gfp_t flags = GFP_KERNEL;
if (atomic)
flags = GFP_ATOMIC;
- addr = __get_free_pages(flags, 1);
+ page = __get_free_pages(flags, order);
- set_memory_ro(addr, 1);
-
- return addr + PAGE_SIZE;
+ return page;
}
static inline void set_current(struct task_struct *task)
return 1;
}
+
+static void time_travel_set_start(void)
+{
+ if (time_travel_start_set)
+ return;
+
+ switch (time_travel_mode) {
+ case TT_MODE_EXTERNAL:
+ time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
+ /* controller gave us the *current* time, so adjust by that */
+ time_travel_ext_get_time();
+ time_travel_start -= time_travel_time;
+ break;
+ case TT_MODE_INFCPU:
+ case TT_MODE_BASIC:
+ if (!time_travel_start_set)
+ time_travel_start = os_persistent_clock_emulation();
+ break;
+ case TT_MODE_OFF:
+ /* we just read the host clock with os_persistent_clock_emulation() */
+ break;
+ }
+
+ time_travel_start_set = true;
+}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
#define time_travel_start_set 0
#define time_travel_start 0
{
}
+static inline void time_travel_set_start(void)
+{
+}
+
/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);
{
long long nsecs;
+ time_travel_set_start();
+
if (time_travel_mode != TT_MODE_OFF)
nsecs = time_travel_start + time_travel_time;
else
void __init time_init(void)
{
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- switch (time_travel_mode) {
- case TT_MODE_EXTERNAL:
- time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
- /* controller gave us the *current* time, so adjust by that */
- time_travel_ext_get_time();
- time_travel_start -= time_travel_time;
- break;
- case TT_MODE_INFCPU:
- case TT_MODE_BASIC:
- if (!time_travel_start_set)
- time_travel_start = os_persistent_clock_emulation();
- break;
- case TT_MODE_OFF:
- /* we just read the host clock with os_persistent_clock_emulation() */
- break;
- }
-#endif
-
timer_set_signal_handler();
late_time_init = um_timer_setup;
}
vma = vma->vm_next;
}
}
-
-struct page_change_data {
- unsigned int set_mask, clear_mask;
-};
-
-static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
-{
- struct page_change_data *cdata = data;
- pte_t pte = READ_ONCE(*ptep);
-
- pte_clear_bits(pte, cdata->clear_mask);
- pte_set_bits(pte, cdata->set_mask);
-
- set_pte(ptep, pte);
- return 0;
-}
-
-static int change_memory(unsigned long start, unsigned long pages,
- unsigned int set_mask, unsigned int clear_mask)
-{
- unsigned long size = pages * PAGE_SIZE;
- struct page_change_data data;
- int ret;
-
- data.set_mask = set_mask;
- data.clear_mask = clear_mask;
-
- ret = apply_to_page_range(&init_mm, start, size, change_page_range,
- &data);
-
- flush_tlb_kernel_range(start, start + size);
-
- return ret;
-}
-
-int set_memory_ro(unsigned long addr, int numpages)
-{
- return change_memory(addr, numpages, 0, _PAGE_RW);
-}
-
-int set_memory_rw(unsigned long addr, int numpages)
-{
- return change_memory(addr, numpages, _PAGE_RW, 0);
-}
-
-int set_memory_nx(unsigned long addr, int numpages)
-{
- return -EOPNOTSUPP;
-}
-
-int set_memory_x(unsigned long addr, int numpages)
-{
- return -EOPNOTSUPP;
-}
#include <mem_user.h>
#include <os.h>
-#define DEFAULT_COMMAND_LINE "root=98:0"
+#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
int ncpus = 1;
/* Set in early boot */
-static int have_root __initdata = 0;
+static int have_root __initdata;
+static int have_console __initdata;
/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 32 * 1024 * 1024;
" this flag is not needed to run gdb on UML in skas mode\n\n"
);
+static int __init uml_console_setup(char *line, int *add)
+{
+ have_console = 1;
+ return 0;
+}
+
+__uml_setup("console=", uml_console_setup,
+"console=<preferred console>\n"
+" Specify the preferred console output driver\n\n"
+);
+
static int __init Usage(char *line, int *add)
{
const char **p;
add_arg(argv[i]);
}
if (have_root == 0)
- add_arg(DEFAULT_COMMAND_LINE);
+ add_arg(DEFAULT_COMMAND_LINE_ROOT);
+
+ if (have_console == 0)
+ add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
host_task_size = os_get_top_address();
/*
unsigned long stack, sp;
int pid, fds[2], ret, n;
- stack = alloc_stack(__cant_sleep());
+ stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
unsigned long stack, sp;
int pid, status, err;
- stack = alloc_stack(__cant_sleep());
+ stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
*/
void os_idle_sleep(void)
{
- pause();
+ struct itimerspec its;
+ sigset_t set, old;
+
+ /* block SIGALRM while we analyze the timer state */
+ sigemptyset(&set);
+ sigaddset(&set, SIGALRM);
+ sigprocmask(SIG_BLOCK, &set, &old);
+
+ /* check the timer, and if it'll fire then wait for it */
+ timer_gettime(event_high_res_timer, &its);
+ if (its.it_value.tv_sec || its.it_value.tv_nsec)
+ sigsuspend(&old);
+ /* either way, restore the signal mask */
+ sigprocmask(SIG_UNBLOCK, &set, NULL);
}
#include <asm/export.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
- .macro THUNK name, func, put_ret_addr_in_rdi=0
+ .macro THUNK name, func
SYM_FUNC_START_NOALIGN(\name)
pushq %rbp
movq %rsp, %rbp
pushq %r10
pushq %r11
- .if \put_ret_addr_in_rdi
- /* 8(%rbp) is return addr on stack */
- movq 8(%rbp), %rdi
- .endif
-
call \func
- jmp .L_restore
+ jmp __thunk_restore
SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
#endif
#ifdef CONFIG_PREEMPTION
-SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
+SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
popq %r11
popq %r10
popq %r9
popq %rdi
popq %rbp
ret
- _ASM_NOKPROBE(.L_restore)
-SYM_CODE_END(.L_restore)
+ _ASM_NOKPROBE(__thunk_restore)
+SYM_CODE_END(__thunk_restore)
#endif
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER, exc_xen_unknown_trap);
#endif
/* Device interrupts common/spurious */
static void __init trim_bios_range(void)
{
+ /*
+ * A special case is the first 4Kb of memory;
+ * This is a BIOS owned area, not kernel ram, but generally
+ * not listed as such in the E820 table.
+ *
+ * This typically reserves additional memory (64KiB by default)
+ * since some BIOSes are known to corrupt low memory. See the
+ * Kconfig help text for X86_RESERVE_LOW.
+ */
+ e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+
/*
* special case: Some BIOSes report the PC BIOS
* area (640Kb -> 1Mb) as RAM even though it is not.
static void __init trim_low_memory_range(void)
{
- /*
- * A special case is the first 4Kb of memory;
- * This is a BIOS owned area, not kernel ram, but generally
- * not listed as such in the E820 table.
- *
- * This typically reserves additional memory (64KiB by default)
- * since some BIOSes are known to corrupt low memory. See the
- * Kconfig help text for X86_RESERVE_LOW.
- */
memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
r = -EFAULT;
- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+ if (copy_to_user(entries, vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
return 0;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
+ if (efer & EFER_LMA)
+ ctxt->mode = X86EMUL_MODE_PROT64;
return X86EMUL_CONTINUE;
}
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
-static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
-static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{ \
unsigned long val) \
{ \
vcpu->arch.regs[VCPU_REGS_##uname] = val; \
- kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname); \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
-static inline u64 rsvd_bits(int s, int e)
+static __always_inline u64 rsvd_bits(int s, int e)
{
+ BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
+
+ if (__builtin_constant_p(e))
+ BUILD_BUG_ON(e > 63);
+ else
+ e &= 63;
+
if (e < s)
return 0;
}
/*
- * Clear non-leaf entries (and free associated page tables) which could
- * be replaced by large mappings, for GFNs within the slot.
+ * Clear leaf entries which could be replaced by large mappings, for
+ * GFNs within the slot.
*/
static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
tdp_root_for_each_pte(iter, root, start, end) {
if (!is_shadow_present_pte(iter.old_spte) ||
- is_last_spte(iter.old_spte, iter.level))
+ !is_last_spte(iter.old_spte, iter.level))
continue;
pfn = spte_to_pfn(iter.old_spte);
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (WARN_ON(!is_guest_mode(vcpu)))
+ return true;
+
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
bool vmcb12_lma;
if ((vmcb12->save.efer & EFER_SVME) == 0)
vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
- if (!vmcb12_lma) {
- if (vmcb12->save.cr4 & X86_CR4_PAE) {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
- return false;
- } else {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
- return false;
- }
- } else {
+ if (vmcb12_lma) {
if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
!(vmcb12->save.cr0 & X86_CR0_PE) ||
- (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+ (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
return false;
}
if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
unsigned long first, last;
int ret;
+ lockdep_assert_held(&kvm->lock);
+
if (ulen == 0 || uaddr + ulen < uaddr)
return ERR_PTR(-EINVAL);
if (!region)
return -ENOMEM;
+ mutex_lock(&kvm->lock);
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
if (IS_ERR(region->pages)) {
ret = PTR_ERR(region->pages);
+ mutex_unlock(&kvm->lock);
goto e_free;
}
+ region->uaddr = range->addr;
+ region->size = range->size;
+
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
/*
* The guest may change the memory encryption attribute from C=0 -> C=1
* or vice versa for this memory range. Lets make sure caches are
*/
sev_clflush_pages(region->pages, region->npages);
- region->uaddr = range->addr;
- region->size = range->size;
-
- mutex_lock(&kvm->lock);
- list_add_tail(&region->list, &sev->regions_list);
- mutex_unlock(&kvm->lock);
-
return ret;
e_free:
* to be returned:
* GPRs RAX, RBX, RCX, RDX
*
- * Copy their values to the GHCB if they are dirty.
+ * Copy their values, even if they may not have been written during the
+ * VM-Exit. It's the guest's responsibility to not consume random data.
*/
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
- ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
- ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
- ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
- ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+ ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+ ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+ ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+ ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}
static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
return 0;
}
+ if (sev_active()) {
+ pr_info("KVM is unsupported when running as an SEV guest\n");
+ return 0;
+ }
+
return 1;
}
{
struct vcpu_svm *svm = to_svm(vcpu);
+ trace_kvm_entry(vcpu);
+
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
}
/* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
-#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U
#define MSR_INVALID 0xffffffffU
extern int sev;
return 0;
}
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_host_map *map;
- struct page *page;
- u64 hpa;
/*
* hv_evmcs may end up being not mapped after migration (when
}
}
+ return true;
+}
+
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_host_map *map;
+ struct page *page;
+ u64 hpa;
+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
else
exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+
+ return true;
+}
+
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+ if (!nested_get_evmcs_page(vcpu))
+ return false;
+
+ if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ return false;
+
return true;
}
if (is_guest_mode(vcpu)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
- } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
- if (vmx->nested.hv_evmcs)
- copy_enlightened_to_vmcs12(vmx);
- else if (enable_shadow_vmcs)
- copy_shadow_to_vmcs12(vmx);
+ } else {
+ copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
+ if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+ if (vmx->nested.hv_evmcs)
+ copy_enlightened_to_vmcs12(vmx);
+ else if (enable_shadow_vmcs)
+ copy_shadow_to_vmcs12(vmx);
+ }
}
BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
.hv_timer_pending = nested_vmx_preemption_timer_pending,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
- .get_nested_state_pages = nested_get_vmcs12_pages,
+ .get_nested_state_pages = vmx_get_nested_state_pages,
.write_log_dirty = nested_vmx_write_pml_buffer,
.enable_evmcs = nested_enable_evmcs,
.get_evmcs_version = nested_get_evmcs_version,
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
x86_pmu.num_counters_gp);
+ eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+ eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
x86_pmu.num_counters_fixed);
+ edx.split.bit_width_fixed = min_t(int,
+ edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}
if (vmx->emulation_required)
return EXIT_FASTPATH_NONE;
+ trace_kvm_entry(vcpu);
+
if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
vmcs_write32(PLE_WINDOW, vmx->ple_window);
switch (index) {
case MSR_IA32_TSX_CTRL:
/*
- * No need to pass TSX_CTRL_CPUID_CLEAR through, so
- * let's avoid changing CPUID bits under the host
- * kernel's feet.
+ * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
+ * interception. Keep the host value unchanged to avoid
+ * changing CPUID bits under the host kernel's feet.
+ *
+ * hle=0, rtm=0, tsx_ctrl=1 can be found with some
+ * combinations of new kernel and old userspace. If
+ * those guests run on a tsx=off host, do allow guests
+ * to use TSX_CTRL, but do not change the value on the
+ * host so that TSX remains always disabled.
*/
- vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ if (boot_cpu_has(X86_FEATURE_RTM))
+ vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ else
+ vmx->guest_uret_msrs[j].mask = 0;
break;
default:
vmx->guest_uret_msrs[j].mask = -1ull;
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
if (!boot_cpu_has_bug(X86_BUG_MDS))
data |= ARCH_CAP_MDS_NO;
- /*
- * On TAA affected systems:
- * - nothing to do if TSX is disabled on the host.
- * - we emulate TSX_CTRL if present on the host.
- * This lets the guest use VERW to clear CPU buffers.
- */
- if (!boot_cpu_has(X86_FEATURE_RTM))
- data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
- else if (!boot_cpu_has_bug(X86_BUG_TAA))
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ /*
+ * If RTM=0 because the kernel has disabled TSX, the host might
+ * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
+ * and therefore knows that there cannot be TAA) but keep
+ * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
+ * and we want to allow migrating those guests to tsx=off hosts.
+ */
+ data &= ~ARCH_CAP_TAA_NO;
+ } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
data |= ARCH_CAP_TAA_NO;
+ } else {
+ /*
+ * Nothing to do here; we emulate TSX_CTRL if present on the
+ * host so the guest can choose between disabling TSX or
+ * using VERW to clear CPU buffers.
+ */
+ }
return data;
}
{
process_nmi(vcpu);
+ if (kvm_check_request(KVM_REQ_SMI, vcpu))
+ process_smi(vcpu);
+
/*
* In guest mode, payload delivery should be deferred,
* so that the L1 hypervisor can intercept #PF before
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
- ;
- else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+ if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
r = 0;
goto out;
}
kvm_x86_ops.request_immediate_exit(vcpu);
}
- trace_kvm_entry(vcpu);
-
fpregs_assert_state_consistent();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
switch_fpu_return();
*/
if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
return false;
+ if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+ return false;
} else {
/*
* Not in 64-bit mode: EFER.LMA is clear and the code
fx_init(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+ vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
return 0;
old_npages = slot->npages;
- hva = 0;
+ hva = slot->userspace_addr;
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
__reserved_bits |= X86_CR4_UMIP; \
if (!__cpu_has(__c, X86_FEATURE_VMX)) \
__reserved_bits |= X86_CR4_VMXE; \
+ if (!__cpu_has(__c, X86_FEATURE_PCID)) \
+ __reserved_bits |= X86_CR4_PCIDE; \
__reserved_bits; \
})
{
return sev_status & MSR_AMD64_SEV_ENABLED;
}
+EXPORT_SYMBOL_GPL(sev_active);
/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)
exc_debug(regs);
}
+DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
+{
+ /* This should never happen and there is no way to handle it. */
+ pr_err("Unknown trap in Xen PV mode.");
+ BUG();
+}
+
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
{
unsigned int nr;
bool ist_okay = false;
+ bool found = false;
/*
* Replace trap handler addresses by Xen specific ones.
if (*addr == entry->orig) {
*addr = entry->xen;
ist_okay = entry->ist_okay;
+ found = true;
break;
}
}
nr = (*addr - (void *)early_idt_handler_array[0]) /
EARLY_IDT_HANDLER_SIZE;
*addr = (void *)xen_early_idt_handler_array[nr];
+ found = true;
}
- if (WARN_ON(ist != 0 && !ist_okay))
+ if (!found)
+ *addr = (void *)xen_asm_exc_xen_unknown_trap;
+
+ if (WARN_ON(found && ist != 0 && !ist_okay))
return false;
return true;
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
+xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback
__INIT
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
- bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+ bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
- bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+ bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
* shortage.
*/
/* no more than ~18% of tags for async I/O */
- bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+ bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
- bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+ bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
*/
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
+ might_sleep();
+
spin_lock_irq(&blkcg->lock);
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
- if (spin_trylock(&q->queue_lock)) {
- blkg_destroy(blkg);
- spin_unlock(&q->queue_lock);
- } else {
+ if (need_resched() || !spin_trylock(&q->queue_lock)) {
+ /*
+ * Given that the system can accumulate a huge number
+ * of blkgs in pathological cases, check to see if we
+ * need to reschedule to avoid a softlockup.
+ */
spin_unlock_irq(&blkcg->lock);
- cpu_relax();
+ cond_resched();
spin_lock_irq(&blkcg->lock);
+ continue;
}
+
+ blkg_destroy(blkg);
+ spin_unlock(&q->queue_lock);
}
spin_unlock_irq(&blkcg->lock);
struct request_queue *q = hctx->queue;
struct blk_mq_tag_set *set = q->tag_set;
- if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return true;
users = atomic_read(&set->active_queues_shared_sbitmap);
} else {
void set_capacity(struct gendisk *disk, sector_t sectors)
{
struct block_device *bdev = disk->part0;
+ unsigned long flags;
- spin_lock(&bdev->bd_size_lock);
+ spin_lock_irqsave(&bdev->bd_size_lock, flags);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
- spin_unlock(&bdev->bd_size_lock);
+ spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
}
EXPORT_SYMBOL(set_capacity);
static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
- spin_lock(&bdev->bd_size_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdev->bd_size_lock, flags);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
- spin_unlock(&bdev->bd_size_lock);
+ spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
}
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
err = blk_alloc_devt(bdev, &devt);
if (err)
- goto out_bdput;
+ goto out_put;
pdev->devt = devt;
/* delay uevent until 'holders' subdir is created */
ncomp = (struct acpi_iort_named_component *)node->node_data;
+ if (!ncomp->memory_address_limit) {
+ pr_warn(FW_BUG "Named component missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
rc = (struct acpi_iort_root_complex *)node->node_data;
+ if (!rc->memory_address_limit) {
+ pr_warn(FW_BUG "Root complex missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
- dev->coherent_dma_mask = mask;
- *dev->dma_mask = mask;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
}
*dma_addr = dmaaddr;
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
- if (!adev->data.of_compatible)
- return 0;
-
- if (len > 0 && add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
+ if (adev->data.of_compatible)
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ else
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
if (dep->supplier == handle) {
acpi_bus_get_device(dep->consumer, &adev);
- if (!adev)
- continue;
- adev->dep_unmet--;
- if (!adev->dep_unmet)
- acpi_bus_attach(adev, true);
+ if (adev) {
+ adev->dep_unmet--;
+ if (!adev->dep_unmet)
+ acpi_bus_attach(adev, true);
+ }
list_del(&dep->node);
kfree(dep);
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
+ struct mutex thermal_check_lock;
+ refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
return 0;
}
-static void acpi_thermal_check(void *data)
-{
- struct acpi_thermal *tz = data;
-
- thermal_zone_device_update(tz->thermal_zone,
- THERMAL_EVENT_UNSPECIFIED);
-}
-
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
Driver Interface
-------------------------------------------------------------------------- */
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
- acpi_thermal_check(tz);
+
+ /*
+ * In general, it is not sufficient to check the pending bit, because
+ * subsequent instances of this function may be queued after one of them
+ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
+ * one of them is running, though, because it may have done the actual
+ * check some time ago, so allow at least one of them to block on the
+ * mutex while another one is running the update.
+ */
+ if (!refcount_dec_not_one(&tz->thermal_check_count))
+ return;
+
+ mutex_lock(&tz->thermal_check_lock);
+
+ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+ refcount_inc(&tz->thermal_check_count);
+
+ mutex_unlock(&tz->thermal_check_lock);
}
static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
+ refcount_set(&tz->thermal_check_count, 3);
+ mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+ acpi_queue_thermal_check(tz);
return AE_OK;
}
if (!sock)
return err;
+ /*
+ * We need to make sure we don't get any errant requests while we're
+ * reallocating the ->socks array.
+ */
+ blk_mq_freeze_queue(nbd->disk->queue);
+
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
+ blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}
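The blk_mq_freeze_queue()/blk_mq_unfreeze_queue() pair added above guarantees that no requests are in flight while the ->socks array is reallocated. A hedged sketch of the general shape of that pattern; only the two block-layer calls are real APIs, everything else here is made up:

#include <linux/blk-mq.h>

/* Sketch only: replace a pointer the I/O path dereferences, with no
 * requests dispatching while the swap happens. */
static void swap_backing(struct request_queue *q, void **slot, void *new)
{
	blk_mq_freeze_queue(q);		/* drains and blocks request dispatch */
	*slot = new;			/* safe: nothing reads the old value now */
	blk_mq_unfreeze_queue(q);
}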
#define CREATE_TRACE_POINTS
#include "trace.h"
-#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
+static inline sector_t mb_to_sects(unsigned long mb)
+{
+ return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
+}
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
return -EINVAL;
}
- zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
- dev_capacity_sects = MB_TO_SECTS(dev->size);
- dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
- dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
- if (dev_capacity_sects & (dev->zone_size_sects - 1))
- dev->nr_zones++;
+ zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ dev_capacity_sects = mb_to_sects(dev->size);
+ dev->zone_size_sects = mb_to_sects(dev->zone_size);
+ dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
+ >> ilog2(dev->zone_size_sects);
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
GFP_KERNEL | __GFP_ZERO);
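The round_up() form relies on zone_size_sects being a power of two and replaces the old divide-then-bump-on-remainder pair. Worked numbers under assumed module parameters size=1000 (MB) and zone_size=256 (MB): dev_capacity_sects = 2048000, zone_size_sects = 524288 (2^19), round_up(2048000, 524288) = 2097152, and 2097152 >> 19 = 4 zones, the last of them partial. A tiny standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT	9
#define SZ_1M		(1024UL * 1024UL)

/* round_up() for a power-of-two alignment, as in the kernel macro. */
static uint64_t round_up_pow2(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint64_t size_mb = 1000, zone_mb = 256;	/* assumed module parameters */
	uint64_t cap  = (size_mb * SZ_1M) >> SECTOR_SHIFT;	/* 2048000 */
	uint64_t zone = (zone_mb * SZ_1M) >> SECTOR_SHIFT;	/* 524288  */

	printf("nr_zones = %llu\n",
	       (unsigned long long)(round_up_pow2(cap, zone) / zone));	/* 4 */
	return 0;
}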
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_granularity = info->discard_granularity ?:
+ info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
static void blkfront_setup_discard(struct blkfront_info *info)
{
- int err;
- unsigned int discard_granularity;
- unsigned int discard_alignment;
-
info->feature_discard = 1;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
+ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-granularity",
+ 0);
+ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
ret = of_platform_default_populate(child, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
return ret;
}
}
static int simple_pm_bus_probe(struct platform_device *pdev)
{
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_enable(&pdev->dev);
if (np)
- of_platform_populate(np, NULL, NULL, &pdev->dev);
+ of_platform_populate(np, NULL, lookup, &pdev->dev);
return 0;
}
config MXC_CLK_SCU
tristate
- depends on ARCH_MXC
- depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
def_bool SOC_IMX1
return 0;
}
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
return 0;
}
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
return 0;
}
+#endif
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
},
};
-static struct clk_branch gcc_camera_ahb_clk = {
- .halt_reg = 0xb008,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0xb008,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0xb008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_camera_hf_axi_clk = {
.halt_reg = 0xb020,
.halt_check = BRANCH_HALT,
[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
- [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
/*
* Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
- * GCC_GPU_CFG_AHB_CLK
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
*/
regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
__le32 byte_cnt;
union {
__le32 src;
- dma_addr_t src_dma;
+ u32 src_dma;
};
union {
__le32 dst;
- dma_addr_t dst_dma;
+ u32 dst_dma;
};
__le32 next_dma;
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
- * Note, all properties are considered as u8 arrays.
- * To get a value of any of them the caller must use device_property_read_u8_array().
+ * Properties are stored either as:
+ * u8 arrays which can be retrieved with device_property_read_u8_array() or
+ * booleans which can be queried with device_property_present().
*/
#define pr_fmt(fmt) "apple-properties: " fmt
entry_data = ptr + key_len + sizeof(val_len);
entry_len = val_len - sizeof(val_len);
- entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
- entry_len);
+ if (entry_len)
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
+ else
+ entry[i] = PROPERTY_ENTRY_BOOL(key);
+
if (dump_properties) {
dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
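On the consumer side the two accessors named in the updated comment split along property kind: a zero-length EFI value now registers as a boolean whose mere presence is the value, while everything else stays a u8 array. A hedged sketch of a reader; the device and the two property names are hypothetical:

#include <linux/device.h>
#include <linux/property.h>

static void example_read_props(struct device *dev)
{
	u8 buf[16];
	int len;

	/* Boolean property: presence is the value. */
	if (device_property_present(dev, "example-flag"))
		dev_info(dev, "example-flag is set\n");

	/* u8 array property: query its length, then read it. */
	len = device_property_count_u8(dev, "example-blob");
	if (len > 0 && len <= (int)sizeof(buf) &&
	    !device_property_read_u8_array(dev, "example-blob", buf, len))
		dev_info(dev, "read %d bytes of example-blob\n", len);
}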
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
+ select SOC_BUS
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->eflags, 0);
+ if (line->desc)
+ WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+
+ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+ if (ret)
+ goto err_free_ida;
+
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (gc->parent && gc->parent->driver)
gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
ret = -ENOMEM;
- goto err_free_ida;
+ goto err_free_dev_name;
}
if (gc->ngpio == 0) {
kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
+err_free_dev_name:
+ kfree(dev_name(&gdev->dev));
err_free_ida:
ida_free(&gpio_ida, gdev->id);
err_free_gdev:
struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
- int first, j, ret;
+ int first, j;
if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
#include <linux/sched/task.h>
#include "amdgpu_object.h"
+#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = 1;
- bp.domain = alloc_domain;
- bp.flags = alloc_flags;
- bp.type = bo_type;
- bp.resv = NULL;
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
- domain_string(alloc_domain), ret);
+ domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
+ bo = gem_to_amdgpu_bo(obj);
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
resv = vm->root.base.bo->tbo.base.resv;
}
-retry:
initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count) {
+ if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmCGTS_TCC_DISABLE_Vangogh 0x5006
+#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007
+#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
/* TCCs are global (not instanced). */
- uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
- RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ uint32_t tcc_disable;
+
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
+ break;
+ default:
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ break;
+ }
adev->gfx.config.tcc_disabled_mask =
REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
- dc_state->streams[k], &bundle->stream_update, dc_state);
+ dc_state->streams[k], &bundle->stream_update);
}
cleanup:
stream_update.stream = stream_state;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
- stream_state, &stream_update,
- stream_state->ctx->dc->current_state);
+ stream_state, &stream_update);
mutex_unlock(&adev->dm.dc_lock);
}
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
drm_connector_update_edid_property(connector,
aconnector->edid);
- drm_add_edid_modes(connector, aconnector->edid);
-
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i;
+ int i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
amdgpu_dm_commit_cursors(state);
/* update planes when needed */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
bundle->surface_updates,
planes_count,
acrtc_state->stream,
- &bundle->stream_update,
- dc_state);
+ &bundle->stream_update);
/**
* Enable or disable the interrupts on the backend.
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_surface_update dummy_updates[MAX_SURFACES];
+ struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
- memset(&dummy_updates, 0, sizeof(dummy_updates));
+ memset(&surface_updates, 0, sizeof(surface_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
* To fix this, DC should permit updating only stream properties.
*/
for (j = 0; j < status->plane_count; j++)
- dummy_updates[j].surface = status->plane_states[0];
+ surface_updates[j].surface = status->plane_states[j];
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
- dummy_updates,
+ surface_updates,
status->plane_count,
dm_new_crtc_state->stream,
- &stream_update,
- dc_state);
+ &stream_update);
mutex_unlock(&dm->dc_lock);
}
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
if (computed_streams[i])
continue;
+ if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
+
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
}
return true;
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
+ struct dc_stream_update *stream_update)
{
const struct dc_stream_status *stream_status;
enum surface_update_type update_type;
if (update_type >= UPDATE_TYPE_FULL) {
+ struct dc_plane_state *new_planes[MAX_SURFACES];
+
+ memset(new_planes, 0, sizeof(new_planes));
+
+ for (i = 0; i < surface_count; i++)
+ new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
context = dc_create_state(dc);
return;
}
- dc_resource_state_copy_construct(state, context);
+ dc_resource_state_copy_construct(
+ dc->current_state, context);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+	/* remove old surfaces from context */
+ if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+ DC_ERROR("Failed to remove streams for new validate context!\n");
+ return;
+ }
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
+ /* add surface to context */
+ if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ DC_ERROR("Failed to add streams for new validate context!\n");
+ return;
}
+
}
switch (dpcd_aux_read_interval) {
case 0x01:
- aux_rd_interval_us = 400;
+ aux_rd_interval_us = 4000;
break;
case 0x02:
- aux_rd_interval_us = 4000;
+ aux_rd_interval_us = 8000;
break;
case 0x03:
- aux_rd_interval_us = 8000;
+ aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state);
+ struct dc_stream_update *stream_update);
/*
* Log the current stream state.
*/
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse
endif
ifdef CONFIG_PPC64
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float
endif
ifdef CONFIG_X86
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
endif
ifdef CONFIG_PPC64
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
endif
ifdef CONFIG_X86
DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse
endif
ifdef CONFIG_PPC64
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
endif
ifdef CONFIG_X86
*clock_req);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
int (*gfx_off_control)(struct smu_context *smu, bool enable);
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed);
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
- uint32_t rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_rpm) {
- if (speed > 100)
- speed = 100;
- rpm = speed * smu->fan_max_rpm / 100;
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
- }
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
mutex_unlock(&smu->mutex);
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
return 0;
}
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_auto_fan_control(smu, 0))
+ return -EINVAL;
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
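The new helper converts a percentage into a PWM duty value by plain proportional scaling against whatever FMAX_DUTY100 reports. Illustrative arithmetic only (255 is an assumed register readback, not something the patch guarantees): a 40% request with duty100 = 255 gives 40 * 255 / 100 = 102, which is then programmed into CG_FDO_CTRL0.FDO_STATIC_DUTY:

#include <stdint.h>

/* Mirrors the duty computation above; 255 is an assumed FMAX_DUTY100 value. */
static uint32_t percent_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp = (uint64_t)speed * duty100;	/* 40 * 255 = 10200 */

	return (uint32_t)(tmp / 100);			/* 10200 / 100 = 102 */
}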
int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
switch (mode) {
case AMD_FAN_CTRL_NONE:
- ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_cpu_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <sound/hdmi-codec.h>
struct mutex ocm_lock;
struct wait_queue_head wq;
+ struct work_struct work;
struct device_node *dsi0_node;
struct device_node *dsi1_node;
bool hpd_supported;
bool edid_read;
+ /* can be accessed from different threads, so protect this with ocm_lock */
+ bool hdmi_connected;
uint8_t fw_version;
};
if (irq_status)
regmap_write(lt9611uxc->regmap, 0xb022, 0);
- lt9611uxc_unlock(lt9611uxc);
-
- if (irq_status & BIT(0))
+ if (irq_status & BIT(0)) {
lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+		wake_up_all(&lt9611uxc->wq);

+ }
if (irq_status & BIT(1)) {
- if (lt9611uxc->connector.dev)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- else
-		drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
+		lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+		schedule_work(&lt9611uxc->work);
}
+ lt9611uxc_unlock(lt9611uxc);
+
return IRQ_HANDLED;
}
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+ struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+ bool connected;
+
+	if (lt9611uxc->connector.dev) {
+		drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+	} else {
+		mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+		mutex_unlock(&lt9611uxc->ocm_lock);
+
+		drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
+ }
+}
+
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
{
gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
int ret;
- int connected = 1;
+ bool connected = true;
+
+ lt9611uxc_lock(lt9611uxc);
if (lt9611uxc->hpd_supported) {
- lt9611uxc_lock(lt9611uxc);
		ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
- lt9611uxc_unlock(lt9611uxc);
if (ret)
dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
else
connected = reg_val & BIT(1);
}
+ lt9611uxc->hdmi_connected = connected;
+
+ lt9611uxc_unlock(lt9611uxc);
return connected ? connector_status_connected :
connector_status_disconnected;
static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
{
return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
}
static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
ret = lt9611uxc_wait_for_edid(lt9611uxc);
if (ret < 0) {
dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
- return ERR_PTR(ret);
+ return NULL;
+ } else if (ret == 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+ return NULL;
}
return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
lt9611uxc->fw_version = ret;
	init_waitqueue_head(&lt9611uxc->wq);
+	INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
+ flush_scheduled_work();
lt9611uxc_audio_exit(lt9611uxc);
	drm_bridge_remove(&lt9611uxc->bridge);
return 0;
}
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
- if (dp_link_bw == 0 || dp_link_count == 0)
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
- dp_link_bw, dp_link_count);
+ if (link_rate == 0 || link_lane_count == 0)
+ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+ link_rate, link_lane_count);
- return dp_link_bw * dp_link_count / 2;
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ return link_rate * link_lane_count / 54000;
}
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
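Worked example for the new units: drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4) returns 540000 (5.4 Gbit/s per lane in 10 kbit/s units), so an HBR2 x4 link yields 540000 * 4 / 54000 = 40 PBN per timeslot, and a stream needing, say, 520 PBN would occupy DIV_ROUND_UP(520, 40) = 13 timeslots per MTP. A kernel-context sketch of such a caller (the 520 PBN figure is assumed, not derived from a real mode):

/* Illustrative only: turning a stream's PBN requirement into timeslots. */
static int example_slots_for_stream(void)
{
	int pbn_div = drm_dp_get_vc_payload_bw(540000 /* HBR2, 10 kbit/s units */, 4);

	return DIV_ROUND_UP(520 /* assumed stream PBN */, pbn_div);	/* == 13 */
}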
/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
int n_entries, ln;
u32 val;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- /* The table does not have values for level 3 and level 9. */
- if (level >= n_entries || level == 3 || level == 9) {
+ if (level >= n_entries) {
drm_dbg_kms(&dev_priv->drm,
"DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
- level = n_entries - 2;
+ level, n_entries - 1);
+ level = n_entries - 1;
}
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
if (level >= n_entries)
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_combo(i915, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(i915, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
* the used lanes of the DDI.
*/
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
/*
* 7.g Configure and enable DDI_BUF_CTL
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_de_write(dev_priv, reg, val);
}
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
vma = ERR_PTR(ret);
goto err;
}
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
- i915_gem_object_unpin_from_display_plane(vma);
- i915_gem_object_unlock(vma->obj);
-
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
return plane_color_ctl;
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
-
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
- drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "");
-
- intel_dp->set_signal_levels(intel_dp, crtc_state);
-}
-
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ char phy_name[10];
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "",
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ intel_dp->set_signal_levels(intel_dp, crtc_state);
+}
+
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
- crtc_state->pbn, 0);
+ crtc_state->pbn,
+ drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
intel_frontbuffer_flip_complete(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
return 0;
out_unpin:
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
out_pin_section:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_ME 0x0000
#define PREOFF_YUV_TO_RGB_LO 0x1800
#define ROFF(x) (((x) & 0xffff) << 16)
#define GOFF(x) (((x) & 0xffff) << 0)
#define BOFF(x) (((x) & 0xffff) << 16)
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
static void
icl_program_input_csc(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
0x0, 0x7800, 0x7F10,
},
};
-
- /* Matrix for Limited Range to Full Range Conversion */
- static const u16 input_csc_matrix_lr[][9] = {
- /*
- * BT.601 Limted range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.596027,
- * 1.164384, -0.39175, -0.812813,
- * 1.164384, 2.017232, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7CC8, 0x7950, 0x0,
- 0x8D00, 0x7950, 0x9C88,
- 0x0, 0x7950, 0x6810,
- },
- /*
- * BT.709 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.792741,
- * 1.164384, -0.213249, -0.532909,
- * 1.164384, 2.112402, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7E58, 0x7950, 0x0,
- 0x8888, 0x7950, 0xADA8,
- 0x0, 0x7950, 0x6870,
- },
- /*
- * BT.2020 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164, 0.000, 1.678,
- * 1.164, -0.1873, -0.6504,
- * 1.164, 2.1417, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7D70, 0x7950, 0x0,
- 0x8A68, 0x7950, 0xAC00,
- 0x0, 0x7950, 0x6890,
- },
- };
- const u16 *csc;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->hw.color_encoding];
- else
- csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
ROFF(csc[0]) | GOFF(csc[1]));
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- 0);
- else
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
intel_de_write_fw(dev_priv,
return vma;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (list_empty(&obj->vma.list))
- return;
-
- mutex_lock(&i915->ggtt.vm.mutex);
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- spin_unlock(&obj->vma.lock);
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- if (obj->mm.madv == I915_MADV_WILLNEED &&
- !atomic_read(&obj->mm.shrink_pin))
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
i915_gem_object_unlock(obj);
if (write_domain)
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
&cb_kernel_ivb,
desc_count);
+ /* Reset inherited context registers */
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
gen7_emit_state_base_address(&cmds, descriptors);
gen7_emit_pipeline_invalidate(&cmds);
+ /* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
+ /* Execute the kernel on all HW threads */
for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq)
-{
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!__dma_fence_signal(&rq->fence)) {
- i915_request_put(rq);
- return false;
- }
-
- return true;
-}
-
static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
- if (__signal_request(rq))
+ if (__dma_fence_signal(&rq->fence))
/* We own signal_node now, xfer to local list */
signal = slist_add(&rq->signal_node, signal);
+ else
+ i915_request_put(rq);
if (release) {
add_retire(b, ce->timeline);
kfree(b);
}
+static void irq_signal_request(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
+{
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ if (llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+}
+
static void insert_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- i915_request_get(rq);
-
/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
if (__i915_request_is_complete(rq)) {
- if (__signal_request(rq) &&
- llist_add(&rq->signal_node, &b->signaled_requests))
- irq_work_queue(&b->irq_work);
+ irq_signal_request(rq, b);
return;
}
break;
}
}
+
+ i915_request_get(rq);
list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
bool release;
- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ spin_unlock(&ce->signal_lock);
return;
+ }
- spin_lock(&ce->signal_lock);
list_del_rcu(&rq->signal_link);
- release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
if (release)
intel_context_put(ce);
+ if (__i915_request_is_complete(rq))
+ irq_signal_request(rq, b);
+
i915_request_put(rq);
}
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
- &ggtt->error_capture,
- PAGE_SIZE, 0,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ /*
+ * Reserve a mappable slot for our lockless error capture.
+ *
+ * We strongly prefer taking address 0x0 in order to protect
+ * other critical buffers against accidental overwrites,
+ * as writing to address 0 is a very common mistake.
+ *
+ * Since 0 may already be in use by the system (e.g. the BIOS
+ * framebuffer), we let the reservation fail quietly and hope
+ * 0 remains reserved always.
+ *
+ * If we fail to reserve 0, and then fail to find any space
+ * for an error-capture, remain silent. We can afford not
+ * to reserve an error_capture node as we have fallback
+ * paths, and we trust that 0 will remain reserved. However,
+ * the only likely reason for failure to insert is a driver
+ * bug, which we expect to cause other failures...
+ */
+ ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+ if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+ drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ ggtt->error_capture.size, 0,
+ ggtt->error_capture.color,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+ ggtt->error_capture.start,
+ ggtt->error_capture.start + ggtt->error_capture.size);
/*
* The upper portion of the GuC address space has a sizeable hole
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- drm_dbg_kms(&ggtt->vm.i915->drm,
- "clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
int __i915_active_wait(struct i915_active *ref, int state)
{
- int err;
-
might_sleep();
- if (!i915_active_acquire_if_busy(ref))
- return 0;
-
/* Any fence added after the wait begins will not be auto-signaled */
- err = flush_lazy_signals(ref);
- i915_active_release(ref);
- if (err)
- return err;
+ if (i915_active_acquire_if_busy(ref)) {
+ int err;
- if (!i915_active_is_idle(ref) &&
- ___wait_var_event(ref, i915_active_is_idle(ref),
- state, 0, 0, schedule()))
- return -EINTR;
+ err = flush_lazy_signals(ref);
+ i915_active_release(ref);
+ if (err)
+ return err;
+ if (___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
+ return -EINTR;
+ }
+
+ /*
+ * After the wait is complete, the caller may free the active.
+ * We have to flush any concurrent retirement before returning.
+ */
flush_work(&ref->work);
return 0;
}
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
static __always_inline bool
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto out_put_batch;
+ goto out_put_out;
}
err = i915_vma_pin(vma, 0, 0,
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
- NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
- NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+ if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
+ } else {
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
+ }
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
#include "head.h"
#include "core.h"
+#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
return 0;
}
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
- .curs_set = head907d_curs_set,
+ .curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,
nvif_notify_get(&wndw->notify);
}
+static const u64 nv50_cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
+ const u64 *format_modifiers;
int nformat;
int ret;
for (nformat = 0; format[nformat]; nformat++);
- ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat,
- nouveau_display(dev)->format_modifiers,
- type, "%s-%d", name, index);
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ format_modifiers = nv50_cursor_format_modifiers;
+ else
+ format_modifiers = nouveau_display(dev)->format_modifiers;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+ format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
} while(0)
#endif
-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
- PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
- PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
+#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \
+ PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \
+ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
} while(0)
-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
- PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
+ PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
- PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
+ PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
- PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
+ PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
- PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
+ PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
- PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
+ PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
- PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
+ PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
- PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
+ PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
- PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
+ PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
- PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
+ PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_1D(X,o,p,s,mA,dA) \
- PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
- PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
- PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
- PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
- PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1D(X,o,p,s,mA,dA) \
+ PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
+ PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
+ PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
+ PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+ PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
- PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
- PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
- PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
- PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
- PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
- X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \
+ X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
-#define PUSH_1P(X,o,p,s,mA,dp,ds) \
- PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
- PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
- X##mA, (dA))
-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
- PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1P(X,o,p,s,mA,dp,ds) \
+ PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
+ PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
+ X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
+ PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_TO_DEVICE);
+ num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+ i += num_pages;
+ }
}
void
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
+
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
+ num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+ i += num_pages;
+ }
}
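Both hunks above batch the dma_sync_single_for_*() calls over runs of physically contiguous pages instead of issuing one call per page. Below is a minimal userspace sketch of the same run-detection walk; plain integers stand in for struct page pointers, and the nouveau/TTM and DMA calls are not reproduced. The sketch advances the index by exactly the run length, whereas the hunks above additionally rely on the outer for-loop's own ++i, which steps one page past each run.

#include <stdio.h>
#include <stddef.h>

/* Simulated page frame numbers; adjacent values model contiguous pages. */
static const unsigned long pfns[] = { 10, 11, 12, 40, 41, 90 };

int main(void)
{
	size_t n = sizeof(pfns) / sizeof(pfns[0]);
	size_t i = 0;

	while (i < n) {
		size_t num_pages = 1;
		size_t j;

		/* Extend the run while the next page is contiguous. */
		for (j = i + 1; j < n; ++j) {
			if (pfns[j] != pfns[j - 1] + 1)
				break;
			++num_pages;
		}

		/* One dma_sync_single_for_*() call would cover
		 * num_pages * PAGE_SIZE starting at dma_address[i] here. */
		printf("run at index %zu: %zu page(s)\n", i, num_pages);

		i += num_pages;
	}
	return 0;
}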
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
struct drm_nouveau_svm_init *args = data;
int ret;
+ /* We need to fail if svm is disabled */
+ if (!cli->drm->svm)
+ return -ENOSYS;
+
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
* put_page() on a TTM allocated page is illegal.
*/
if (order)
- gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_KSWAPD_RECLAIM;
if (!pool->use_dma_alloc) {
* for now we just allocate globally.
*/
if (!hvs->hvs5)
- /* 96kB */
- drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
- /* 70k words */
- drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
lbm = pix_per_line * 16;
}
- lbm = roundup(lbm, 32);
+ /* Align it to 64 or 128 (hvs5) bytes */
+ lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+
+ /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+ lbm /= vc4->hvs->hvs5 ? 4 : 2;
return lbm;
}
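Per the comments in the hunk above, the LBM estimate is now rounded up to 64 bytes (128 on HVS5) and then divided by the number of pixels held in one LBM word, so the returned value is in words rather than bytes. A small standalone sketch of that arithmetic, using a made-up pix_per_line and only the constants visible above (the other scaling branches of vc4_lbm_size() are not shown in this hunk):

#include <stdio.h>
#include <stdbool.h>

/* Round x up to a multiple of align (align must be a power of two). */
static unsigned int round_up_pow2(unsigned int x, unsigned int align)
{
	return (x + align - 1) & ~(align - 1);
}

static unsigned int lbm_words(unsigned int pix_per_line, bool hvs5)
{
	unsigned int lbm = pix_per_line * 16;		/* one of the scaling branches above */

	lbm = round_up_pow2(lbm, hvs5 ? 128 : 64);	/* align to 64 or 128 (hvs5) bytes */
	return lbm / (hvs5 ? 4 : 2);			/* 2 or 4 (hvs5) pixels per LBM word */
}

int main(void)
{
	printf("pix_per_line=1920: hvs4=%u words, hvs5=%u words\n",
	       lbm_words(1920, false), lbm_words(1920, true));
	return 0;
}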
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
- SCALER_POS1_SCL_WIDTH) |
+ SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
- SCALER_POS1_SCL_HEIGHT));
+ SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if (cls->name == MT_CLS_WIN_8 &&
+ if ((cls->name == MT_CLS_WIN_8 ||
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
}
if (flush)
- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
else if (insert)
- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
raw_data, report_size);
return insert && !flush;
static int wacom_devm_kfifo_alloc(struct wacom *wacom)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int error;
pen_fifo = devres_alloc(wacom_devm_kfifo_release,
}
devres_add(&wacom->hdev->dev, pen_fifo);
+ wacom_wac->pen_fifo = pen_fifo;
return 0;
}
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- struct kfifo_rec_ptr_2 pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
mtk_i2c_clock_disable(i2c);
ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c);
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ I2C_DRV_NAME, i2c);
if (ret < 0) {
dev_err(&pdev->dev,
"Request I2C IRQ %d fail\n", irq);
}
#ifdef CONFIG_PM_SLEEP
-static int mtk_i2c_resume(struct device *dev)
+static int mtk_i2c_suspend_noirq(struct device *dev)
+{
+ struct mtk_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_suspended(&i2c->adap);
+
+ return 0;
+}
+
+static int mtk_i2c_resume_noirq(struct device *dev)
{
int ret;
struct mtk_i2c *i2c = dev_get_drvdata(dev);
mtk_i2c_clock_disable(i2c);
+ i2c_mark_adapter_resumed(&i2c->adap);
+
return 0;
}
#endif
static const struct dev_pm_ops mtk_i2c_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+ mtk_i2c_resume_noirq)
};
static struct platform_driver mtk_i2c_driver = {
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
struct hns_roce_hem_table sccc_table;
struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
- spinlock_t bank_lock;
+ struct mutex bank_mutex;
};
struct hns_roce_cq_table {
hr_qp->doorbell_qpn = 1;
} else {
- spin_lock(&qp_table->bank_lock);
+ mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
if (ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to alloc QPN, ret = %d\n", ret);
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
return ret;
}
qp_table->bank[bankid].inuse++;
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
hr_qp->doorbell_qpn = (u32)num;
}
ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
- spin_lock(&hr_dev->qp_table.bank_lock);
+ mutex_lock(&hr_dev->qp_table.bank_mutex);
hr_dev->qp_table.bank[bankid].inuse--;
- spin_unlock(&hr_dev->qp_table.bank_lock);
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
}
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
unsigned int i;
mutex_init(&qp_table->scc_mutex);
+ mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
reserved_from_bot = hr_dev->caps.reserved_qps;
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
struct usnic_vnic_res *vnic_res;
int len;
- len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+ len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
qp_grp->ibqp.qp_num,
usnic_ib_qp_grp_state_to_string(qp_grp->state),
qp_grp->owner_pid,
res_chunk = qp_grp->res_chunk_list[i];
for (j = 0; j < res_chunk->cnt; j++) {
vnic_res = res_chunk->res[j];
- len += sysfs_emit_at(
- buf, len, "%s[%d] ",
+ len += sysfs_emit_at(buf, len, " %s[%d]",
usnic_vnic_res_type_to_str(vnic_res->type),
vnic_res->vnic_idx);
}
}
- len = sysfs_emit_at(buf, len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+ switch (type) {
+ case PVRDMA_NETWORK_ROCE_V1:
+ return RDMA_NETWORK_ROCE_V1;
+ case PVRDMA_NETWORK_IPV4:
+ return RDMA_NETWORK_IPV4;
+ case PVRDMA_NETWORK_IPV6:
+ return RDMA_NETWORK_IPV6;
+ default:
+ return RDMA_NETWORK_IPV6;
+ }
+}
+
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
wc->dlid_path_bits = cqe->dlid_path_bits;
wc->port_num = cqe->port_num;
wc->vendor_err = cqe->vendor_err;
- wc->network_hdr_type = cqe->network_hdr_type;
+ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
/* Update shared ring state */
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
+#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
{
struct udphdr *udph;
struct net_device *ndev = skb->dev;
+ struct net_device *rdev = ndev;
struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ if (!rxe && is_vlan_dev(rdev)) {
+ rdev = vlan_dev_real_dev(ndev);
+ rxe = rxe_get_dev_from_net(rdev);
+ }
if (!rxe)
goto drop;
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
{ }
};
};
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
-static const struct spi_device_id ariel_pwrbutton_id_table[] = {
- { "wyse-ariel-ec-input", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table);
-
static struct spi_driver ariel_pwrbutton_driver = {
.driver = {
.name = "dell-wyse-ariel-ec-input",
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
{ .id = "5663", .data = >1x_chip_data },
{ .id = "5688", .data = >1x_chip_data },
{ .id = "917S", .data = >1x_chip_data },
+ { .id = "9286", .data = >1x_chip_data },
{ .id = "911", .data = >911_chip_data },
{ .id = "9271", .data = >911_chip_data },
{ .compatible = "goodix,gt927" },
{ .compatible = "goodix,gt9271" },
{ .compatible = "goodix,gt928" },
+ { .compatible = "goodix,gt9286" },
{ .compatible = "goodix,gt967" },
{ }
};
void *buf, size_t len);
int (*get_touch_data)(struct i2c_client *client, u8 *data);
bool (*parse_touch_data)(const u8 *data, unsigned int finger,
- unsigned int *x, unsigned int *y);
+ unsigned int *x, unsigned int *y,
+ unsigned int *z);
bool (*continue_polling)(const u8 *data, bool touch);
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+ bool has_pressure_reg;
};
struct ili210x {
static bool ili210x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
if (touchdata[0] & BIT(finger))
return false;
static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u32 data;
static bool ili212x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
*x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+ *z = touchdata[1 + (finger * 5) + 4];
return true;
}
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+ .has_pressure_reg = true,
};
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
struct input_dev *input = priv->input;
int i;
bool contact = false, touch;
- unsigned int x = 0, y = 0;
+ unsigned int x = 0, y = 0, z = 0;
for (i = 0; i < priv->chip->max_touches; i++) {
- touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
+ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
touchscreen_report_pos(input, &priv->prop, x, y, true);
+ if (priv->chip->has_pressure_reg)
+ input_report_abs(input, ABS_MT_PRESSURE, z);
contact = true;
}
}
max_xy = (chip->resolution ?: SZ_64K) - 1;
input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+ if (priv->chip->has_pressure_reg)
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
+#define REG_STATUS 0x01 /* Device Status | Error Code */
+
+#define STATUS_NORMAL 0x00
+#define STATUS_INIT 0x01
+#define STATUS_ERROR 0x02
+#define STATUS_AUTO_TUNING 0x03
+#define STATUS_IDLE 0x04
+#define STATUS_POWER_DOWN 0x05
+
+#define ERROR_NONE 0x00
+#define ERROR_INVALID_ADDRESS 0x10
+#define ERROR_INVALID_VALUE 0x20
+#define ERROR_INVALID_PLATFORM 0x30
+
#define REG_XY_RESOLUTION 0x04
#define REG_XY_COORDINATES 0x12
#define ST_TS_MAX_FINGERS 10
u8 *read_buf;
};
-static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
+static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg,
+ unsigned int n)
{
struct i2c_client *client = ts->client;
struct i2c_msg msg[] = {
{
.addr = client->addr,
.flags = I2C_M_RD | I2C_M_DMA_SAFE,
- .len = ts->read_buf_len,
+ .len = n,
.buf = ts->read_buf,
}
};
return 0;
}
+static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+{
+ unsigned int retries;
+ int error;
+
+ for (retries = 10; retries; retries--) {
+ error = st1232_ts_read_data(ts, REG_STATUS, 1);
+ if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ENXIO;
+}
+
static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
u16 *max_y)
{
int error;
/* select resolution register */
- error = st1232_ts_read_data(ts, REG_XY_RESOLUTION);
+ error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3);
if (error)
return error;
buf = ts->read_buf;
- *max_x = ((buf[0] & 0x0070) << 4) | buf[1];
- *max_y = ((buf[0] & 0x0007) << 8) | buf[2];
+ *max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1;
+ *max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1;
return 0;
}
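For reference, the resolution unpacking above splits buf[0] into the high bits of X (bits 6:4) and Y (bits 2:0), combines them with buf[1] and buf[2], and subtracts one to get the maximum reportable coordinate. A self-contained check with a hypothetical register readout (the byte values below are illustrative only, not taken from any datasheet):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical REG_XY_RESOLUTION readout for an 800x480 panel. */
	uint8_t buf[3] = { 0x31, 0x20, 0xE0 };
	unsigned int max_x, max_y;

	/* Same unpacking as the hunk above. */
	max_x = (((buf[0] & 0x70) << 4) | buf[1]) - 1;
	max_y = (((buf[0] & 0x07) << 8) | buf[2]) - 1;

	printf("max_x=%u max_y=%u\n", max_x, max_y);	/* prints 799, 479 */
	return 0;
}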
int count;
int error;
- error = st1232_ts_read_data(ts, REG_XY_COORDINATES);
+ error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len);
if (error)
goto out;
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
+ /* Wait until device is ready */
+ error = st1232_ts_wait_ready(ts);
+ if (error)
+ return error;
+
/* Read resolution from the chip */
error = st1232_ts_read_resolution(ts, &max_x, &max_y);
if (error) {
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
+ return !!(iommu->features & mask);
}
static inline u64 iommu_virt_to_phys(void *vaddr)
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET 36
+#define IOMMU_IVINFO_EFRSUP BIT(0)
+
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT 6
static bool amd_iommu_pre_enabled = true;
+static u32 amd_iommu_ivinfo __initdata;
+
bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
return amd_iommus_present;
}
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ iommu->features = h->efr_reg;
+}
+
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+ early_iommu_features_init(iommu, h);
+
break;
default:
return -EINVAL;
NULL,
};
+/*
+ * Note: IVHD 0x11 and 0x40 also contain an exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+ u64 features;
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return;
+
+ /* read extended feature bits */
+ features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+ if (!iommu->features) {
+ iommu->features = features;
+ return;
+ }
+
+ /*
+ * Sanity check and warn if EFR values from
+ * IVHD and MMIO conflict.
+ */
+ if (features != iommu->features)
+ pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+ features, iommu->features);
+}
+
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
- /* read extended feature bits */
- iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ late_iommu_features_init(iommu);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
free_unity_maps();
}
+static void __init ivinfo_init(void *ivrs)
+{
+ amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
if (ret)
goto out;
+ ivinfo_init(ivrs_base);
+
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
- if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+ if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
return ret;
}
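The two predicates in the hunk above differ for addresses that are aligned to the invalidation range itself but not to the next larger power of two, because the old mask includes bit (size_order + VTD_PAGE_SHIFT). A standalone comparison; GENMASK_ULL and IS_ALIGNED are re-derived locally so the sketch compiles outside the kernel tree:

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1ULL << VTD_PAGE_SHIFT)
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t addr = 0x2000;		/* 8 KiB aligned */
	unsigned int size_order = 1;	/* invalidation covers 2 pages = 8 KiB */

	/* Old test: mask reaches one bit too far, demanding 16 KiB alignment. */
	int old_warn = (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0)) != 0;
	/* New test: warn only when the address is not aligned to the range size. */
	int new_warn = !IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order);

	printf("old_warn=%d new_warn=%d\n", old_warn, new_warn);	/* 1, 0 */
	return 0;
}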
+static bool domain_use_flush_queue(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool r = true;
+
+ if (intel_iommu_strict)
+ return false;
+
+ /*
+ * The flush queue implementation does not perform page-selective
+ * invalidations that are required for efficient TLB flushes in virtual
+ * environments. The benefit of batching is likely to be much lower than
+ * the overhead of synchronizing the virtual and physical IOMMU
+ * page-tables.
+ */
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_caching_mode(iommu->cap))
+ continue;
+
+ pr_warn_once("IOMMU batching is disabled due to virtualization");
+ r = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return r;
+}
+
static int
intel_iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = !intel_iommu_strict;
+ *(int *)data = domain_use_flush_queue();
return 0;
default:
return -ENODEV;
This option enables support for the Power Button LED of
Acer Iconia Tab A500.
+comment "Flash and Torch LED drivers"
+source "drivers/leds/flash/Kconfig"
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
+# Flash and Torch LED Drivers
+obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/
+
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+
+if LEDS_CLASS_FLASH
+
+config LEDS_RT8515
+ tristate "LED support for Richtek RT8515 flash/torch LED"
+ depends on GPIOLIB
+ help
+ This option enables support for the Richtek RT8515 flash
+ and torch LEDs found on some mobile phones.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-rt8515.
+
+endif # LEDS_CLASS_FLASH
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LED driver for Richtek RT8515 flash/torch white LEDs
+ * found on some Samsung mobile phones.
+ *
+ * This is a 1.5A Boost dual channel driver produced around 2011.
+ *
+ * The component lacks a datasheet, but in the schematic picture
+ * from the LG P970 service manual you can see the connections
+ * from the RT8515 to the LED, with two resistors connected
+ * from the pins "RFS" and "RTS" to ground.
+ *
+ * On the LG P970:
+ * RFS (resistance flash setting?) is 20 kOhm
+ * RTS (resistance torch setting?) is 39 kOhm
+ *
+ * Some sleuthing finds us the RT9387A which we have a datasheet for:
+ * https://static5.arrow.com/pdfs/2014/7/27/8/21/12/794/rtt_/manual/94download_ds.jspprt9387a.jspprt9387a.pdf
+ * This apparently works the same way so in theory this driver
+ * should cover RT9387A as well. This has not been tested, please
+ * update the compatibles if you add RT9387A support.
+ *
+ * Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+/* We can provide 15-700 mA out to the LED */
+#define RT8515_MIN_IOUT_MA 15
+#define RT8515_MAX_IOUT_MA 700
+/* The maximum intensity is 1-16 for flash and 1-100 for torch */
+#define RT8515_FLASH_MAX 16
+#define RT8515_TORCH_MAX 100
+
+#define RT8515_TIMEOUT_US 250000U
+#define RT8515_MAX_TIMEOUT_US 300000U
+
+struct rt8515 {
+ struct led_classdev_flash fled;
+ struct device *dev;
+ struct v4l2_flash *v4l2_flash;
+ struct mutex lock;
+ struct regulator *reg;
+ struct gpio_desc *enable_torch;
+ struct gpio_desc *enable_flash;
+ struct timer_list powerdown_timer;
+ u32 max_timeout; /* Flash max timeout */
+ int flash_max_intensity;
+ int torch_max_intensity;
+};
+
+static struct rt8515 *to_rt8515(struct led_classdev_flash *fled)
+{
+ return container_of(fled, struct rt8515, fled);
+}
+
+static void rt8515_gpio_led_off(struct rt8515 *rt)
+{
+ gpiod_set_value(rt->enable_flash, 0);
+ gpiod_set_value(rt->enable_torch, 0);
+}
+
+static void rt8515_gpio_brightness_commit(struct gpio_desc *gpiod,
+ int brightness)
+{
+ int i;
+
+ /*
+ * Toggling a GPIO line with a small delay increases the
+ * brightness one step at a time.
+ */
+ for (i = 0; i < brightness; i++) {
+ gpiod_set_value(gpiod, 0);
+ udelay(1);
+ gpiod_set_value(gpiod, 1);
+ udelay(1);
+ }
+}
+
+/* This is setting the torch light level */
+static int rt8515_led_brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled = lcdev_to_flcdev(led);
+ struct rt8515 *rt = to_rt8515(fled);
+
+ mutex_lock(&rt->lock);
+
+ if (brightness == LED_OFF) {
+ /* Off */
+ rt8515_gpio_led_off(rt);
+ } else if (brightness < RT8515_TORCH_MAX) {
+ /* Step it up to movie mode brightness using the flash pin */
+ rt8515_gpio_brightness_commit(rt->enable_torch, brightness);
+ } else {
+ /* Max torch brightness requested */
+ gpiod_set_value(rt->enable_torch, 1);
+ }
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_set(struct led_classdev_flash *fled,
+ bool state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+ struct led_flash_setting *timeout = &fled->timeout;
+ int brightness = rt->flash_max_intensity;
+
+ mutex_lock(&rt->lock);
+
+ if (state) {
+ /* Enable LED flash mode and set brightness */
+ rt8515_gpio_brightness_commit(rt->enable_flash, brightness);
+ /* Set timeout */
+ mod_timer(&rt->powerdown_timer,
+ jiffies + usecs_to_jiffies(timeout->val));
+ } else {
+ del_timer_sync(&rt->powerdown_timer);
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+ }
+
+ fled->led_cdev.brightness = LED_OFF;
+ /* After this the torch LED will be disabled */
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_get(struct led_classdev_flash *fled,
+ bool *state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+
+ *state = timer_pending(&rt->powerdown_timer);
+
+ return 0;
+}
+
+static int rt8515_led_flash_timeout_set(struct led_classdev_flash *fled,
+ u32 timeout)
+{
+ /* The timeout is stored in the led-class-flash core */
+ return 0;
+}
+
+static const struct led_flash_ops rt8515_flash_ops = {
+ .strobe_set = rt8515_led_flash_strobe_set,
+ .strobe_get = rt8515_led_flash_strobe_get,
+ .timeout_set = rt8515_led_flash_timeout_set,
+};
+
+static void rt8515_powerdown_timer(struct timer_list *t)
+{
+ struct rt8515 *rt = from_timer(rt, t, powerdown_timer);
+
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+}
+
+static void rt8515_init_flash_timeout(struct rt8515 *rt)
+{
+ struct led_classdev_flash *fled = &rt->fled;
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fled->timeout;
+ s->min = 1;
+ s->max = rt->max_timeout;
+ s->step = 1;
+ /*
+ * Set default timeout to RT8515_TIMEOUT_US except if
+ * max_timeout from DT is lower.
+ */
+ s->val = min(rt->max_timeout, RT8515_TIMEOUT_US);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+/* Configure the V4L2 flash subdevice */
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+ struct led_classdev *led = &rt->fled.led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(v4l2_sd_cfg->dev_name, led->dev->kobj.name,
+ sizeof(v4l2_sd_cfg->dev_name));
+
+ /*
+ * Init flash intensity setting: this is a linear scale
+ * capped from the device tree max intensity setting
+ * 1..flash_max_intensity
+ */
+ s = &v4l2_sd_cfg->intensity;
+ s->min = 1;
+ s->max = rt->flash_max_intensity;
+ s->step = 1;
+ s->val = s->max;
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+ v4l2_flash_release(rt->v4l2_flash);
+}
+
+#else
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+}
+#endif
+
+static void rt8515_determine_max_intensity(struct rt8515 *rt,
+ struct fwnode_handle *led,
+ const char *resistance,
+ const char *max_ua_prop, int hw_max,
+ int *max_intensity_setting)
+{
+ u32 res = 0; /* Can't be 0 so 0 is undefined */
+ u32 ua;
+ u32 max_ma;
+ int max_intensity;
+ int ret;
+
+ fwnode_property_read_u32(rt->dev->fwnode, resistance, &res);
+ ret = fwnode_property_read_u32(led, max_ua_prop, &ua);
+
+ /* Missing info in DT, OK go with hardware maxima */
+ if (ret || res == 0) {
+ dev_err(rt->dev,
+ "either %s or %s missing from DT, using HW max\n",
+ resistance, max_ua_prop);
+ max_ma = RT8515_MAX_IOUT_MA;
+ max_intensity = hw_max;
+ goto out_assign_max;
+ }
+
+ /*
+ * Formula from the datasheet; this is the maximum current
+ * defined by the hardware.
+ */
+ max_ma = (5500 * 1000) / res;
+ /*
+ * Calculate max intensity (linear scaling)
+ * Formula is ((ua / 1000) / max_ma) * 100, then simplified
+ */
+ max_intensity = (ua / 10) / max_ma;
+
+ dev_info(rt->dev,
+ "current restricted from %u to %u mA, max intensity %d/100\n",
+ max_ma, (ua / 1000), max_intensity);
+
+out_assign_max:
+ dev_info(rt->dev, "max intensity %d/%d = %d mA\n",
+ max_intensity, hw_max, max_ma);
+ *max_intensity_setting = max_intensity;
+}
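As a worked example of the scaling above, take two hypothetical DT values (neither appears in this diff): richtek,rts-ohms = 39000 and led-max-microamp = 100000. Then max_ma = 5500000 / 39000 = 141 mA and max_intensity = (100000 / 10) / 141 = 70, i.e. the torch would be capped at 70 of its 100 hardware steps. A standalone check of the same integer arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int res = 39000;	/* hypothetical richtek,rts-ohms */
	unsigned int ua = 100000;	/* hypothetical led-max-microamp */
	unsigned int max_ma, max_intensity;

	max_ma = (5500 * 1000) / res;		/* hardware current limit, in mA */
	max_intensity = (ua / 10) / max_ma;	/* same simplification as above */

	printf("max_ma=%u max_intensity=%u/100\n", max_ma, max_intensity);
	return 0;
}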
+
+static int rt8515_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *child;
+ struct rt8515 *rt;
+ struct led_classdev *led;
+ struct led_classdev_flash *fled;
+ struct led_init_data init_data = {};
+ struct v4l2_flash_config v4l2_sd_cfg = {};
+ int ret;
+
+ rt = devm_kzalloc(dev, sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+
+ rt->dev = dev;
+ fled = &rt->fled;
+ led = &fled->led_cdev;
+
+ /* ENF - Enable Flash line */
+ rt->enable_flash = devm_gpiod_get(dev, "enf", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_flash))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_flash),
+ "cannot get ENF (enable flash) GPIO\n");
+
+ /* ENT - Enable Torch line */
+ rt->enable_torch = devm_gpiod_get(dev, "ent", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_torch))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_torch),
+ "cannot get ENT (enable torch) GPIO\n");
+
+ child = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ if (!child) {
+ dev_err(dev,
+ "No fwnode child node found for connected LED.\n");
+ return -EINVAL;
+ }
+ init_data.fwnode = child;
+
+ rt8515_determine_max_intensity(rt, child, "richtek,rfs-ohms",
+ "flash-max-microamp",
+ RT8515_FLASH_MAX,
+ &rt->flash_max_intensity);
+ rt8515_determine_max_intensity(rt, child, "richtek,rts-ohms",
+ "led-max-microamp",
+ RT8515_TORCH_MAX,
+ &rt->torch_max_intensity);
+
+ ret = fwnode_property_read_u32(child, "flash-max-timeout-us",
+ &rt->max_timeout);
+ if (ret) {
+ rt->max_timeout = RT8515_MAX_TIMEOUT_US;
+ dev_warn(dev,
+ "flash-max-timeout-us property missing\n");
+ }
+ timer_setup(&rt->powerdown_timer, rt8515_powerdown_timer, 0);
+ rt8515_init_flash_timeout(rt);
+
+ fled->ops = &rt8515_flash_ops;
+
+ led->max_brightness = rt->torch_max_intensity;
+ led->brightness_set_blocking = rt8515_led_brightness_set;
+ led->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH;
+
+ mutex_init(&rt->lock);
+
+ platform_set_drvdata(pdev, rt);
+
+ ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
+ if (ret) {
+ dev_err(dev, "can't register LED %s\n", led->name);
+ mutex_destroy(&rt->lock);
+ return ret;
+ }
+
+ rt8515_init_v4l2_flash_config(rt, &v4l2_sd_cfg);
+
+ /* Create a V4L2 Flash device if V4L2 flash is enabled */
+ rt->v4l2_flash = v4l2_flash_init(dev, child, fled, NULL, &v4l2_sd_cfg);
+ if (IS_ERR(rt->v4l2_flash)) {
+ ret = PTR_ERR(rt->v4l2_flash);
+ dev_err(dev, "failed to register V4L2 flash device (%d)\n",
+ ret);
+ /*
+ * Continue without the V4L2 flash
+ * (we still have the classdev)
+ */
+ }
+
+ return 0;
+}
+
+static int rt8515_remove(struct platform_device *pdev)
+{
+ struct rt8515 *rt = platform_get_drvdata(pdev);
+
+ rt8515_v4l2_flash_release(rt);
+ del_timer_sync(&rt->powerdown_timer);
+ mutex_destroy(&rt->lock);
+
+ return 0;
+}
+
+static const struct of_device_id rt8515_match[] = {
+ { .compatible = "richtek,rt8515", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt8515_match);
+
+static struct platform_driver rt8515_driver = {
+ .driver = {
+ .name = "rt8515",
+ .of_match_table = rt8515_match,
+ },
+ .probe = rt8515_probe,
+ .remove = rt8515_remove,
+};
+module_platform_driver(rt8515_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Richtek RT8515 LED driver");
+MODULE_LICENSE("GPL");
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
int invert)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
void led_trigger_blink(struct led_trigger *trig,
return -ENOMEM;
leds[0].ec_index = EC_BLUE_LED;
- leds[0].led_cdev.name = "blue:power",
+ leds[0].led_cdev.name = "blue:power";
leds[0].led_cdev.default_trigger = "default-on";
leds[1].ec_index = EC_AMBER_LED;
- leds[1].led_cdev.name = "amber:status",
+ leds[1].led_cdev.name = "amber:status";
leds[2].ec_index = EC_GREEN_LED;
- leds[2].led_cdev.name = "green:status",
+ leds[2].led_cdev.name = "green:status";
leds[2].led_cdev.default_trigger = "default-on";
for (i = 0; i < NLEDS; i++) {
led->cdev.brightness_get = lm3533_led_get;
led->cdev.blink_set = lm3533_led_blink_set;
led->cdev.brightness = LED_OFF;
- led->cdev.groups = lm3533_led_attribute_groups,
+ led->cdev.groups = lm3533_led_attribute_groups;
led->id = pdev->id;
mutex_init(&led->mutex);
#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_compat & \
BCH##_FEATURE_COMPAT_##flagname) != 0); \
} \
#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_ro_compat & \
BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
} \
#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_incompat & \
BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
} \
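Each of the three macro families above now returns 0 for superblocks older than BCACHE_SB_VERSION_CDEV_WITH_FEATURES, since those superblocks carry no feature fields and any bits read there would be garbage. For illustration, expanding the compat variant for a hypothetical flag called EXAMPLE (not a real bcache feature; the stand-in struct and the version constant below are placeholders) gives roughly:

#include <stdio.h>

/* Minimal stand-ins so the expansion compiles outside the kernel tree. */
struct cache_sb {
	unsigned long long version;
	unsigned long long feature_compat;
};
#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES	8	/* placeholder value */
#define BCH_FEATURE_COMPAT_EXAMPLE		0x1ULL	/* hypothetical flag */

/* What BCH_FEATURE_COMPAT_FUNCS(example, EXAMPLE) would now expand to: */
static inline int bch_has_feature_example(struct cache_sb *sb)
{
	if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES)
		return 0;
	return ((sb->feature_compat & BCH_FEATURE_COMPAT_EXAMPLE) != 0);
}

int main(void)
{
	struct cache_sb old_sb = { .version = 3, .feature_compat = ~0ULL };
	struct cache_sb new_sb = { .version = 8, .feature_compat = 0x1ULL };

	/* Old superblocks never report the feature, regardless of stale bits. */
	printf("old=%d new=%d\n",
	       bch_has_feature_example(&old_sb),
	       bch_has_feature_example(&new_sb));	/* old=0 new=1 */
	return 0;
}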
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
obj-$(CONFIG_CEC_SECO) += seco/
obj-$(CONFIG_CEC_STI) += sti/
+obj-$(CONFIG_CEC_STM32) += stm32/
obj-$(CONFIG_CEC_TEGRA) += tegra/
return -EINVAL;
}
} else {
- length = (b->memory == VB2_MEMORY_USERPTR ||
- b->memory == VB2_MEMORY_DMABUF)
+ length = (b->memory == VB2_MEMORY_USERPTR)
? b->length : vb->planes[0].length;
if (b->bytesused > length)
switch (pll->bus_type) {
case CCS_PLL_BUS_TYPE_CSI2_DPHY:
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- op_sys_clk_freq_hz_sdr = pll->link_freq * 2
- * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
- 1 : pll->csi2.lanes);
- break;
case CCS_PLL_BUS_TYPE_CSI2_CPHY:
- op_sys_clk_freq_hz_sdr =
- pll->link_freq
+ op_sys_clk_freq_hz_sdr = pll->link_freq * 2
* (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
1 : pll->csi2.lanes);
break;
vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
v->static_data_version_major[1];
vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
- v->static_data_version_major[1];
+ v->static_data_version_minor[1];
vv->date_year = ((u16)v->year[0] << 8) + v->year[1];
vv->date_month = v->month;
vv->date_day = v->day;
if (!q->sensor)
return -ENODEV;
- freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
+ freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
{
struct venus_core *core = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(core->dev);
venus_shutdown(core);
venus_firmware_deinit(core);
+ pm_runtime_put_sync(core->dev);
}
static __maybe_unused int venus_runtime_suspend(struct device *dev)
out:
fwnode_handle_put(fwnode);
- return 0;
+ return ret;
}
static int rvin_parallel_init(struct rvin_dev *vin)
struct rkisp1_match_data {
const char * const *clks;
unsigned int size;
+ enum rkisp1_cif_isp_version isp_ver;
};
/* ----------------------------------------------------------------------------
"hclk",
};
-static const struct rkisp1_match_data rk3399_isp_clk_data = {
+static const struct rkisp1_match_data rk3399_isp_match_data = {
.clks = rk3399_isp_clks,
.size = ARRAY_SIZE(rk3399_isp_clks),
+ .isp_ver = RKISP1_V10,
};
static const struct of_device_id rkisp1_of_match[] = {
{
.compatible = "rockchip,rk3399-cif-isp",
- .data = &rk3399_isp_clk_data,
+ .data = &rk3399_isp_match_data,
},
{},
};
static int rkisp1_probe(struct platform_device *pdev)
{
- const struct rkisp1_match_data *clk_data;
+ const struct rkisp1_match_data *match_data;
struct device *dev = &pdev->dev;
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
int ret, irq;
- clk_data = of_device_get_match_data(&pdev->dev);
- if (!clk_data)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
rkisp1->irq = irq;
- for (i = 0; i < clk_data->size; i++)
- rkisp1->clks[i].id = clk_data->clks[i];
- ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks);
+ for (i = 0; i < match_data->size; i++)
+ rkisp1->clks[i].id = match_data->clks[i];
+ ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = clk_data->size;
+ rkisp1->clk_size = match_data->size;
pm_runtime_enable(&pdev->dev);
+ rkisp1->media_dev.hw_revision = match_data->isp_ver;
strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
sizeof(rkisp1->media_dev.model));
rkisp1->media_dev.dev = &pdev->dev;
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
- for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1, arg->gamma_y[i],
RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
}
RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
- RKISP1_CIF_ISP_HIST_WEIGHT_44,
};
const u8 *weight;
unsigned int i;
weight[2],
weight[3]),
hist_weight_regs[i]);
+
+ rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
}
static void
#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
- for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
- for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++)
- pbuf->params.hist.hist_bins[i] =
- (u8)rkisp1_read(rkisp1,
- RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
+ u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+
+ pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+ }
}
static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
data->body);
spin_lock(&data->keylock);
if (scancode) {
- delay = nsecs_to_jiffies(dev->timeout) +
+ delay = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(100);
mod_timer(&data->rx_timeout, jiffies + delay);
} else {
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
- itdev->params.sample_period;
+ itdev->params.sample_period / 1000;
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
- unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
+ unsigned int timeout = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
if (dev->keypressed) {
- dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
+ dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
goto out_raw;
}
+ dev->registered = true;
+
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- dev->registered = true;
-
/*
* once the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
mod_timer(&serial_ir.timeout_timer,
- jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+ jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
ir_raw_event_handle(serial_ir.rcdev);
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div)
{
struct v4l2_ctrl *ctrl;
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
#include "sdio_cis.h"
#include "sdio_ops.h"
+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
+
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
do {
unsigned char tpl_code, tpl_link;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
prev = &this->next;
if (ret == -ENOENT) {
+ if (time_after(jiffies, timeout))
+ break;
/* warn about unknown tuples */
pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
return host->private;
}
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev);
int sdhci_pltfm_resume(struct device *dev);
-extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#else
+static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
+static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
+#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
dev->irq = 9;
if (arcrimi_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
iounmap(lp->mem_start);
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
#ifndef MODULE
int excnak_pending; /* We just got an excesive nak interrupt */
+ /* RESET flag handling */
+ int reset_in_progress;
+ struct work_struct reset_work;
+
struct {
uint16_t sequence; /* sequence number (incs with each packet) */
__be16 aborted_seq;
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+
struct net_device *alloc_arcdev(const char *name);
+void free_arcdev(struct net_device *dev);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
struct arcnet_local *lp = from_timer(lp, t, timer);
struct net_device *dev = lp->dev;
- if (!netif_carrier_ok(dev)) {
+ spin_lock_irq(&lp->lock);
+
+ if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
netif_carrier_on(dev);
netdev_info(dev, "link up\n");
}
+
+ spin_unlock_irq(&lp->lock);
+}
+
+static void reset_device_work(struct work_struct *work)
+{
+ struct arcnet_local *lp;
+ struct net_device *dev;
+
+ lp = container_of(work, struct arcnet_local, reset_work);
+ dev = lp->dev;
+
+ /* Do not bring the network interface back up if an ifdown
+ * was already done.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ return;
+
+ rtnl_lock();
+
+ /* Do another check, in case of an ifdown that was triggered in
+ * the small race window between the exit condition above and
+ * acquiring RTNL.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ goto out;
+
+ dev_close(dev);
+ dev_open(dev, NULL);
+
+out:
+ rtnl_unlock();
}
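For orientation, a descriptive comment summarising how the pieces of this reset rework fit together; all names are taken from the hunks in this file, here and below.

/*
 * Flow of the deferred reset:
 *  - the interrupt path that sees RESETflag sets lp->reset_in_progress,
 *    stops the queue, drops the carrier and schedules lp->reset_work;
 *  - reset_device_work() returns early if the interface is already down
 *    or the flag is clear, re-checks both under rtnl_lock(), and only
 *    then runs dev_close() followed by dev_open();
 *  - the close path clears lp->reset_in_progress again;
 *  - free_arcdev() does cancel_work_sync(&lp->reset_work) before
 *    free_netdev(), so the work item never outlives the device.
 */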
static void arcnet_reply_tasklet(unsigned long data)
lp->dev = dev;
spin_lock_init(&lp->lock);
timer_setup(&lp->timer, arcnet_timer, 0);
+ INIT_WORK(&lp->reset_work, reset_device_work);
}
return dev;
}
EXPORT_SYMBOL(alloc_arcdev);
+void free_arcdev(struct net_device *dev)
+{
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ /* Do not cancel this at ->ndo_close(), as the workqueue itself
+ * indirectly calls the ifdown path through dev_close().
+ */
+ cancel_work_sync(&lp->reset_work);
+ free_netdev(dev);
+}
+EXPORT_SYMBOL(free_arcdev);
+
/* Open/initialize the board. This is called sometime after booting when
* the 'ifconfig' program is run.
*
/* shut down the card */
lp->hw.close(dev);
+
+ /* clear the reset-in-progress flag */
+ lp->reset_in_progress = 0;
+
module_put(lp->hw.owner);
return 0;
}
spin_lock_irqsave(&lp->lock, flags);
+ if (lp->reset_in_progress)
+ goto out;
+
/* RESET flag was enabled - if device is not running, we must
* clear it right away (but nothing else).
*/
if (status & RESETflag) {
arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
status);
- arcnet_close(dev);
- arcnet_open(dev);
+
+ lp->reset_in_progress = 1;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ schedule_work(&lp->reset_work);
/* get out of the interrupt handler! */
- break;
+ goto out;
}
/* RX is inhibited - we must have received something.
* Prepare to receive into the next buffer.
udelay(1);
lp->hw.intmask(dev, lp->intmask);
+out:
spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
dev->irq = 9;
if (com20020isa_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
unregister_netdev(my_dev);
free_irq(my_dev->irq, my_dev);
release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(my_dev);
+ free_arcdev(my_dev);
}
#ifndef MODULE
unregister_netdev(dev);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
dev = info->dev;
if (dev) {
dev_dbg(&link->dev, "kfree...\n");
- free_netdev(dev);
+ free_arcdev(dev);
}
dev_dbg(&link->dev, "kfree2...\n");
kfree(info);
err = com90io_probe(dev);
if (err) {
- free_netdev(dev);
+ free_arcdev(dev);
return err;
}
free_irq(dev->irq, dev);
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(dev);
+ free_arcdev(dev);
}
module_init(com90io_init)
err_release_mem:
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
err_free_dev:
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
release_mem_region(dev->mem_start,
dev->mem_end - dev->mem_start + 1);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec;
+ struct can_berr_counter bec = { };
enum can_state state = priv->state;
if (priv->do_get_state)
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
- if (!priv->master_mii_bus)
+ if (!priv->master_mii_bus) {
+ of_node_put(dn);
return -EPROBE_DEFER;
+ }
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus)
+ if (!priv->slave_mii_bus) {
+ of_node_put(dn);
return -ENOMEM;
+ }
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
.port_cnt = 5, /* total cpu and user ports */
},
{
+ /*
+ * WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except the port map
+ * contains a gap between external and CPU ports, the
+ * port map is NOT continuous. The per-port register
+ * map is shifted accordingly too, i.e. registers at
+ * offset 0x40 are NOT used on KSZ8794 and they ARE
+ * used on KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ */
.chip_id = 0x8794,
.dev_name = "KSZ8794",
.num_vlans = 4096,
dev->num_vlans = chip->num_vlans;
dev->num_alus = chip->num_alus;
dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
+ dev->port_cnt = fls(chip->cpu_ports);
+ dev->cpu_port = fls(chip->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->port_cnt - 1;
dev->cpu_ports = chip->cpu_ports;
-
+ dev->host_mask = chip->cpu_ports;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
+ chip->cpu_ports;
break;
}
}
if (!dev->cpu_ports)
return -ENODEV;
- dev->port_mask = BIT(dev->port_cnt) - 1;
- dev->port_mask |= dev->host_mask;
-
dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
dev->mib_cnt = ARRAY_SIZE(mib_names);
- dev->phy_port_cnt = dev->port_cnt - 1;
-
- dev->cpu_port = dev->port_cnt - 1;
- dev->host_mask = BIT(dev->cpu_port);
-
dev->ports = devm_kzalloc(dev->dev,
dev->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
- usleep_range(100, 1000);
+ msleep(100);
}
mutex_init(&dev->dev_mutex);
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (port_num >= dev->port_cnt)
+ if (!(dev->port_mask & BIT(port_num)))
return -EINVAL;
of_get_phy_mode(port,
&dev->ports[port_num].interface);
if (!entry.portvec)
entry.state = 0;
} else {
- entry.portvec |= BIT(port);
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+ entry.portvec = BIT(port);
+ else
+ entry.portvec |= BIT(port);
+
entry.state = state;
}
#endif
}
if (!n || !n->dev)
- goto free_sk;
+ goto free_dst;
ndev = n->dev;
- if (!ndev)
- goto free_dst;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
- neigh_release(n);
+ if (n)
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
*/
#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_NO_HARD_RESET,
};
static const struct fec_devinfo fec_imx6q_info = {
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
fep->mii_bus->parent = &pdev->dev;
err = of_mdiobus_register(fep->mii_bus, node);
- of_node_put(node);
if (err)
goto err_out_free_mdiobus;
+ of_node_put(node);
mii_cnt++;
err_out_free_mdiobus:
mdiobus_free(fep->mii_bus);
err_out:
+ of_node_put(node);
return err;
}
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
- if (test_bit(0, &adapter->resetting)) {
- spin_unlock_irqrestore(&adapter->state_lock, flags);
- return -EBUSY;
- }
-
adapter->state = VNIC_REMOVING;
spin_unlock_irqrestore(&adapter->state_lock, flags);
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
-
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->queues_enabled) {
- pfe.event_data.link_event.link_status = false;
- pfe.event_data.link_event.link_speed = 0;
- } else if (vf->link_forced) {
+ if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
-
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
}
}
- vf->queues_enabled = true;
-
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- /* Immediately mark queues as disabled */
- vf->queues_enabled = false;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
goto error_param;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
- * If the VF is indeed in reset, the vsi pointer has
- * to show on the newly loaded vsi under pf->vsi[id].
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- if (i > 0)
- vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
break;
- }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
ret = -EAGAIN;
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
- bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_vlan;
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MIN_MSIX 2
+#define ICE_MIN_LAN_TXRX_MSIX 1
+#define ICE_MIN_LAN_OICR_MSIX 1
+#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_txq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_txq);
}
/**
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_rxq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_rxq);
}
/**
sizeof(struct in6_addr));
input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
- input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
+ /* if no protocol requested, use IPPROTO_NONE */
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ input->ip.v6.proto = IPPROTO_NONE;
+ else
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
- num_online_cpus());
+ vsi->alloc_txq = min3(pf->num_lan_msix,
+ ice_get_avail_txq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
- vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
- num_online_cpus());
+ vsi->alloc_rxq = min3(pf->num_lan_msix,
+ ice_get_avail_rxq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
+ vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
+ max_t(int, vsi->alloc_rxq,
+ vsi->alloc_txq));
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
-#define ICE_MIN_LAN_VECS 2
-#define ICE_MIN_RDMA_VECS 2
-#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
- if (v_actual < ICE_MIN_LAN_VECS) {
+ if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
} else {
- pf->num_lan_msix = ICE_MIN_LAN_VECS;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
}
}
goto err_update_filters;
}
- /* Add filter for new MAC. If filter exists, just return success */
+ /* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
+	/* Although this MAC filter is already present in hardware, it's
+ * possible in some cases (e.g. bonding) that dev_addr was
+ * modified outside of the driver and needs to be restored back
+ * to this value.
+ */
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= ICE_TX_CTX_EIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
cmd->base.phy_address = hw->phy.addr;
/* advertising link modes */
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
if (hw->mac.autoneg == 1) {
Asym_Pause);
}
- status = rd32(IGC_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(IGC_STATUS);
if (status & IGC_STATUS_LU) {
if (status & IGC_STATUS_SPEED_1000) {
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
+ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+ * We have to check this and convert it to ADVERTISE_2500_FULL
+ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+ */
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+ advertising |= ADVERTISE_2500_FULL;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, k, eewr = 0;
- s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -IGC_ERR_NVM;
goto out;
}
}
out:
- return 0;
+ return ret_val;
}
/**
/* Clear entry invalidation bit */
pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
- /* Write tcam index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
-
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
return 0;
}
dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize);
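+	/* over-allocate so the buffer start can be aligned to OTX2_ALIGN below */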
+ buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
+ buf = PTR_ALIGN(buf, OTX2_ALIGN);
iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
if (err)
- return err;
+ goto free_page;
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {
.min_size = 16 * 1024,
};
+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
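+	/* non-NULL only once the entry was inserted into ct_tuples_nat_ht */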
+ return !!(entry->tuple_nat_node.next);
+}
+
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
err_insert:
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_has_nat(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node, tuples_nat_ht_params);
err_tuple_nat:
- if (entry->tuple_node.next)
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
err_tuple:
err_set:
kfree(entry);
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
mutex_lock(&ct_priv->shared_counter_lock);
- if (entry->tuple_node.next)
+ if (mlx5_tc_ct_entry_has_nat(entry))
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- return NUM_IPSEC_SW_COUNTERS;
+ return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+ return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
{
struct mlx5e_channels new_channels = {};
bool reset_channels = true;
+ bool opened;
int err = 0;
mutex_lock(&priv->state_lock);
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
trust_state);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (!opened)
reset_channels = false;
- }
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
reset_channels = false;
- if (reset_channels)
+ if (reset_channels) {
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_update_trust_state_hw,
&trust_state);
- else
+ } else {
err = mlx5e_update_trust_state_hw(priv, &trust_state);
+ if (!err && !opened)
+ priv->channels.params = new_channels.params;
+ }
mutex_unlock(&priv->state_lock);
goto out;
}
- new_channels.params = priv->channels.params;
+ new_channels.params = *cur_params;
new_channels.params.num_channels = count;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
+ if (err)
+ *cur_params = old_params;
+
goto out;
}
new_channels.params.num_tc = tc ? tc : 1;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = priv->channels.params;
priv->channels.params = new_channels.params;
+ err = mlx5e_num_channels_changed(priv);
+ if (err)
+ priv->channels.params = old_params;
+
goto out;
}
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_num_channels_changed_ctx, NULL);
- if (err)
- goto out;
- priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
- new_channels.params.num_tc);
out:
+ priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+ priv->channels.params.num_tc);
mutex_unlock(&priv->state_lock);
return err;
}
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- struct mlx5e_params *old_params;
+ struct mlx5e_params *cur_params;
int err = 0;
bool reset;
goto out;
}
- old_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ cur_params = &priv->channels.params;
+ if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = *old_params;
+ new_channels.params = *cur_params;
new_channels.params.lro_en = enable;
- if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+ if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}
if (!reset) {
- *old_params = new_channels.params;
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
+ *cur_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ *cur_params = old_params;
goto out;
}
}
if (!reset) {
+ unsigned int old_mtu = params->sw_mtu;
+
params->sw_mtu = new_mtu;
- if (preactivate)
- preactivate(priv, NULL);
+ if (preactivate) {
+ err = preactivate(priv, NULL);
+ if (err) {
+ params->sw_mtu = old_mtu;
+ goto out;
+ }
+ }
netdev->mtu = params->sw_mtu;
goto out;
}
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->features |= NETIF_F_NETNS_LOCAL;
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
+#endif
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
{
flow_flag_clear(flow, OFFLOADED);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ goto offload_rule_0;
+
if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
- netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
- dissector->used_keys);
+ netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
return err;
}
-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
+ u32 rate_mbps = 0;
u16 vport_num;
- u32 rate_mbps;
int err;
vport_num = rpriv->rep->vport;
* Moreover, if rate is non zero we choose to configure to a minimum of
* 1 mbit/sec.
*/
- rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+ if (rate) {
+ rate = (rate * BITS_PER_BYTE) + 500000;
+		do_div(rate, 1000000);
+		rate_mbps = max_t(u32, rate, 1);
+ }
+
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
destroy_ft:
root->cmds->destroy_flow_table(root, ft);
free_ft:
+ rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
if (!fte_tmp)
continue;
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ /* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
return rule;
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
struct rb_node rb_node;
u64 addr;
struct page *page;
- u16 func_id;
+ u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
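+	/* function id in the low 16 bits, embedded-CPU flag in bit 16 */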
+ return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
if (!root)
return ERR_PTR(-ENOMEM);
- err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+ err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
return root;
}
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
struct fw_page *tfp;
int i;
- root = page_root_per_func_id(dev, func_id);
+ root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);
nfp->addr = addr;
nfp->page = page;
- nfp->func_id = func_id;
+ nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
- u32 func_id)
+ u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;
return err;
}
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;
list_for_each_entry(iter, &dev->priv.free_list, list) {
- if (iter->func_id != func_id)
+ if (iter->function != function)
continue;
fp = iter;
}
{
struct rb_root *root;
- root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+ root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;
kfree(fwp);
}
-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
list_add(&fwp->list, &dev->priv.free_list);
}
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
goto map;
}
- err = insert_page(dev, addr, page, func_id);
+ err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
for (i = 0; i < npages; i++) {
retry:
- err = alloc_4k(dev, &addr, func_id);
+ err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
- err = alloc_system_page(dev, func_id);
+ err = alloc_system_page(dev, function);
if (err)
goto out_4k;
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
return err;
}
-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
+ bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
+ ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;
return 0;
}
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
}
for (i = 0; i < num_claimed; i++)
- free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
if (nclaimed)
*nclaimed = num_claimed;
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp1_span_cpu_can_handle,
.parms_set = mlxsw_sp1_span_entry_cpu_parms,
.configure = mlxsw_sp1_span_entry_cpu_configure,
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .is_static = true,
.can_handle = mlxsw_sp_port_dev_check,
.parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp2_span_cpu_can_handle,
.parms_set = mlxsw_sp2_span_entry_cpu_parms,
.configure = mlxsw_sp2_span_entry_cpu_configure,
if (!refcount_read(&curr->ref_count))
continue;
+ if (curr->ops->is_static)
+ continue;
+
err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
if (err)
continue;
};
struct mlxsw_sp_span_entry_ops {
+ bool is_static;
bool (*can_handle)(const struct net_device *to_dev);
int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
+static bool rtl_skb_is_udp(struct sk_buff *skb)
+{
+ int no = skb_network_offset(skb);
+ struct ipv6hdr *i6h, _i6h;
+ struct iphdr *ih, _ih;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
+ return ih && ih->protocol == IPPROTO_UDP;
+ case htons(ETH_P_IPV6):
+ i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
+ return i6h && i6h->nexthdr == IPPROTO_UDP;
+ default:
+ return false;
+ }
+}
+
+#define RTL_MIN_PATCH_LEN 47
+
+/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
+static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
{
+ unsigned int padto = 0, len = skb->len;
+
+ if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
+ rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+ unsigned int trans_data_len = skb_tail_pointer(skb) -
+ skb_transport_header(skb);
+
+ if (trans_data_len >= offsetof(struct udphdr, len) &&
+ trans_data_len < RTL_MIN_PATCH_LEN) {
+ u16 dest = ntohs(udp_hdr(skb)->dest);
+
+ /* dest is a standard PTP port */
+ if (dest == 319 || dest == 320)
+ padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
+ }
+
+ if (trans_data_len < sizeof(struct udphdr))
+ padto = max_t(unsigned int, padto,
+ len + sizeof(struct udphdr) - trans_data_len);
+ }
+
+ return padto;
+}
+
+static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ unsigned int padto;
+
+ padto = rtl8125_quirk_udp_padto(tp, skb);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
- return true;
+ padto = max_t(unsigned int, padto, ETH_ZLEN);
default:
- return false;
+ break;
}
+
+ return padto;
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
- /* eth_skb_pad would free the skb on error */
- return !__skb_put_padto(skb, ETH_ZLEN, false);
+ unsigned int padto = rtl_quirk_packet_padto(tp, skb);
+
+ /* skb_padto would free the skb on error */
+ return !__skb_put_padto(skb, padto, false);
}
return true;
if (skb->len < ETH_ZLEN)
features &= ~NETIF_F_CSUM_MASK;
+ if (rtl_quirk_packet_padto(tp, skb))
+ features &= ~NETIF_F_CSUM_MASK;
+
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
cancel_work_sync(&tp->wk.work);
- phy_disconnect(tp->phydev);
-
free_irq(pci_irq_vector(pdev, 0), tp);
+ phy_disconnect(tp->phydev);
+
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
u32 channel_id = gsi_channel_id(channel);
- void *virt = channel->gsi->virt;
+ void __iomem *virt = channel->gsi->virt;
u32 val;
val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
/* Hardware requires a 2^n ring size, with alignment equal to size */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, ring->addr);
+ dma_free_coherent(dev, size, ring->virt, addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
size);
return -EINVAL; /* Not a good error value, but distinct */
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->data->qmap)
- val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
+ val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
return true;
if (!status->pkt_len)
return true;
- endpoint_id = u32_get_bits(status->endp_dst_idx,
- IPA_STATUS_DST_IDX_FMASK);
+ endpoint_id = u8_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
if (endpoint_id != endpoint->endpoint_id)
return true;
size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
if (size != ipa->imem_size)
- dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
size, ipa->imem_size);
} else {
dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
if (size != ipa->smem_size)
- dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
size, ipa->smem_size);
} else {
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
+ rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
static void team_compute_features(struct team *team)
{
- mutex_lock(&team->lock);
__team_compute_features(team);
- mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
+}, {
+ /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_b0_hr_b0 = {
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_c0_hr_b0 = {
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
}
/*
- * Evaluate a DSM with no arguments and a single u8 return value (inside a
- * buffer object), verify and return that value.
+ * Generic function to evaluate a DSM with no arguments
+ * and an integer return value
+ * (as an integer object or inside a buffer object),
+ * verify it and assign the value to the "value" parameter.
+ * Return 0 on success and the appropriate errno otherwise.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ u64 *value, size_t expected_size)
{
union acpi_object *obj;
- int ret;
+ int ret = 0;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "Failed to get DSM object. func= %d\n",
+ func);
return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *value = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ __le64 le_value = 0;
- if (obj->type != ACPI_TYPE_BUFFER) {
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+ return -EINVAL;
+
+ /* if the buffer size doesn't match the expected size */
+ if (obj->buffer.length != expected_size)
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+ obj->buffer.length);
+
+ /* assuming LE from Intel BIOS spec */
+ memcpy(&le_value, obj->buffer.pointer,
+ min_t(size_t, expected_size, (size_t)obj->buffer.length));
+ *value = le64_to_cpu(le_value);
+ } else {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method did not return a valid object, type=%d\n",
obj->type);
goto out;
}
- if (obj->buffer.length != sizeof(u8)) {
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method returned invalid buffer, length=%d\n",
- obj->buffer.length);
- ret = -EINVAL;
- goto out;
- }
-
- ret = obj->buffer.pointer[0];
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func=%d, ret=%d\n",
func, ret);
ACPI_FREE(obj);
return ret;
}
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));
+
+ if (ret < 0)
+ return ret;
+
+ /* cast val (u64) to be u8 */
+ *value = (u8)val;
+ return 0;
+}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static inline
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
{
return -ENOENT;
}
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait)
{
- const struct firmware *pnvm;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
- char pnvm_name[64];
- int ret;
/* if the SKU_ID is empty, there's nothing to do */
if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
return 0;
- /* if we already have it, nothing to do either */
- if (trans->pnvm_loaded)
- return 0;
+ /* load from disk only if we haven't done it (or tried) before */
+ if (!trans->pnvm_loaded) {
+ const struct firmware *pnvm;
+ char pnvm_name[64];
+ int ret;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+ * don't add the dot here...
+ */
+ snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+ trans->cfg->fw_name_pre);
+
+ /* ...but replace the hyphen with the dot here. */
+ if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+ pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
+
+ ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ if (ret) {
+ IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+ pnvm_name, ret);
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->pnvm_loaded = true;
+ } else {
+ iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
- /*
- * The prefix unfortunately includes a hyphen at the end, so
- * don't add the dot here...
- */
- snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
- trans->cfg->fw_name_pre);
-
- /* ...but replace the hyphen with the dot here. */
- if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
- pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
-
- ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
- if (ret) {
- IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
- pnvm_name, ret);
- } else {
- iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
-
- release_firmware(pnvm);
+ release_firmware(pnvm);
+ }
}
iwl_init_notification_wait(notif_wait, &pnvm_wait,
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __IWL_CONFIG_H__
#define IWL_CFG_CORES_BT_GNSS 0x5
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
+extern const char iwl_ax203_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_b0_hr_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
- /* For safe using a string from FW make sure we have a
- * null terminator
- */
- reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
- IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
+ mdelay(delay_ms);
iwl_write_prph_no_grab(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
}
}
-IWL_EXPORT_SYMBOL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
void iwl_force_nmi(struct iwl_trans *trans)
{
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
- iwl_write_prph(trans, DEVICE_SET_NMI_REG,
- DEVICE_SET_NMI_VAL_DRV);
+ iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_DRV, 1);
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
+ u32 val, u32 delay_ms);
+static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_write_prph_delay(trans, ofs, val, 0);
+}
+
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
#define RADIO_RSP_ADDR_POS (6)
#define RADIO_RSP_RD_CMD (3)
+/* LTR control (Qu only) */
+#define HPM_MAC_LTR_CSR 0xa0348c
+#define HPM_MAC_LRT_ENABLE_ALL 0xf
+/* also uses CSR_LTR_* for values */
+#define HPM_UMAC_LTR 0xa03480
+
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c1c)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
mutex_lock(&mvm->mutex);
- clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
-
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* no need to reset the device in unified images, if successful */
if (unified_image && !ret) {
/* nothing else to do if we already sent D0I3_END_CMD */
const size_t bufsz = sizeof(buf);
int pos = 0;
+ mutex_lock(&mvm->mutex);
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ mutex_unlock(&mvm->mutex);
+
do_div(curr_os, NSEC_PER_USEC);
diff = curr_os - curr_gp2;
pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
{
+ u8 value;
+
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2);
+ DSM_FUNC_ENABLE_INDONESIA_5G2, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_INDONESIA_MAX)
+ else if (value >= DSM_VALUE_INDONESIA_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
- ret);
+ "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+ else if (value == DSM_VALUE_INDONESIA_ENABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
return DSM_VALUE_INDONESIA_ENABLE;
static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
{
+ u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_DISABLE_SRD);
+ DSM_FUNC_DISABLE_SRD, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_SRD_MAX)
+ else if (value >= DSM_VALUE_SRD_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function DISABLE_SRD return invalid value, ret=%d\n",
- ret);
+ "DSM function DISABLE_SRD return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_SRD_PASSIVE) {
+ else if (value == DSM_VALUE_SRD_PASSIVE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
return DSM_VALUE_SRD_PASSIVE;
- } else if (ret == DSM_VALUE_SRD_DISABLE) {
+ } else if (value == DSM_VALUE_SRD_DISABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: disabling SRD\n");
return DSM_VALUE_SRD_DISABLE;
iwl_mvm_binding_remove_vif(mvm, vif);
out:
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+ switching_chanctx)
+ return;
mvmvif->phy_ctxt = NULL;
iwl_mvm_power_update_mac(mvm);
}
if (!mvm->scan_cmd)
goto out_free;
+ /* invalidate ids to prevent accidental removal of sta_id 0 */
+ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
/* Set EBS as successful as long as not stated otherwise by the FW. */
mvm->last_ebs_successful = true;
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
kfree(reprobe);
module_put(THIS_MODULE);
}
module_put(THIS_MODULE);
return;
}
- reprobe->dev = mvm->trans->dev;
+ reprobe->dev = get_device(mvm->trans->dev);
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
else if (next)
if (tcp_payload_len > mss) {
skb_shinfo(tmp)->gso_size = mss;
+ skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
} else {
if (qos) {
u8 *qc;
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img)
- return -ENOMEM;
+ if (!iml_img) {
+ ret = -ENOMEM;
+ goto err_free_ctxt_info;
+ }
memcpy(iml_img, trans->iml, trans->iml_len);
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
- /*
- * The firmware initializes this again later (to a smaller
- * value), but for the boot process initialize the LTR to
- * ~250 usec.
- */
- u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+ /*
+	 * To work around hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->trans_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (trans->trans_cfg->integrated &&
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0;
+err_free_ctxt_info:
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev,
sizeof(*prph_info),
return ret;
}
+ if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+ return -EBUSY;
+
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
cpu_to_le64(trans_pcie->pnvm_dram.physical);
prph_sc_ctrl->pnvm_cfg.pnvm_size =
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_c0_hr_b0, iwl_ax203_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
while (offs < dwords) {
/* limit the time we spin here under lock to 1/2s */
- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+ unsigned long end = jiffies + HZ / 2;
+ bool resched = false;
if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR,
HBUS_TARG_MEM_RDAT);
offs++;
- /* calling ktime_get is expensive so
- * do it once in 128 reads
- */
- if (offs % 128 == 0 && ktime_after(ktime_get(),
- timeout))
+ if (time_after(jiffies, end)) {
+ resched = true;
break;
+ }
}
iwl_trans_release_nic_access(trans, &flags);
+
+ if (resched)
+ cond_resched();
} else {
return -EBUSY;
}
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ if (!txq) {
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+ return;
+ }
+
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
* idx is bounded by n_window
*/
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
iwl_txq_get_tfd(trans, txq, idx));
- /* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
+ if (!WARN_ON_ONCE(!skb))
+ iwl_txq_free_tso_page(trans, skb);
}
iwl_txq_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
*/
int rd_ptr = txq->read_ptr;
int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
int cmd, int *seq)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- enum mt76_txq_id qid;
+ enum mt76_mcuq_id qid;
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
{
struct mt76_queue *q = &dev->q_rx[qid];
struct mt76_sdio *sdio = &dev->sdio;
- int len = 0, err, i, order;
+ int len = 0, err, i;
struct page *page;
u8 *buf;
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
- order = get_order(len);
- page = __dev_alloc_pages(GFP_KERNEL, order);
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
if (!page)
return -ENOMEM;
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
- __free_pages(page, order);
+ put_page(page);
return err;
}
if (q->queued + i + 1 == q->ndesc)
break;
}
- __free_pages(page, order);
+ put_page(page);
spin_lock_bh(&q->lock);
q->head = (q->head + i) % q->ndesc;
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_mcu_txd *mcu_txd;
u8 seq, pkt_fmt, qidx;
- enum mt76_txq_id txq;
+ enum mt76_mcuq_id qid;
__le32 *txd;
u32 val;
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (cmd == -MCU_CMD_FW_SCATTER) {
- txq = MT_MCUQ_FWDL;
+ qid = MT_MCUQ_FWDL;
goto exit;
}
mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
- txq = MT_MCUQ_WA;
+ qid = MT_MCUQ_WA;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
} else {
- txq = MT_MCUQ_WM;
+ qid = MT_MCUQ_WM;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
}
if (wait_seq)
*wait_seq = seq;
- return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
static void
if (new_p) {
/* we have one extra ref from the allocator */
- __free_pages(e->p, MT_RX_ORDER);
-
+ put_page(e->p);
e->p = new_p;
}
}
}
e = &q->e[q->end];
- e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
q->end = (q->end + 1) % q->entries;
q->used++;
+ e->skb = skb;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
}
}
- list_add_tail(&ns->siblings, &head->list);
+ list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
mutex_unlock(&ctrl->subsys->lock);
return 0;
}
for (ns = nvme_next_ns(head, old);
- ns != old;
+ ns && ns != old;
ns = nvme_next_ns(head, ns)) {
if (nvme_path_is_disabled(ns))
continue;
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
length = cmd->pdu_len;
cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
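+	/* pick the sg element that contains offset, so round down */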
+ cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
sg = &cmd->req.sg[cmd->sg_idx];
length -= iov_len;
sg = sg_next(sg);
iov++;
+ sg_offset = 0;
}
iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
- /* ...but only set bus limit if we found valid dma-ranges earlier */
- if (!ret)
+ /* ...but only set bus limit and range map if we found valid dma-ranges earlier */
+ if (!ret) {
dev->bus_dma_limit = end;
+ dev->dma_range_map = map;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
iommu = of_iommu_configure(dev, np, id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+ /* Don't touch range map if it wasn't set from a valid dma-ranges */
+ if (!ret)
+ dev->dma_range_map = NULL;
kfree(map);
return -EPROBE_DEFER;
}
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
- dev->dma_range_map = map;
return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
return i;
pci_save_ltr_state(dev);
- pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
- pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
- error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
- 2 * sizeof(u32));
- if (error)
- pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
pci_allocate_vc_save_buffers(dev);
}
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
PCI_L1SS_CTL1_L1SS_MASK, val);
}
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
- int aspm_l1ss;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
- if (!aspm_l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, cap++);
- pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
- int aspm_l1ss;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
- if (!aspm_l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, *cap++);
- pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, *cap++);
-}
-
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
return retval;
/* need to use specific instance_id and guid combination to get right data */
obj = get_wmiobj_pointer(instance_id, guid);
- if (!obj)
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE)
return -ENODEV;
elements = obj->package.elements;
mutex_lock(&wmi_priv.mutex);
while (elements) {
/* sanity checking */
+ if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+ pr_debug("incorrect element type\n");
+ goto nextobj;
+ }
if (strlen(elements[ATTR_NAME].string.pointer) == 0) {
pr_debug("empty attribute found\n");
goto nextobj;
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
}
/* Tablet mode */
- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
- if (!(val < 0)) {
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ if (enable_tablet_mode_sw > 0) {
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+ if (val >= 0) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
- int ret;
+ int ret = 0;
/* No supply to resolve? */
if (!rdev->supply_name)
return 0;
- /* Supply already resolved? */
+ /* Supply already resolved? (fast-path without locking contention) */
if (rdev->supply)
return 0;
/* Did the lookup explicitly defer for us? */
if (ret == -EPROBE_DEFER)
- return ret;
+ goto out;
if (have_full_constraints()) {
r = dummy_regulator_rdev;
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
if (r == rdev) {
dev_err(dev, "Supply for %s (%s) resolved to itself\n",
rdev->desc->name, rdev->supply_name);
- if (!have_full_constraints())
- return -EINVAL;
+ if (!have_full_constraints()) {
+ ret = -EINVAL;
+ goto out;
+ }
r = dummy_regulator_rdev;
get_device(&r->dev);
}
if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
if (!device_is_bound(r->dev.parent)) {
put_device(&r->dev);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
ret = regulator_resolve_supply(r);
if (ret < 0) {
put_device(&r->dev);
- return ret;
+ goto out;
+ }
+
+ /*
+ * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+ * between rdev->supply null check and setting rdev->supply in
+ * set_supply() from concurrent tasks.
+ */
+ regulator_lock(rdev);
+
+ /* Supply just resolved by a concurrent task? */
+ if (rdev->supply) {
+ regulator_unlock(rdev);
+ put_device(&r->dev);
+ goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
+ regulator_unlock(rdev);
put_device(&r->dev);
- return ret;
+ goto out;
}
+ regulator_unlock(rdev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
if (ret < 0) {
_regulator_put(rdev->supply);
rdev->supply = NULL;
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ return ret;
}
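The recheck of rdev->supply with the lock held is a resolve-then-recheck pattern: do the slow lookup unlocked, then take the lock and install the result only if no other task beat us to it. A minimal user-space sketch of the same idea with a pthread mutex (all names here are illustrative, and it glosses over the memory-ordering care and the put_device() on the losing path that the driver needs):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *supply;			/* resolved once, then shared */

static void *slow_lookup(void)
{
	static int dummy;
	return &dummy;			/* stands in for the expensive lookup */
}

static void *resolve_supply(void)
{
	void *r;

	if (supply)			/* fast path, no lock taken */
		return supply;

	r = slow_lookup();		/* done without holding the lock */

	pthread_mutex_lock(&lock);
	if (!supply)			/* recheck: a concurrent task may have won */
		supply = r;
	/* else: the loser would drop its reference here */
	pthread_mutex_unlock(&lock);

	return supply;
}

int main(void)
{
	printf("%p\n", resolve_supply());
	return 0;
}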
/* Internal regulator request function */
spin_lock_irq(&rtc_lock);
+ /* Ensure that the RTC is accessible. Bits 0-6 must be 0! */
+ if ((CMOS_READ(RTC_VALID) & 0x7f) != 0) {
+ spin_unlock_irq(&rtc_lock);
+ dev_warn(dev, "not accessible\n");
+ retval = -ENXIO;
+ goto cleanup1;
+ }
+
if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
/* force periodic irq to CMOS reset default of 1024Hz;
*
again:
spin_lock_irqsave(&rtc_lock, flags);
+ /* Ensure that the RTC is accessible. Bits 0-6 must be 0! */
+ if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x7f) != 0)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ memset(time, 0xff, sizeof(*time));
+ return 0;
+ }
+
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
}
EXPORT_SYMBOL(dasd_path_create_kobjects);
-/*
- * As we keep kobjects for the lifetime of a device, this function must not be
- * called anywhere but in the context of offlining a device.
- */
-void dasd_path_remove_kobj(struct dasd_device *device, int chp)
+static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
{
if (device->path[chp].in_sysfs) {
kobject_put(&device->path[chp].kobj);
device->path[chp].in_sysfs = false;
}
}
-EXPORT_SYMBOL(dasd_path_remove_kobj);
+
+/*
+ * As we keep kobjects for the lifetime of a device, this function must not be
+ * called anywhere but in the context of offlining a device.
+ */
+void dasd_path_remove_kobjects(struct dasd_device *device)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ dasd_path_remove_kobj(device, i);
+}
+EXPORT_SYMBOL(dasd_path_remove_kobjects);
int dasd_add_sysfs_files(struct ccw_device *cdev)
{
device->path[i].ssid = 0;
device->path[i].chpid = 0;
dasd_path_notoper(device, i);
- dasd_path_remove_kobj(device, i);
}
}
device->block = NULL;
out_err1:
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
kfree(device->private);
device->private = NULL;
return rc;
private->vdsneq = NULL;
private->gneq = NULL;
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
}
static struct dasd_ccw_req *
void dasd_remove_sysfs_files(struct ccw_device *);
void dasd_path_create_kobj(struct dasd_device *, int);
void dasd_path_create_kobjects(struct dasd_device *);
-void dasd_path_remove_kobj(struct dasd_device *, int);
+void dasd_path_remove_kobjects(struct dasd_device *);
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
struct vfio_ap_queue *q;
- int apid, apqi;
mutex_lock(&matrix_dev->lock);
q = dev_get_drvdata(&apdev->device);
+ vfio_ap_mdev_reset_queue(q, 1);
dev_set_drvdata(&apdev->device, NULL);
- apid = AP_QID_CARD(q->apqn);
- apqi = AP_QID_QUEUE(q->apqn);
- vfio_ap_mdev_reset_queue(apid, apqi, 1);
- vfio_ap_irq_disable(q);
kfree(q);
mutex_unlock(&matrix_dev->lock);
}
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static int match_apqn(struct device *dev, const void *data)
{
int apqn)
{
struct vfio_ap_queue *q;
- struct device *dev;
if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
return NULL;
if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
return NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (!dev)
- return NULL;
- q = dev_get_drvdata(dev);
- q->matrix_mdev = matrix_mdev;
- put_device(dev);
+ q = vfio_ap_find_queue(apqn);
+ if (q)
+ q->matrix_mdev = matrix_mdev;
return q;
}
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
- if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ if (!q)
+ return;
+ if (q->saved_isc != VFIO_AP_ISC_INVALID &&
+ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
- if (q->saved_pfn && q->matrix_mdev)
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ }
+ if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
&q->saved_pfn, 1);
- q->saved_pfn = 0;
- q->saved_isc = VFIO_AP_ISC_INVALID;
+ q->saved_pfn = 0;
+ }
}
/**
* Returns if ap_aqic function failed with invalid, deconfigured or
* checkstopped AP.
*/
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status;
{
struct ap_matrix_mdev *m;
- mutex_lock(&matrix_dev->lock);
-
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
- if ((m != matrix_mdev) && (m->kvm == kvm)) {
- mutex_unlock(&matrix_dev->lock);
+ if ((m != matrix_mdev) && (m->kvm == kvm))
return -EPERM;
- }
}
matrix_mdev->kvm = kvm;
kvm_get_kvm(kvm);
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
- mutex_unlock(&matrix_dev->lock);
return 0;
}
return NOTIFY_DONE;
}
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+{
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret;
+ int ret, notify_rc = NOTIFY_OK;
struct ap_matrix_mdev *matrix_mdev;
if (action != VFIO_GROUP_NOTIFY_SET_KVM)
return NOTIFY_OK;
matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ mutex_lock(&matrix_dev->lock);
if (!data) {
- matrix_mdev->kvm = NULL;
- return NOTIFY_OK;
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
+ goto notify_done;
}
ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
- if (ret)
- return NOTIFY_DONE;
+ if (ret) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
/* If there is no CRYCB pointer, then we can't copy the masks */
- if (!matrix_mdev->kvm->arch.crypto.crycbd)
- return NOTIFY_DONE;
+ if (!matrix_mdev->kvm->arch.crypto.crycbd) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm,
matrix_mdev->matrix.adm);
- return NOTIFY_OK;
+notify_done:
+ mutex_unlock(&matrix_dev->lock);
+ return notify_rc;
}
-static void vfio_ap_irq_disable_apqn(int apqn)
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
struct device *dev;
- struct vfio_ap_queue *q;
+ struct vfio_ap_queue *q = NULL;
dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
&apqn, match_apqn);
if (dev) {
q = dev_get_drvdata(dev);
- vfio_ap_irq_disable(q);
put_device(dev);
}
+
+ return q;
}
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
unsigned int retry)
{
struct ap_queue_status status;
+ int ret;
int retry2 = 2;
- int apqn = AP_MKQID(apid, apqi);
- do {
- status = ap_zapq(apqn);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- while (!status.queue_empty && retry2--) {
- msleep(20);
- status = ap_tapq(apqn, NULL);
- }
- WARN_ON_ONCE(retry2 <= 0);
- return 0;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
+ if (!q)
+ return 0;
+
+retry_zapq:
+ status = ap_zapq(q->apqn);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ ret = 0;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (retry--) {
msleep(20);
- break;
- default:
- /* things are really broken, give up */
- return -EIO;
+ goto retry_zapq;
}
- } while (retry--);
+ ret = -EBUSY;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ WARN_ON_ONCE(status.irq_enabled);
+ ret = -EBUSY;
+ goto free_resources;
+ default:
+ /* things are really broken, give up */
+ WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ status.response_code);
+ return -EIO;
+ }
+
+ /* wait for the reset to take effect */
+ while (retry2--) {
+ if (status.queue_empty && !status.irq_enabled)
+ break;
+ msleep(20);
+ status = ap_tapq(q->apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
- return -EBUSY;
+free_resources:
+ vfio_ap_free_aqic_resources(q);
+
+ return ret;
}
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
int ret;
int rc = 0;
unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
matrix_mdev->matrix.apm_max + 1) {
for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
matrix_mdev->matrix.aqm_max + 1) {
- ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
+ ret = vfio_ap_mdev_reset_queue(q, 1);
/*
* Regardless whether a queue turns out to be busy, or
* is not operational, we need to continue resetting
*/
if (ret)
rc = ret;
- vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mutex_lock(&matrix_dev->lock);
- if (matrix_mdev->kvm) {
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
- vfio_ap_mdev_reset_queues(mdev);
- kvm_put_kvm(matrix_mdev->kvm);
- matrix_mdev->kvm = NULL;
- }
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
mutex_unlock(&matrix_dev->lock);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
struct mdev_device *mdev;
};
-extern int vfio_ap_mdev_register(void);
-extern void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry);
-
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
unsigned long saved_pfn;
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
};
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
+
+int vfio_ap_mdev_register(void);
+void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry);
+
#endif /* _VFIO_AP_PRIVATE_H_ */
return -ENODEV;
}
+ if (!vport->phba->sli4_hba.nvmels_wq)
+ return -ENOMEM;
+
/*
* there are two dma buf in the request, actually there is one and
* the second one is just the start address + cmd size.
int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 0)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
/*
* CT6 CTX allocation cache
return soc_dev;
}
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { }
+};
+
static int __init atmel_soc_device_init(void)
{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
at91_soc_init(socs);
return 0;
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
- select ARM_GIC_V3 if ARCH_MXC
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
help
If you say yes here you get support for the NXP i.MX8M family
support, it will provide the SoC info like SoC family,
config LITEX_SOC_CONTROLLER
tristate "Enable LiteX SoC Controller driver"
depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
select LITEX
help
This option enables the SoC Controller Driver which verifies
"allwinner,sun7i-a20-display-engine",
"allwinner,sun8i-a23-display-engine",
"allwinner,sun8i-a33-display-engine",
- "allwinner,sun8i-a83t-display-engine",
- "allwinner,sun8i-h3-display-engine",
- "allwinner,sun8i-r40-display-engine",
- "allwinner,sun8i-v3s-display-engine",
"allwinner,sun9i-a80-display-engine",
- "allwinner,sun50i-a64-display-engine",
/*
* And now we have the regular devices connected to the MBUS
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
+ u32 v;
/*
* Check if we have controllable resets. If either rstctrl is non-zero
map++;
}
+ /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
+ if (prm->data->rstmap == rst_map_012) {
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if ((v & reset->mask) != reset->mask) {
+ dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+ writel_relaxed(reset->mask, reset->prm->base +
+ reset->prm->data->rstctrl);
+ }
+ }
+
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
dev_err(&pdev->dev,
"Invalid number of chipselect: %hu\n",
pdata->num_chipselect);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit;
}
master->num_chipselect = pdata->num_chipselect;
{ .compatible = "lwn,bk4" },
{ .compatible = "dh,dhcom-board" },
{ .compatible = "menlo,m53cpld" },
+ { .compatible = "cisco,spi-petra" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
raw_fmt->width = encoded_fmt->width;
- raw_fmt->width = encoded_fmt->width;
+ raw_fmt->height = encoded_fmt->height;
if (ctx->is_encoder)
hantro_set_fmt_out(ctx, raw_fmt);
else
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
- if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
sram_array[i] |= BIT(0);
}
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
}
- /* init regulary domain */
- rtw_regd_init(padapter, rtw_reg_notifier);
-
/* copy mac_addr to wiphy */
memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
*((struct adapter **)wiphy_priv(wiphy)) = padapter;
rtw_cfg80211_preinit_wiphy(padapter, wiphy);
+ /* init regulatory domain */
+ rtw_regd_init(wiphy, rtw_reg_notifier);
+
ret = wiphy_register(wiphy);
if (ret < 0) {
DBG_8192C("Couldn't register wiphy device\n");
padapter = rtw_netdev_priv(pnetdev);
- rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
-
/* 3 3. init driver special setting, interface, OS and hardware relative */
/* 4 3.1 set hardware operation functions */
goto free_hal_data;
}
+ rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
+
/* 3 8. get WLan MAC address */
/* set mac addr */
rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr);
_rtw_reg_apply_flags(wiphy);
}
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
- struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
-
_rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
-
- return 0;
}
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
else
len = sizeof(struct sockaddr_in);
/*
- * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
+ * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
*/
if (np->np_network_transport == ISCSI_TCP)
tcp_sock_set_nodelay(sock->sk);
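tcp_sock_set_nodelay() is the in-kernel helper; from user space the equivalent way to disable Nagle on a connection is a plain setsockopt() call, sketched here for reference:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)))
		perror("TCP_NODELAY");
	else
		printf("Nagle disabled on fd %d\n", fd);
	if (fd >= 0)
		close(fd);
	return 0;
}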
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- might_sleep();
+ if (need_resched())
+ cond_resched();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;
* managed with the xHCI and the SuperSpeed hub so we create the
* link from xHCI instead.
*/
- while (!dev_is_pci(dev))
+ while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev)
return 0;
}
-extern ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
-
/**
* job_control - check job control
* @tty: tty
/* NOTE: not yet done after every sleep pending a thorough
check of the logic of this change. -- jlc */
/* don't stop on /dev/console */
- if (file->f_op->write == redirected_tty_write)
+ if (file->f_op->write_iter == redirected_tty_write)
return 0;
return __tty_check_change(tty, SIGTTIN);
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
- if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+ if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct kiocb *, struct iov_iter *);
-ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
* write method will not be invoked in parallel for each device.
*/
-static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
{
- struct file *file = iocb->ki_filp;
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
ssize_t ret;
return ret;
}
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ return file_tty_write(iocb->ki_filp, iocb, from);
+}
+
ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *p = NULL;
p = get_file(redirect);
spin_unlock(&redirect_lock);
+ /*
+ * We know the redirected tty is just another tty, we can
+ * call file_tty_write() directly with that file pointer.
+ */
if (p) {
ssize_t res;
- res = vfs_iocb_iter_write(p, iocb, iter);
+ res = file_tty_write(p, iocb, iter);
fput(p);
return res;
}
fput(f);
return 0;
}
+ if (file->f_op->write_iter != tty_write)
+ return -ENOTTY;
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return -EINVAL;
spin_lock(&redirect_lock);
if (redirect) {
spin_unlock(&redirect_lock);
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
- if (r < 0) {
- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
- alts, usblp->ifnum);
- return r;
+ /* Don't unnecessarily set the interface if there's a single alt. */
+ if (usblp->intf->num_altsetting > 1) {
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+ if (r < 0) {
+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+ alts, usblp->ifnum);
+ return r;
+ }
}
usblp->bidir = (usblp->protocol[protocol].epread != NULL);
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
- struct dwc2_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
if (idx > hsotg->num_of_eps)
return NULL;
- ep = index_to_ep(hsotg, idx, dir);
-
- if (idx && ep->dir_in != dir)
- return NULL;
-
- return ep;
+ return index_to_ep(hsotg, idx, dir);
}
/**
if (PMSG_IS_AUTO(msg))
break;
- ret = dwc3_core_init(dwc);
+ ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
str_array[offset].s = NULL;
ret = ast_vhub_str_alloc_add(vhub, &lang_str);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
sch_ep->sch_tt = tt;
sch_ep->ep = ep;
+ INIT_LIST_HEAD(&sch_ep->endpoint);
+ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
return sch_ep;
}
sch_ep->bw_budget_table[j];
}
}
+ sch_ep->allocated = used;
}
static int check_sch_tt(struct usb_device *udev,
return 0;
}
+static void destroy_sch_ep(struct usb_device *udev,
+ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+{
+ /* only release ep bw check passed by check_sch_bw() */
+ if (sch_ep->allocated)
+ update_bus_bw(sch_bw, sch_ep, 0);
+
+ list_del(&sch_ep->endpoint);
+
+ if (sch_ep->sch_tt) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
+ kfree(sch_ep);
+}
+
static bool need_bw_sch(struct usb_host_endpoint *ep,
enum usb_device_speed speed, int has_tt)
{
mtk->sch_array = sch_array;
+ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_virt_device *virt_dev;
- struct mu3h_sch_bw_info *sch_bw;
struct mu3h_sch_ep_info *sch_ep;
- struct mu3h_sch_bw_info *sch_array;
unsigned int ep_index;
- int bw_index;
- int ret = 0;
xhci = hcd_to_xhci(hcd);
virt_dev = xhci->devs[udev->slot_id];
ep_index = xhci_get_endpoint_index(&ep->desc);
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
- sch_array = mtk->sch_array;
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
return 0;
}
- bw_index = get_bw_index(xhci, udev, ep);
- sch_bw = &sch_array[bw_index];
-
sch_ep = create_sch_ep(udev, ep, ep_ctx);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
- ret = check_sch_bw(udev, sch_bw, sch_ep);
- if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
- if (is_fs_or_ls(udev->speed))
- drop_tt(udev);
-
- kfree(sch_ep);
- return -ENOSPC;
- }
-
- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
-
- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
- | EP_BREPEAT(sch_ep->repeat));
-
- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
- sch_ep->offset, sch_ep->repeat);
+ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
return 0;
}
struct xhci_virt_device *virt_dev;
struct mu3h_sch_bw_info *sch_array;
struct mu3h_sch_bw_info *sch_bw;
- struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
int bw_index;
xhci = hcd_to_xhci(hcd);
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep, 0);
- list_del(&sch_ep->endpoint);
- if (is_fs_or_ls(udev->speed)) {
- list_del(&sch_ep->tt_endpoint);
- drop_tt(udev);
- }
- kfree(sch_ep);
+ destroy_sch_ep(udev, sch_bw, sch_ep);
break;
}
}
}
EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index, ret;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ ret = check_sch_bw(udev, sch_bw, sch_ep);
+ if (ret) {
+ xhci_err(xhci, "Not enough bandwidth!\n");
+ return -ENOSPC;
+ }
+ }
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ struct xhci_ep_ctx *ep_ctx;
+ struct usb_host_endpoint *ep = sch_ep->ep;
+ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ bw_index = get_bw_index(xhci, udev, ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ | EP_BCSCOUNT(sch_ep->cs_count)
+ | EP_BBM(sch_ep->burst_mode));
+ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ | EP_BREPEAT(sch_ep->repeat));
+
+ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+ sch_ep->offset, sch_ep->repeat);
+ }
+
+ return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+ destroy_sch_ep(udev, sch_bw, sch_ep);
+ }
+
+ xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
static int xhci_mtk_setup(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
.reset = xhci_mtk_setup,
+ .check_bandwidth = xhci_mtk_check_bandwidth,
+ .reset_bandwidth = xhci_mtk_reset_bandwidth,
};
static struct hc_driver __read_mostly xhci_mtk_hc_driver;
* @ep_type: endpoint type
* @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
+ * @allocated: the bandwidth is already allocated from bus_bw
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
* @repeat: the time gap between two uframes that transfers are
u32 ep_type;
u32 maxpkt;
void *ep;
+ bool allocated;
/*
* mtk xHCI scheduling information put into reserved DWs
* in ep context
struct device *dev;
struct usb_hcd *hcd;
struct mu3h_sch_bw_info *sch_array;
+ struct list_head bw_ep_chk_list;
struct mu3c_ippc_regs __iomem *ippc_regs;
bool has_ippc;
int num_u2_ports;
struct usb_host_endpoint *ep);
void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
#else
static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
{
}
+static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+}
#endif
#endif /* _XHCI_MTK_H_ */
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
return 0;
}
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct phy *phy;
+ int ret;
+
+ /* Old bindings miss the PHY handle */
+ phy = of_phy_get(dev->of_node, "usb3-phy");
+ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(phy))
+ goto phy_out;
+
+ ret = phy_init(phy);
+ if (ret)
+ goto phy_put;
+
+ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
+ if (ret)
+ goto phy_exit;
+
+ ret = phy_power_on(phy);
+ if (ret == -EOPNOTSUPP) {
+ /* Skip initialization of XHCI PHY when it is unsupported by firmware */
+ dev_warn(dev, "PHY unsupported by firmware\n");
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+ }
+ if (ret)
+ goto phy_exit;
+
+ phy_power_off(phy);
+phy_exit:
+ phy_exit(phy);
+phy_put:
+ of_phy_put(phy);
+phy_out:
+
+ return 0;
+}
+
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
return 0;
priv->plat_start(hcd);
}
+static int xhci_priv_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->plat_setup)
+ return 0;
+
+ return priv->plat_setup(hcd);
+}
+
static int xhci_priv_init_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+ .plat_setup = xhci_mvebu_a3700_plat_setup,
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+
+ if (priv) {
+ ret = xhci_priv_plat_setup(hcd);
+ if (ret)
+ goto disable_usb_phy;
+ }
+
+ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ int (*plat_setup)(struct usb_hcd *);
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for in tranfers we need to copy the data from bounce to sg */
- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
- if (len != seg->bounce_len)
- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
- len, seg->bounce_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+ } else {
+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+ seg->bounce_len);
+ }
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
/* create a max max_pkt sized bounce buffer pointed to by last trb */
if (usb_urb_dir_out(urb)) {
- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
- seg->bounce_buf, new_buff_len, enqd_len);
- if (len != new_buff_len)
- xhci_warn(xhci,
- "WARN Wrong bounce buffer write length: %zu != %d\n",
- len, new_buff_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
+ } else {
+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+ }
+
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
* else should be touching the xhci->devs[slot_id] structure, so we
* don't need to take the xhci->lock for manipulating that.
*/
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
return ret;
}
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
drv->reset = over->reset;
if (over->start)
drv->start = over->start;
+ if (over->check_bandwidth)
+ drv->check_bandwidth = over->check_bandwidth;
+ if (over->reset_bandwidth)
+ drv->reset_bandwidth = over->reset_bandwidth;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
size_t extra_priv_size;
int (*reset)(struct usb_hcd *hcd);
int (*start)(struct usb_hcd *hcd);
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
};
#define XHCI_CFC_DELAY 10
void xhci_shutdown(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+ usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
struct sg_table sg_head;
int log_size;
int nsg;
+ int nent;
struct list_head list;
u64 offset;
};
return (npages + 1) / 2;
}
-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
-{
- struct scatterlist *sg;
- __be64 *pas;
- int i;
-
- pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- (*pas) = cpu_to_be64(sg_dma_address(sg));
-}
-
static void mlx5_set_access_mode(void *mkc, int mode)
{
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
struct scatterlist *sg;
+ int nsg = mr->nsg;
+ u64 dma_addr;
+ u64 dma_len;
+ int j = 0;
int i;
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- mtt[i] = cpu_to_be64(sg_dma_address(sg));
+ for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
+ for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
+ nsg && dma_len;
+ nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
+ mtt[j++] = cpu_to_be64(dma_addr);
+ }
}
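populate_mtts() walks the DMA-mapped entries and splits each one into 2^log_size-byte translation entries. A self-contained sketch of just that address arithmetic (the sample ranges and log_size are made up, and the nsg bound kept by the real code is omitted):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	const unsigned int log_size = 12;		/* 4 KiB entries */
	const uint64_t chunk = 1ULL << log_size;
	struct { uint64_t addr, len; } ranges[] = {
		{ 0x100000, 3 * chunk },		/* contiguous 12 KiB */
		{ 0x200000, 2 * chunk },		/* contiguous 8 KiB  */
	};
	int j = 0;

	for (size_t i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		for (uint64_t a = ranges[i].addr, l = ranges[i].len;
		     l; a += chunk, l -= chunk)
			printf("mtt[%d] = 0x%llx\n", j++, (unsigned long long)a);
	}
	return 0;
}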
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
- fill_sg(mr, in);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
- err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!err)
+ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!mr->nent)
goto err_map;
err = create_direct_mr(mvdev, mr);
u64 device_addr;
u64 driver_addr;
u16 avail_index;
+ u16 used_index;
bool ready;
struct vdpa_callback cb;
bool restore;
u32 virtq_id;
struct mlx5_vdpa_net *ndev;
u16 avail_idx;
+ u16 used_idx;
int fw_state;
/* keep last in the struct */
obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+ MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
get_features_12_3(ndev->mvdev.actual_features));
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
struct mlx5_virtq_attr {
u8 state;
u16 available_index;
+ u16 used_index;
};
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+ attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
kfree(out);
return 0;
}
}
+static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+ ndev->vqs[i].avail_idx = 0;
+ ndev->vqs[i].used_idx = 0;
+ }
+}
+
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
return err;
ri->avail_index = attr.available_index;
+ ri->used_index = attr.used_index;
ri->ready = mvq->ready;
ri->num_ent = mvq->num_ent;
ri->desc_addr = mvq->desc_addr;
continue;
mvq->avail_idx = ri->avail_index;
+ mvq->used_idx = ri->used_index;
mvq->ready = ri->ready;
mvq->num_ent = ri->num_ent;
mvq->desc_addr = ri->desc_addr;
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
+ clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
#endif
}
+static int xenbus_probe_thread(void *unused)
+{
+ DEFINE_WAIT(w);
+
+ /*
+ * We actually just want to wait for *any* trigger of xb_waitq,
+ * and run xenbus_probe() the moment it occurs.
+ */
+ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&xb_waitq, &w);
+
+ DPRINTK("probing");
+ xenbus_probe();
+ return 0;
+}
+
static int __init xenbus_probe_initcall(void)
{
/*
!xs_hvm_defer_init_for_callback()))
xenbus_probe();
+ /*
+ * For XS_LOCAL, spawn a thread which will wait for xenstored
+ * or a xenstore-stubdom to be started, then probe. It will be
+ * triggered when communication starts happening, by waiting
+ * on xb_waitq.
+ */
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct task_struct *probe_task;
+
+ probe_task = kthread_run(xenbus_probe_thread, NULL,
+ "xenbus_probe");
+ if (IS_ERR(probe_task))
+ return PTR_ERR(probe_task);
+ }
return 0;
}
device_initcall(xenbus_probe_initcall);
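A rough user-space analogue of the probe thread above, using a pthread condition variable in place of the kernel waitqueue: block until the first wakeup, then run the probe exactly once. The trigger timing and all names here are illustrative only:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int triggered;

static void *probe_thread(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (!triggered)			/* wait for the first wakeup */
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);

	printf("probing\n");			/* stands in for xenbus_probe() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, probe_thread, NULL);
	sleep(1);				/* the store comes up later... */

	pthread_mutex_lock(&lock);		/* ...and communication starts */
	triggered = 1;
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}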
goto error_cache;
#endif
- ret = register_pernet_subsys(&afs_net_ops);
+ ret = register_pernet_device(&afs_net_ops);
if (ret < 0)
goto error_net;
error_proc:
afs_fs_exit();
error_fs:
- unregister_pernet_subsys(&afs_net_ops);
+ unregister_pernet_device(&afs_net_ops);
error_net:
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
proc_remove(afs_proc_symlink);
afs_fs_exit();
- unregister_pernet_subsys(&afs_net_ops);
+ unregister_pernet_device(&afs_net_ops);
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
#endif
static void set_init_blocksize(struct block_device *bdev)
{
- bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
+ unsigned int bsize = bdev_logical_block_size(bdev);
+ loff_t size = i_size_read(bdev->bd_inode);
+
+ while (bsize < PAGE_SIZE) {
+ if (size & bsize)
+ break;
+ bsize <<= 1;
+ }
+ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
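The new loop picks the largest power-of-two block size, up to PAGE_SIZE, that still divides the device size. A stand-alone sketch of the same arithmetic with assumed sample sizes:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int pick_blocksize(unsigned long long size, unsigned int bsize)
{
	while (bsize < PAGE_SIZE) {
		if (size & bsize)	/* size not aligned to the next power of two */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	/* 10 GiB device, fully 4 KiB aligned -> 4096 */
	printf("%u\n", pick_blocksize(10ULL << 30, 512));
	/* 1 GiB + 512 bytes, only 512-byte aligned -> 512 */
	printf("%u\n", pick_blocksize((1ULL << 30) + 512, 512));
	return 0;
}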
int set_blocksize(struct block_device *bdev, int size)
wake_up(&caching_ctl->wait);
}
- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+ /*
+ * If we are in the transaction that populated the free space tree we
+ * can't actually cache from the free space tree as our commit root and
+ * real root are the same, so we could change the contents of the blocks
+ * while caching. Instead do the slow caching in this case, and after
+ * the transaction has committed we will be safe.
+ */
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
ret = load_free_space_tree(caching_ctl);
else
ret = load_extent_tree_free(caching_ctl);
/* Indicate that we need to cleanup space cache v1 */
BTRFS_FS_CLEANUP_SPACE_CACHE_V1,
+
+ /* Indicate that we can't trust the free space tree for caching yet */
+ BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
};
/*
struct btrfs_block_group *cache;
int ret;
- btrfs_add_excluded_extent(trans->fs_info, bytenr, num_bytes);
-
cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
if (!cache)
return -EINVAL;
* the pinned extents.
*/
btrfs_cache_block_group(cache, 1);
+ /*
+ * Make sure we wait until the cache is completely built in case it is
+ * missing or is invalid and therefore needs to be rebuilt.
+ */
+ ret = btrfs_wait_block_group_cache_done(cache);
+ if (ret)
+ goto out;
pin_down_extent(trans, cache, bytenr, num_bytes, 0);
/* remove us from the free space cache (if we're there at all) */
ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
+out:
btrfs_put_block_group(cache);
return ret;
}
{
int ret;
struct btrfs_block_group *block_group;
- struct btrfs_caching_control *caching_ctl;
block_group = btrfs_lookup_block_group(fs_info, start);
if (!block_group)
return -EINVAL;
- btrfs_cache_block_group(block_group, 0);
- caching_ctl = btrfs_get_caching_control(block_group);
-
- if (!caching_ctl) {
- /* Logic error */
- BUG_ON(!btrfs_block_group_done(block_group));
- ret = btrfs_remove_free_space(block_group, start, num_bytes);
- } else {
- /*
- * We must wait for v1 caching to finish, otherwise we may not
- * remove our space.
- */
- btrfs_wait_space_cache_v1_finished(block_group, caching_ctl);
- mutex_lock(&caching_ctl->mutex);
-
- if (start >= caching_ctl->progress) {
- ret = btrfs_add_excluded_extent(fs_info, start,
- num_bytes);
- } else if (start + num_bytes <= caching_ctl->progress) {
- ret = btrfs_remove_free_space(block_group,
- start, num_bytes);
- } else {
- num_bytes = caching_ctl->progress - start;
- ret = btrfs_remove_free_space(block_group,
- start, num_bytes);
- if (ret)
- goto out_lock;
+ btrfs_cache_block_group(block_group, 1);
+ /*
+ * Make sure we wait until the cache is completely built in case it is
+ * missing or is invalid and therefore needs to be rebuilt.
+ */
+ ret = btrfs_wait_block_group_cache_done(block_group);
+ if (ret)
+ goto out;
- num_bytes = (start + num_bytes) -
- caching_ctl->progress;
- start = caching_ctl->progress;
- ret = btrfs_add_excluded_extent(fs_info, start,
- num_bytes);
- }
-out_lock:
- mutex_unlock(&caching_ctl->mutex);
- btrfs_put_caching_control(caching_ctl);
- }
+ ret = btrfs_remove_free_space(block_group, start, num_bytes);
+out:
btrfs_put_block_group(block_group);
return ret;
}
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
}
- if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
- clear_extent_bits(&fs_info->excluded_extents, start,
- end, EXTENT_UPTODATE);
if (btrfs_test_opt(fs_info, DISCARD_SYNC))
ret = btrfs_discard_extent(fs_info, start,
return PTR_ERR(trans);
set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+ set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
free_space_root = btrfs_create_tree(trans,
BTRFS_FREE_SPACE_TREE_OBJECTID);
if (IS_ERR(free_space_root)) {
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+ ret = btrfs_commit_transaction(trans);
- return btrfs_commit_transaction(trans);
+ /*
+ * Now that we've committed the transaction any reading of our commit
+ * root will be safe, so we can cache from the free space tree now.
+ */
+ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+ return ret;
abort:
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
- btrfs_device_data_ordered_init(dev, fs_info);
+ btrfs_device_data_ordered_init(dev);
INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
extent_io_tree_init(fs_info, &dev->alloc_state,
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
-#define btrfs_device_data_ordered_init(device, info) \
- seqcount_mutex_init(&device->data_seqcount, &info->chunk_mutex)
+#define btrfs_device_data_ordered_init(device) \
+ seqcount_init(&device->data_seqcount)
#else
-#define btrfs_device_data_ordered_init(device, info) do { } while (0)
+#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif
#define BTRFS_DEV_STATE_WRITEABLE (0)
blk_status_t last_flush_error;
#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
- /* A seqcount_t with associated chunk_mutex (for lockdep) */
- seqcount_mutex_t data_seqcount;
+ seqcount_t data_seqcount;
#endif
/* the internal btrfs device id */
static inline void \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
{ \
+ preempt_disable(); \
write_seqcount_begin(&dev->data_seqcount); \
dev->name = size; \
write_seqcount_end(&dev->data_seqcount); \
+ preempt_enable(); \
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name) \
* Caller is responsible for freeing returned value if it is not error.
*/
char *cifs_compose_mount_options(const char *sb_mountdata,
- const char *fullpath,
- const struct dfs_info3_param *ref)
+ const char *fullpath,
+ const struct dfs_info3_param *ref,
+ char **devname)
{
int rc;
char *name;
strcat(mountdata, "ip=");
strcat(mountdata, srvIP);
- kfree(name);
+ if (devname)
+ *devname = name;
+ else
+ kfree(name);
/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
/* strip first '\' from fullpath */
mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- fullpath + 1, NULL);
+ fullpath + 1, NULL, NULL);
if (IS_ERR(mountdata)) {
kfree(devname);
return (struct vfsmount *)mountdata;
goto out;
}
- rc = cifs_setup_volume_info(cifs_sb->ctx);
+ rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
if (rc) {
root = ERR_PTR(rc);
goto out;
int add_treename);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
extern char *cifs_compose_mount_options(const char *sb_mountdata,
- const char *fullpath, const struct dfs_info3_param *ref);
+ const char *fullpath, const struct dfs_info3_param *ref,
+ char **devname);
/* extern void renew_parental_timestamps(struct dentry *direntry);*/
extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
struct TCP_Server_Info *server);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
extern int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx);
+extern int smb3_parse_opt(const char *options, const char *key, char **val);
extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
extern int cifs_call_async(struct TCP_Server_Info *server,
unsigned char *p24);
extern int
-cifs_setup_volume_info(struct smb3_fs_context *ctx);
+cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname);
extern struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx);
rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
ref_path, &referral, NULL);
if (!rc) {
+ char *fake_devname = NULL;
+
mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- full_path + 1, &referral);
+ full_path + 1, &referral,
+ &fake_devname);
free_dfs_info_param(&referral);
if (IS_ERR(mdata)) {
rc = PTR_ERR(mdata);
mdata = NULL;
} else {
- smb3_cleanup_fs_context_contents(ctx);
- rc = cifs_setup_volume_info(ctx);
+ rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
}
+ kfree(fake_devname);
kfree(cifs_sb->ctx->mount_options);
cifs_sb->ctx->mount_options = mdata;
}
struct dfs_info3_param ref = {0};
char *mdata = NULL;
struct smb3_fs_context fake_ctx = {NULL};
+ char *fake_devname = NULL;
cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
return rc;
mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- full_path + 1, &ref);
+ full_path + 1, &ref,
+ &fake_devname);
free_dfs_info_param(&ref);
if (IS_ERR(mdata)) {
rc = PTR_ERR(mdata);
mdata = NULL;
} else
- rc = cifs_setup_volume_info(&fake_ctx);
+ rc = cifs_setup_volume_info(&fake_ctx, mdata, fake_devname);
kfree(mdata);
+ kfree(fake_devname);
if (!rc) {
/*
* we should pass a clone of the original context?
*/
int
-cifs_setup_volume_info(struct smb3_fs_context *ctx)
+cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
{
int rc = 0;
+ smb3_parse_devname(devname, ctx);
+
+ if (mntopts) {
+ char *ip;
+
+ cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
+ rc = smb3_parse_opt(mntopts, "ip", &ip);
+ if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
+ strlen(ip))) {
+ cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
+ return -EINVAL;
+ }
+ }
+
if (ctx->nullauth) {
cifs_dbg(FYI, "Anonymous login\n");
kfree(ctx->username);
int rc;
struct cache_entry *ce;
struct dfs_info3_param ref = {0};
- char *mdata = NULL;
+ char *mdata = NULL, *devname = NULL;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct smb3_fs_context ctx = {NULL};
up_read(&htable_rw_lock);
- mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref);
+ mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
+ &devname);
free_dfs_info_param(&ref);
if (IS_ERR(mdata)) {
goto out;
}
- rc = cifs_setup_volume_info(&ctx);
+ rc = cifs_setup_volume_info(&ctx, NULL, devname);
if (rc) {
ses = ERR_PTR(rc);
smb3_cleanup_fs_context_contents(&ctx);
kfree(mdata);
kfree(rpath);
+ kfree(devname);
return ses;
}
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
struct inode *inode;
+ int rc;
if (flags & LOOKUP_RCU)
return -ECHILD;
if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
CIFS_I(inode)->time = 0; /* force reval */
- if (cifs_revalidate_dentry(direntry))
- return 0;
+ rc = cifs_revalidate_dentry(direntry);
+ if (rc) {
+ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
+ switch (rc) {
+ case -ENOENT:
+ case -ESTALE:
+ /*
+ * Those errors mean the dentry is invalid
+ * (file was deleted or recreated)
+ */
+ return 0;
+ default:
+ /*
+ * Otherwise some unexpected error happened
+ * report it as-is to VFS layer
+ */
+ return rc;
+ }
+ }
else {
/*
* If the inode wasn't known to be a dfs entry when
fsparam_flag_no("exec", Opt_ignore),
fsparam_flag_no("dev", Opt_ignore),
fsparam_flag_no("mand", Opt_ignore),
+ fsparam_flag_no("auto", Opt_ignore),
fsparam_string("cred", Opt_ignore),
fsparam_string("credentials", Opt_ignore),
+ fsparam_string("prefixpath", Opt_ignore),
{}
};
return 0;
}
+int smb3_parse_opt(const char *options, const char *key, char **val)
+{
+ int rc = -ENOENT;
+ char *opts, *orig, *p;
+
+ orig = opts = kstrdup(options, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ while ((p = strsep(&opts, ","))) {
+ char *nval;
+
+ if (!*p)
+ continue;
+ if (strncasecmp(p, key, strlen(key)))
+ continue;
+ nval = strchr(p, '=');
+ if (nval) {
+ if (nval == p)
+ continue;
+ *nval++ = 0;
+ *val = kstrndup(nval, strlen(nval), GFP_KERNEL);
+ rc = !*val ? -ENOMEM : 0;
+ goto out;
+ }
+ }
+out:
+ kfree(orig);
+ return rc;
+}
+
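/*
 * Illustrative userspace sketch of the strsep()-based "key=value" lookup that
 * smb3_parse_opt() performs above: duplicate the option string, split it on
 * commas, and return a copy of the value following '='.  parse_opt() and the
 * sample mount string are invented for the example; only the parsing pattern
 * mirrors the patch.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

static int parse_opt(const char *options, const char *key, char **val)
{
	char *opts, *orig, *p;
	int rc = -1;				/* "not found" */

	orig = opts = strdup(options);
	if (!opts)
		return -1;

	while ((p = strsep(&opts, ","))) {
		char *nval;

		if (!*p)
			continue;		/* skip empty fields: ",," */
		if (strncasecmp(p, key, strlen(key)))
			continue;		/* not our key */
		nval = strchr(p, '=');
		if (nval && nval != p) {
			*val = strdup(nval + 1);
			rc = *val ? 0 : -1;
			break;
		}
	}
	free(orig);
	return rc;
}

int main(void)
{
	char *ip = NULL;

	if (!parse_opt("vers=3.0,soft,ip=192.0.2.1,noperm", "ip", &ip)) {
		printf("ip option: %s\n", ip);	/* prints "192.0.2.1" */
		free(ip);
	}
	return 0;
}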
/*
* Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
* fields with the result. Returns 0 on success and an error otherwise
if (ctx->rdma && ctx->vals->protocol_id < SMB30_PROT_ID) {
cifs_dbg(VFS, "SMB Direct requires Version >=3.0\n");
- return -1;
+ return -EOPNOTSUPP;
}
#ifndef CONFIG_KEYS
/* make sure UNC has a share name */
if (strlen(ctx->UNC) < 3 || !strchr(ctx->UNC + 3, '\\')) {
cifs_dbg(VFS, "Malformed UNC. Unable to find share name.\n");
- return -1;
+ return -ENOENT;
}
if (!ctx->got_ip) {
if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr,
&ctx->UNC[2], len)) {
pr_err("Unable to determine destination address\n");
- return -1;
+ return -EHOSTUNREACH;
}
}
return 0;
cifs_parse_mount_err:
- return 1;
+ return -EINVAL;
}
int smb3_init_fs_context(struct fs_context *fc)
__le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
__le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
__le16 Reserved2;
- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
+ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
} __packed;
/* Dialects */
if (*credits < num) {
/*
- * Return immediately if not too many requests in flight since
- * we will likely be stuck on waiting for credits.
+ * If the server is tight on resources or just gives us less
+ * credits for other reasons (e.g. requests are coming out of
+ * order and the server delays granting more credits until it
+ * processes a missing mid) and we exhausted most available
+ * credits there may be situations when we try to send
+ * a compound request but we don't have enough credits. At this
+ * point the client needs to decide if it should wait for
+ * additional credits or fail the request. If at least one
+ * request is in flight there is a high probability that the
+ * server will return enough credits to satisfy this compound
+ * request.
+ *
+ * Return immediately if no requests in flight since we will be
+ * stuck on waiting for credits.
*/
- if (server->in_flight < num - *credits) {
+ if (server->in_flight == 0) {
spin_unlock(&server->req_lock);
trace_smb3_insufficient_credits(server->CurrentMid,
server->hostname, scredits, sin_flight);
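/*
 * Minimal restatement of the decision the credits hunk above changes: with
 * *credits < num, the client now waits whenever at least one request is in
 * flight and fails fast only when nothing is in flight (no outstanding reply
 * can bring credits back).  must_fail_fast_old()/must_fail_fast_new() are
 * made-up helpers used only to show the two predicates side by side.
 */
#include <assert.h>
#include <stdbool.h>

static bool must_fail_fast_old(int credits, int num, int in_flight)
{
	return in_flight < num - credits;	/* pre-patch condition */
}

static bool must_fail_fast_new(int credits, int num, int in_flight)
{
	return in_flight == 0;			/* post-patch condition */
}

int main(void)
{
	/* 1 credit available, compound needs 3, 1 request in flight: the old
	 * code gave up (1 < 3 - 1), the new code waits for the in-flight
	 * request to return credits. */
	assert(must_fail_fast_old(1, 3, 1));
	assert(!must_fail_fast_new(1, 3, 1));

	/* Nothing in flight: both versions fail fast, since no reply can
	 * ever grant the missing credits. */
	assert(must_fail_fast_old(1, 3, 0));
	assert(must_fail_fast_new(1, 3, 0));
	return 0;
}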
{
int rc;
struct dentry *lower_dentry;
+ struct inode *lower_inode;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!(d_inode(lower_dentry)->i_opflags & IOP_XATTR)) {
+ lower_inode = d_inode(lower_dentry);
+ if (!(lower_inode->i_opflags & IOP_XATTR)) {
rc = -EOPNOTSUPP;
goto out;
}
- rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+ inode_lock(lower_inode);
+ rc = __vfs_setxattr_locked(lower_dentry, name, value, size, flags, NULL);
+ inode_unlock(lower_inode);
if (!rc && inode)
- fsstack_copy_attr_all(inode, d_inode(lower_dentry));
+ fsstack_copy_attr_all(inode, lower_inode);
out:
return rc;
}
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ set_page_huge_active(page);
/*
* unlock_page because locked by add_to_page_cache()
- * page_put due to reference from alloc_huge_page()
+ * put_page() due to reference from alloc_huge_page()
*/
unlock_page(page);
put_page(page);
const struct iovec *fast_iov,
struct iov_iter *iter, bool force);
static void io_req_drop_files(struct io_kiocb *req);
+static void io_req_task_queue(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
{
struct io_kiocb *req;
- if (task && head->task != task)
+ if (task && head->task != task) {
+ /* in terms of cancelation, always match if req task is dead */
+ if (head->task->flags & PF_EXITING)
+ return true;
return false;
+ }
if (!files)
return true;
do {
struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
struct io_defer_entry, list);
- struct io_kiocb *link;
if (req_need_defer(de->req, de->seq))
break;
list_del_init(&de->list);
- /* punt-init is done before queueing for defer */
- link = __io_queue_async_work(de->req);
- if (link) {
- __io_queue_linked_timeout(link);
- /* drop submission reference */
- io_put_req_deferred(link, 1);
- }
+ io_req_task_queue(de->req);
kfree(de);
} while (!list_empty(&ctx->defer_list));
}
struct io_kiocb *req, *tmp;
struct io_uring_cqe *cqe;
unsigned long flags;
- bool all_flushed;
+ bool all_flushed, posted;
LIST_HEAD(list);
if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
return false;
+ posted = false;
spin_lock_irqsave(&ctx->completion_lock, flags);
list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
if (!io_match_task(req, tsk, files))
WRITE_ONCE(ctx->rings->cq_overflow,
ctx->cached_cq_overflow);
}
+ posted = true;
}
all_flushed = list_empty(&ctx->cq_overflow_list);
ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
}
- io_commit_cqring(ctx);
+ if (posted)
+ io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
- io_cqring_ev_posted(ctx);
+ if (posted)
+ io_cqring_ev_posted(ctx);
while (!list_empty(&list)) {
req = list_first_entry(&list, struct io_kiocb, compl.list);
else
__io_req_task_cancel(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
+
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ io_sq_thread_drop_mm_files();
}
static void io_req_task_submit(struct callback_head *cb)
file = __io_file_get(state, fd);
}
- if (file && file->f_op == &io_uring_fops) {
+ if (file && file->f_op == &io_uring_fops &&
+ !(req->flags & REQ_F_INFLIGHT)) {
io_req_init_async(req);
req->flags |= REQ_F_INFLIGHT;
TASK_INTERRUPTIBLE);
/* make sure we run task_work before checking for signals */
ret = io_run_task_work_sig();
- if (ret > 0)
+ if (ret > 0) {
+ finish_wait(&ctx->wait, &iowq.wq);
continue;
+ }
else if (ret < 0)
break;
if (io_should_wake(&iowq))
break;
- if (test_bit(0, &ctx->cq_check_overflow))
+ if (test_bit(0, &ctx->cq_check_overflow)) {
+ finish_wait(&ctx->wait, &iowq.wq);
continue;
+ }
if (uts) {
timeout = schedule_timeout(timeout);
if (timeout == 0) {
}
}
+static int io_uring_count_inflight(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files)
+{
+ struct io_kiocb *req;
+ int cnt = 0;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
+ cnt += io_match_task(req, task, files);
+ spin_unlock_irq(&ctx->inflight_lock);
+ return cnt;
+}
+
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct task_struct *task,
struct files_struct *files)
{
while (!list_empty_careful(&ctx->inflight_list)) {
struct io_task_cancel cancel = { .task = task, .files = files };
- struct io_kiocb *req;
DEFINE_WAIT(wait);
- bool found = false;
+ int inflight;
- spin_lock_irq(&ctx->inflight_lock);
- list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
- if (!io_match_task(req, task, files))
- continue;
- found = true;
- break;
- }
- if (found)
- prepare_to_wait(&task->io_uring->wait, &wait,
- TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&ctx->inflight_lock);
-
- /* We need to keep going until we don't find a matching req */
- if (!found)
+ inflight = io_uring_count_inflight(ctx, task, files);
+ if (!inflight)
break;
io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
io_cqring_overflow_flush(ctx, true, task, files);
/* cancellations _may_ trigger task work */
io_run_task_work();
- schedule();
+
+ prepare_to_wait(&task->io_uring->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (inflight == io_uring_count_inflight(ctx, task, files))
+ schedule();
finish_wait(&task->io_uring->wait, &wait);
}
}
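/*
 * The rewritten cancellation loop above samples the number of in-flight
 * requests, registers on the waitqueue, re-samples, and only sleeps if the
 * count has not changed, so a completion racing with the check cannot be
 * lost.  A userspace condition variable gives the same guarantee by
 * re-checking the predicate under the mutex before sleeping; the program
 * below (all names invented for the example, build with: cc -pthread)
 * drains a counter of "in-flight" items that worker threads complete
 * asynchronously.
 */
#define _DEFAULT_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int inflight = 4;

static void *worker(void *arg)
{
	usleep(10000);				/* simulate request completion */
	pthread_mutex_lock(&lock);
	inflight--;				/* complete one request */
	pthread_cond_broadcast(&cond);		/* wake the canceller */
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t tid[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	while (inflight > 0)			/* re-check before every sleep */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("all in-flight work drained\n");

	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}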
struct task_struct *task = current;
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
- /* for SQPOLL only sqo_task has task notes */
- WARN_ON_ONCE(ctx->sqo_task != current);
io_disable_sqo_submit(ctx);
task = ctx->sq_data->thread;
atomic_inc(&task->io_uring->in_idle);
io_cancel_defer_files(ctx, task, files);
io_cqring_overflow_flush(ctx, true, task, files);
+ io_uring_cancel_files(ctx, task, files);
if (!files)
__io_uring_cancel_task_requests(ctx, task);
- else
- io_uring_cancel_files(ctx, task, files);
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
atomic_dec(&task->io_uring->in_idle);
- /*
- * If the files that are going away are the ones in the thread
- * identity, clear them out.
- */
- if (task->io_uring->identity->files == files)
- task->io_uring->identity->files = NULL;
io_sq_thread_unpark(ctx->sq_data);
}
}
prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
/*
- * If we've seen completions, retry. This avoids a race where
- * a completion comes in before we did prepare_to_wait().
+ * If we've seen completions, retry without waiting. This
+ * avoids a race where a completion comes in before we did
+ * prepare_to_wait().
*/
- if (inflight != tctx_inflight(tctx))
- continue;
- schedule();
+ if (inflight == tctx_inflight(tctx))
+ schedule();
finish_wait(&tctx->wait, &wait);
} while (1);
- finish_wait(&tctx->wait, &wait);
atomic_dec(&tctx->in_idle);
io_uring_remove_task_files(tctx);
struct io_uring_task *tctx = current->io_uring;
struct io_ring_ctx *ctx = file->private_data;
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ io_uring_cancel_task_requests(ctx, NULL);
+
if (!tctx)
return 0;
return NULL;
}
+/*
+ * Compare 2 layout stateid sequence ids, to see which is newer,
+ * taking into account wraparound issues.
+ */
+static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
+{
+ return (s32)(s1 - s2) > 0;
+}
+
+static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
+{
+ if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
+ lo->plh_barrier = newseq;
+}
+
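/*
 * pnfs_seqid_is_newer() above relies on two's-complement subtraction to
 * compare 32-bit sequence ids across wraparound: the difference is taken
 * modulo 2^32 and then interpreted as signed, so an id that is slightly ahead
 * still compares newer after the counter wraps.  Standalone check of that
 * property (the helper mirrors the patch, the test values are arbitrary):
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqid_is_newer(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
	assert(seqid_is_newer(2, 1));			/* ordinary case */
	assert(!seqid_is_newer(1, 2));
	/* 0x00000001 is 3 steps ahead of 0xfffffffe across the wrap. */
	assert(seqid_is_newer(0x00000001u, 0xfffffffeu));
	assert(!seqid_is_newer(0xfffffffeu, 0x00000001u));
	/* Equal ids are not "newer". */
	assert(!seqid_is_newer(7, 7));
	return 0;
}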
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
u32 seq)
if (seq != 0) {
WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
lo->plh_return_seq = seq;
+ pnfs_barrier_update(lo, seq);
}
}
return rv;
}
-/*
- * Compare 2 layout stateid sequence ids, to see which is newer,
- * taking into account wraparound issues.
- */
-static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
-{
- return (s32)(s1 - s2) > 0;
-}
-
static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
const struct pnfs_layout_range *recall_range)
new_barrier = be32_to_cpu(new->seqid);
else if (new_barrier == 0)
return;
- if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
- lo->plh_barrier = new_barrier;
+ pnfs_barrier_update(lo, new_barrier);
}
static bool
{
u32 seqid = be32_to_cpu(stateid->seqid);
- return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
+ return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
}
/* lget is set to 1 if called from inside send_layoutget call chain */
return false;
set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
pnfs_get_layout_hdr(lo);
+ nfs4_stateid_copy(stateid, &lo->plh_stateid);
+ *cred = get_cred(lo->plh_lc_cred);
if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
- nfs4_stateid_copy(stateid, &lo->plh_stateid);
- *cred = get_cred(lo->plh_lc_cred);
if (lo->plh_return_seq != 0)
stateid->seqid = cpu_to_be32(lo->plh_return_seq);
if (iomode != NULL)
*iomode = lo->plh_return_iomode;
pnfs_clear_layoutreturn_info(lo);
- return true;
- }
- nfs4_stateid_copy(stateid, &lo->plh_stateid);
- *cred = get_cred(lo->plh_lc_cred);
- if (iomode != NULL)
+ } else if (iomode != NULL)
*iomode = IOMODE_ANY;
+ pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
return true;
}
wake_up_var(&lo->plh_outstanding);
}
+static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
+}
+
static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
unsigned long *bitlock = &lo->plh_flags;
goto out_forget;
}
- if (!pnfs_layout_is_valid(lo)) {
- /* We have a completely new layout */
- pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
- } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+ if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
/* existing state ID, make sure the sequence number matches. */
if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+ if (!pnfs_layout_is_valid(lo) &&
+ pnfs_is_first_layoutget(lo))
+ lo->plh_barrier = 0;
dprintk("%s forget reply due to sequence\n", __func__);
goto out_forget;
}
pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
- } else {
+ } else if (pnfs_layout_is_valid(lo)) {
/*
* We got an entirely new state ID. Mark all segments for the
* inode invalid, and retry the layoutget
*/
- pnfs_mark_layout_stateid_invalid(lo, &free_me);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .length = NFS4_MAX_UINT64,
+ };
+ pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
+ pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ &range, 0);
goto out_forget;
+ } else {
+ /* We have a completely new layout */
+ if (!pnfs_is_first_layoutget(lo))
+ goto out_forget;
+ pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
}
pnfs_get_lseg(lseg);
if (ovl_is_private_xattr(sb, name))
continue;
+
+ error = security_inode_copy_up_xattr(name);
+ if (error < 0 && error != -EOPNOTSUPP)
+ break;
+ if (error == 1) {
+ error = 0;
+ continue; /* Discard */
+ }
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
goto retry;
}
- error = security_inode_copy_up_xattr(name);
- if (error < 0 && error != -EOPNOTSUPP)
- break;
- if (error == 1) {
- error = 0;
- continue; /* Discard */
- }
error = vfs_setxattr(new, name, value, size, 0);
if (error) {
if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
buflen -= thislen;
memcpy(&buf[buflen], name, thislen);
- tmp = dget_dlock(d->d_parent);
spin_unlock(&d->d_lock);
+ tmp = dget_parent(d);
dput(d);
d = tmp;
const struct cred *old_cred;
int ret;
- if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
- return 0;
+ ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
+ if (ret <= 0)
+ return ret;
ret = ovl_real_fdget_meta(file, &real, !datasync);
if (ret)
goto out;
if (!value && !upperdentry) {
+ old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_getxattr(realdentry, name, NULL, 0);
+ revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
}
bool ovl_is_metacopy_dentry(struct dentry *dentry);
char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
int padding);
+int ovl_sync_status(struct ovl_fs *ofs);
static inline bool ovl_is_impuredir(struct super_block *sb,
struct dentry *dentry)
atomic_long_t last_ino;
/* Whiteout dentry cache */
struct dentry *whiteout;
+ /* r/o snapshot of upperdir sb's only taken on volatile mounts */
+ errseq_t errseq;
};
static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
struct ovl_dir_file *od = file->private_data;
struct dentry *dentry = file->f_path.dentry;
- struct file *realfile = od->realfile;
+ struct file *old, *realfile = od->realfile;
if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
return want_upper ? NULL : realfile;
* Need to check if we started out being a lower dir, but got copied up
*/
if (!od->is_upper) {
- struct inode *inode = file_inode(file);
-
realfile = READ_ONCE(od->upperfile);
if (!realfile) {
struct path upperpath;
ovl_path_upper(dentry, &upperpath);
realfile = ovl_dir_open_realfile(file, &upperpath);
+ if (IS_ERR(realfile))
+ return realfile;
- inode_lock(inode);
- if (!od->upperfile) {
- if (IS_ERR(realfile)) {
- inode_unlock(inode);
- return realfile;
- }
- smp_store_release(&od->upperfile, realfile);
- } else {
- /* somebody has beaten us to it */
- if (!IS_ERR(realfile))
- fput(realfile);
- realfile = od->upperfile;
+ old = cmpxchg_release(&od->upperfile, NULL, realfile);
+ if (old) {
+ fput(realfile);
+ realfile = old;
}
- inode_unlock(inode);
}
}
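/*
 * The hunk above replaces an inode_lock()ed slow path with a single
 * cmpxchg_release() that publishes the lazily opened upper file: whoever
 * loses the race drops its copy and uses the winner's.  Minimal C11 sketch of
 * that publish-once pattern using a heap object instead of a struct file;
 * make_object()/get_object() and the field names are invented, and the sketch
 * keeps the default seq_cst ordering where the kernel pairs release with
 * acquire.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object { int id; };

static _Atomic(struct object *) cached;		/* starts out NULL */

static struct object *make_object(int id)
{
	struct object *o = malloc(sizeof(*o));

	if (!o)
		exit(1);
	o->id = id;
	return o;
}

static struct object *get_object(int id)
{
	struct object *o = atomic_load(&cached);
	struct object *expected = NULL;

	if (o)
		return o;			/* already published */

	o = make_object(id);
	/* Try to publish; if somebody beat us to it, discard our copy and
	 * use the object that won the race. */
	if (!atomic_compare_exchange_strong(&cached, &expected, o)) {
		free(o);
		o = expected;
	}
	return o;
}

int main(void)
{
	printf("object id %d\n", get_object(42)->id);
	printf("object id %d\n", get_object(99)->id);	/* still 42 */
	return 0;
}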
struct file *realfile;
int err;
- if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb)))
- return 0;
+ err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
+ if (err <= 0)
+ return err;
realfile = ovl_dir_real_file(file, true);
err = PTR_ERR_OR_ZERO(realfile);
struct super_block *upper_sb;
int ret;
- if (!ovl_upper_mnt(ofs))
- return 0;
+ ret = ovl_sync_status(ofs);
+ /*
+ * We have to always set the err, because the return value isn't
+ * checked in syncfs, and instead indirectly return an error via
+ * the sb's writeback errseq, which VFS inspects after this call.
+ */
+ if (ret < 0) {
+ errseq_set(&sb->s_wb_err, -EIO);
+ return -EIO;
+ }
+
+ if (!ret)
+ return ret;
- if (!ovl_should_sync(ofs))
- return 0;
/*
* Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
* All the super blocks will be iterated, including upper_sb.
unsigned int numlower;
int err;
+ err = -EIO;
+ if (WARN_ON(sb->s_user_ns != current_user_ns()))
+ goto out;
+
sb->s_d_op = &ovl_dentry_operations;
err = -ENOMEM;
sb->s_op = &ovl_super_operations;
if (ofs->config.upperdir) {
+ struct super_block *upper_sb;
+
if (!ofs->config.workdir) {
pr_err("missing 'workdir'\n");
goto out_err;
if (err)
goto out_err;
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+ if (!ovl_should_sync(ofs)) {
+ ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
+ if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
+ err = -EIO;
+ pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
+ goto out_err;
+ }
+ }
+
err = ovl_get_workdir(sb, ofs, &upperpath);
if (err)
goto out_err;
if (!ofs->workdir)
sb->s_flags |= SB_RDONLY;
- sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
- sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
-
+ sb->s_stack_depth = upper_sb->s_stack_depth;
+ sb->s_time_gran = upper_sb->s_time_gran;
}
oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
err = PTR_ERR(oe);
kfree(buf);
return ERR_PTR(res);
}
+
+/*
+ * ovl_sync_status() - Check fs sync status for volatile mounts
+ *
+ * Returns 1 if this is not a volatile mount and a real sync is required.
+ *
+ * Returns 0 if syncing can be skipped because the mount is volatile and no errors
+ * have occurred on the upperdir since the mount.
+ *
+ * Returns -errno if this is a volatile mount and an error has occurred on the
+ * upperdir since the mount. If the error code changes, the latest error code
+ * is returned.
+ */
+
+int ovl_sync_status(struct ovl_fs *ofs)
+{
+ struct vfsmount *mnt;
+
+ if (ovl_should_sync(ofs))
+ return 1;
+
+ mnt = ovl_upper_mnt(ofs);
+ if (!mnt)
+ return 0;
+
+ return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
+}
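/*
 * ovl_sync_status() above lets a volatile mount skip syncing only while the
 * upper filesystem has reported no new writeback error since the sample taken
 * at mount time (ofs->errseq).  Simplified userspace model of that
 * sample/check protocol: the real errseq_t packs the errno, a "seen" flag and
 * a counter into one 32-bit word, while this sketch uses a plain struct and
 * invented names (errlog, errlog_*).
 */
#include <assert.h>
#include <stdio.h>

struct errlog {
	int error;		/* last error recorded, 0 if none */
	unsigned int seq;	/* bumped every time a new error is recorded */
};

static void errlog_set(struct errlog *log, int error)
{
	log->error = error;
	log->seq++;
}

/* Remember the current position in the error stream. */
static unsigned int errlog_sample(const struct errlog *log)
{
	return log->seq;
}

/* Report an error only if one was recorded after @since was sampled. */
static int errlog_check(const struct errlog *log, unsigned int since)
{
	return log->seq == since ? 0 : log->error;
}

int main(void)
{
	struct errlog upper = { 0, 0 };
	unsigned int at_mount;

	at_mount = errlog_sample(&upper);	/* volatile mount: take sample */
	assert(errlog_check(&upper, at_mount) == 0);	/* sync can be skipped */

	errlog_set(&upper, -5 /* -EIO */);	/* upperdir hits a writeback error */
	assert(errlog_check(&upper, at_mount) == -5);	/* syncfs must now fail */

	printf("volatile-mount error check behaves as expected\n");
	return 0;
}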
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
+ .splice_write = iter_file_splice_write,
};
/*
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
#ifndef __DT_APQ8016_LPASS_H
#define __DT_APQ8016_LPASS_H
-#define MI2S_PRIMARY 0
-#define MI2S_SECONDARY 1
-#define MI2S_TERTIARY 2
-#define MI2S_QUATERNARY 3
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */
#endif /* __DT_APQ8016_LPASS_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_QCOM_LPASS_H
+#define __DT_QCOM_LPASS_H
+
+#define MI2S_PRIMARY 0
+#define MI2S_SECONDARY 1
+#define MI2S_TERTIARY 2
+#define MI2S_QUATERNARY 3
+#define MI2S_QUINARY 4
+
+#define LPASS_DP_RX 5
+
+#define LPASS_MCLK0 0
+
+#endif /* __DT_QCOM_LPASS_H */
#ifndef __DT_SC7180_LPASS_H
#define __DT_SC7180_LPASS_H
-#define MI2S_PRIMARY 0
-#define MI2S_SECONDARY 1
-#define LPASS_DP_RX 2
+#include <dt-bindings/sound/qcom,lpass.h>
-#define LPASS_MCLK0 0
+/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */
#endif /* __DT_APQ8016_LPASS_H */
}
#endif
+void set_page_huge_active(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
static inline void *dev_iommu_priv_get(struct device *dev)
{
- return dev->iommu->priv;
+ if (dev->iommu)
+ return dev->iommu->priv;
+ else
+ return NULL;
}
static inline void dev_iommu_priv_set(struct device *dev, void *priv)
return (void *)arch_kasan_reset_tag(addr);
}
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
* Objtool generates debug info for both FUNC & CODE, but needs special
* annotations for each CODE's start (to describe the actual stack frame).
*
+ * Objtool requires that all code must be contained in an ELF symbol. Symbol
+ * names that have a .L prefix do not emit symbol table entries. .L
+ * prefixed symbols can be used within a code region, but should be avoided for
+ * denoting a range of code via ``SYM_*_START/END`` annotations.
+ *
* ALIAS -- does not generate debug info -- the aliased function will
*/
return val.vbool;
}
-/**
- * mlx5_core_net - Provide net namespace of the mlx5_core_dev
- * @dev: mlx5 core device
- *
- * mlx5_core_net() returns the net namespace of mlx5 core device.
- * This can be called only in below described limited context.
- * (a) When a devlink instance for mlx5_core is registered and
- * when devlink reload operation is disabled.
- * or
- * (b) during devlink reload reload_down() and reload_up callbacks
- * where it is ensured that devlink instance's net namespace is
- * stable.
- */
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
- return devlink_net(priv_to_devlink(dev));
-}
-
#endif /* MLX5_DRIVER_H */
return ERR_PTR(-ENODEV);
}
+static inline struct regulator *__must_check
+devm_regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
return -EINVAL;
}
+static inline int regulator_sync_voltage(struct regulator *regulator)
+{
+ return -EINVAL;
+}
+
static inline int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
return 0;
}
+static inline int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
static inline void *regulator_get_drvdata(struct regulator *regulator)
{
return NULL;
#define XDR_QUADLEN(l) (((l) + 3) >> 2)
/*
- * Generic opaque `network object.' At the kernel level, this type
- * is used only by lockd.
+ * Generic opaque `network object.'
*/
#define XDR_MAX_NETOBJ 1024
struct xdr_netobj {
\
it_func_ptr = \
rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
- do { \
- it_func = (it_func_ptr)->func; \
- __data = (it_func_ptr)->data; \
- ((void(*)(void *, proto))(it_func))(__data, args); \
- } while ((++it_func_ptr)->func); \
+ if (it_func_ptr) { \
+ do { \
+ it_func = (it_func_ptr)->func; \
+ __data = (it_func_ptr)->data; \
+ ((void(*)(void *, proto))(it_func))(__data, args); \
+ } while ((++it_func_ptr)->func); \
+ } \
return 0; \
} \
DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
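/*
 * The hunk above guards the tracepoint iterator against a NULL funcs array:
 * the array is walked with a do/while until an entry whose function pointer
 * is NULL, so when no probe is registered at all the unguarded loop
 * dereferenced a NULL pointer before the loop condition could stop it.
 * Standalone sketch of the same pattern (struct and callback names invented):
 */
#include <stdio.h>

struct probe {
	void (*func)(void *data, int arg);
	void *data;
};

static void say_hello(void *data, int arg)
{
	printf("%s: arg=%d\n", (const char *)data, arg);
}

static void call_probes(const struct probe *it, int arg)
{
	if (it) {			/* the added NULL check */
		do {
			it->func(it->data, arg);
		} while ((++it)->func);	/* stop at the NULL terminator */
	}
}

int main(void)
{
	struct probe probes[] = {
		{ say_hello, "probe A" },
		{ say_hello, "probe B" },
		{ NULL, NULL },		/* terminator */
	};

	call_probes(probes, 1);		/* runs both probes */
	call_probes(NULL, 2);		/* no probes registered: safe no-op */
	return 0;
}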
extern int tty_dev_name_to_number(const char *name, dev_t *number);
extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
extern void tty_ldisc_unlock(struct tty_struct *tty);
+extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
-#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */
+#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
/*
* VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
* determine which allocations need the module shadow freed.
*/
-/*
- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
- * vfree_atomic().
- */
-#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
-
/* bits [20..32] reserved for arch specific ioremap internals */
/*
u32 width, u32 height);
/**
- * v4l2_get_link_rate - Get link rate from transmitter
+ * v4l2_get_link_freq - Get link rate from transmitter
*
* @handler: The transmitter's control handler
* @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
* -ENOENT: Link frequency or pixel rate control not found
* -EINVAL: Invalid link frequency value
*/
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div);
static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
unsigned short n2, n2count;
unsigned short t1, t2;
struct timer_list t1timer, t2timer;
+ bool t1timer_stop, t2timer_stop;
/* Internal control information */
struct sk_buff_head write_queue;
struct lapb_frame frmr_data;
unsigned char frmr_type;
+ spinlock_t lock;
refcount_t refcnt;
};
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *key_end, const u32 *data,
u64 timeout, u64 expiration, gfp_t gfp);
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_expr *expr_array[]);
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr);
old = *pold;
*pold = new;
if (old != NULL)
- qdisc_tree_flush_backlog(old);
+ qdisc_purge_queue(old);
sch_tree_unlock(sch);
return old;
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
-extern void tcp_rack_mark_lost(struct sock *sk);
+extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
- netdev_features_t features);
+ netdev_features_t features, bool is_ipv6);
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
struct snd_pcm_hw_rule {
unsigned int cond;
int var;
- int deps[4];
+ int deps[5];
snd_pcm_hw_rule_func_t func;
void *private;
BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3,
};
-struct br_mrp_tlv_hdr {
- __u8 type;
- __u8 length;
-};
-
-struct br_mrp_sub_tlv_hdr {
- __u8 type;
- __u8 length;
-};
-
-struct br_mrp_end_hdr {
- struct br_mrp_tlv_hdr hdr;
-};
-
-struct br_mrp_common_hdr {
- __be16 seq_id;
- __u8 domain[MRP_DOMAIN_UUID_LENGTH];
-};
-
-struct br_mrp_ring_test_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 state;
- __be16 transitions;
- __be32 timestamp;
-};
-
-struct br_mrp_ring_topo_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 interval;
-};
-
-struct br_mrp_ring_link_hdr {
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 interval;
- __be16 blocked;
-};
-
-struct br_mrp_sub_opt_hdr {
- __u8 type;
- __u8 manufacture_data[MRP_MANUFACTURE_DATA_LENGTH];
-};
-
-struct br_mrp_test_mgr_nack_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 other_prio;
- __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_test_prop_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 other_prio;
- __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_oui_hdr {
- __u8 oui[MRP_OUI_LENGTH];
-};
-
-struct br_mrp_in_test_hdr {
- __be16 id;
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 state;
- __be16 transitions;
- __be32 timestamp;
-};
-
-struct br_mrp_in_topo_hdr {
- __u8 sa[ETH_ALEN];
- __be16 id;
- __be16 interval;
-};
-
-struct br_mrp_in_link_hdr {
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 id;
- __be16 interval;
-};
-
#endif
#define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100
#define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800
-#define RKISP1_CIF_ISP_AE_MEAN_MAX 25
-#define RKISP1_CIF_ISP_HIST_BIN_N_MAX 16
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10 25
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12 81
+#define RKISP1_CIF_ISP_AE_MEAN_MAX RKISP1_CIF_ISP_AE_MEAN_MAX_V12
+
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 16
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 32
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
+
#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS 3
#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE 17
* Gamma out
*/
/* Maximum number of color samples supported */
-#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES 17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 34
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
/*
* Lens shade correction
/*
* Histogram calculation
*/
-/* Last 3 values unused. */
-#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE 28
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
/*
* Defect Pixel Cluster Correction
#define RKISP1_CIF_ISP_STAT_AFM (1U << 2)
#define RKISP1_CIF_ISP_STAT_HIST (1U << 3)
+/**
+ * enum rkisp1_cif_isp_version - ISP variants
+ *
+ * @RKISP1_V10: used at least in rk3288 and rk3399
+ * @RKISP1_V11: declared in the original vendor code, but not used
+ * @RKISP1_V12: used at least in rk3326 and px30
+ * @RKISP1_V13: used at least in rk1808
+ */
+enum rkisp1_cif_isp_version {
+ RKISP1_V10 = 10,
+ RKISP1_V11,
+ RKISP1_V12,
+ RKISP1_V13,
+};
+
enum rkisp1_cif_isp_histogram_mode {
RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
*
* @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
* @gamma_y: gamma out curve y-axis for all color components
+ *
+ * The number of entries of @gamma_y depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
+ * entries. RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum
+ * of the two.
*/
struct rkisp1_cif_isp_goc_config {
__u32 mode;
* skipped
* @meas_window: coordinates of the measure window
* @hist_weight: weighting factor for sub-windows
+ *
+ * The number of entries of @hist_weight depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
+ * entries. RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum
+ * of the two.
*/
struct rkisp1_cif_isp_hst_config {
__u32 mode;
* @exp_mean: Mean luminance value of block xx
* @bls_val: BLS measured values
*
- * Image is divided into 5x5 blocks.
+ * The number of entries of @exp_mean depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries.
+ * RKISP1_CIF_ISP_AE_MEAN_MAX is equal to the maximum of the two.
+ *
+ * Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
*/
struct rkisp1_cif_isp_ae_stat {
__u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
/**
* struct rkisp1_cif_isp_hist_stat - statistics histogram data
*
- * @hist_bins: measured bin counters
+ * @hist_bins: measured bin counters. Each bin is a 20 bits unsigned fixed point
+ * type. Bits 0-4 are the fractional part and bits 5-19 are the
+ * integer part.
+ *
+ * The window of the measurement area is divided into 5x5 sub-windows for
+ * V10/V11 and into 9x9 sub-windows for V12. The histogram is then computed for
+ * each sub-window independently and the final result is a weighted average of
+ * the histogram measurements on all sub-windows. The window of the
+ * measurement area and the weight of each sub-window are configurable using
+ * struct @rkisp1_cif_isp_hst_config.
+ *
+ * The histogram contains 16 bins in V10/V11 and 32 bins in V12/V13.
+ *
+ * The number of entries of @hist_bins depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
- * Measurement window divided into 25 sub-windows, set
- * with ISP_HIST_XXX
+ * Versions <= V11 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries.
+ * RKISP1_CIF_ISP_HIST_BIN_N_MAX is equal to the maximum of the two.
*/
struct rkisp1_cif_isp_hist_stat {
- __u16 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+ __u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
};
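/*
 * The widened hist_bins[] above carries 20-bit unsigned fixed-point counters:
 * bits 0-4 are the fractional part and bits 5-19 the integer part, i.e. the
 * raw value divided by 32 is the weighted bin count.  Tiny conversion helper
 * demonstrating that layout (the function name and sample value are
 * invented):
 */
#include <stdint.h>
#include <stdio.h>

/* Convert a 20-bit 15.5 fixed-point histogram bin to a floating-point count. */
static double hist_bin_to_count(uint32_t raw)
{
	uint32_t integer = (raw >> 5) & 0x7fff;	/* bits 5..19 */
	uint32_t frac    = raw & 0x1f;		/* bits 0..4  */

	return integer + frac / 32.0;
}

int main(void)
{
	uint32_t raw = (100u << 5) | 16u;	/* 100 + 16/32 = 100.5 */

	printf("raw=0x%05x -> %.2f\n", raw, hist_bin_to_count(raw));
	return 0;
}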
/**
pad:4,
reserved1:16;
#elif defined(__BIG_ENDIAN_BITFIELD)
- __u32 reserved:20,
+ __u32 cmpri:4,
+ cmpre:4,
pad:4,
- cmpri:4,
- cmpre:4;
+ reserved:20;
#else
#error "Please fix <asm/byteorder.h>"
#endif
};
/* The v4l2 sub-device video device node is registered in read-only mode. */
-#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0)
+#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};
+enum pvrdma_network_type {
+ PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_IPV4,
+ PVRDMA_NETWORK_IPV6
+};
+
struct pvrdma_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 reserved;
config CONSTRUCTORS
bool
- depends on !UML
config IRQ_WORK
bool
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .ret_stack = NULL,
+ .ret_stack = NULL,
+ .tracing_graph_pause = ATOMIC_INIT(0),
#endif
#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
.trace_recursion = 0,
/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
-#ifdef CONFIG_CONSTRUCTORS
+/*
+ * For UML, the constructors have already been called by the
+ * normal setup code as it's just a normal ELF binary, so we
+ * cannot do it again - but we do need CONFIG_CONSTRUCTORS
+ * even on UML for modules.
+ */
+#if defined(CONFIG_CONSTRUCTORS) && !defined(CONFIG_UML)
ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
for (; fn < (ctor_fn_t *) __ctors_end; fn++)
fd = *(int *)key;
f = fget_raw(fd);
- if (!f || !inode_storage_ptr(f->f_inode))
+ if (!f)
+ return -EBADF;
+ if (!inode_storage_ptr(f->f_inode)) {
+ fput(f);
return -EBADF;
+ }
sdata = bpf_local_storage_update(f->f_inode,
(struct bpf_local_storage_map *)map,
BTF_ID(func, bpf_lsm_file_lock)
BTF_ID(func, bpf_lsm_file_open)
BTF_ID(func, bpf_lsm_file_receive)
+
+#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_inet_conn_established)
+#endif /* CONFIG_SECURITY_NETWORK */
+
BTF_ID(func, bpf_lsm_inode_create)
BTF_ID(func, bpf_lsm_inode_free_security)
BTF_ID(func, bpf_lsm_inode_getattr)
BTF_ID(func, bpf_lsm_inode_unlink)
BTF_ID(func, bpf_lsm_kernel_module_request)
BTF_ID(func, bpf_lsm_kernfs_init_security)
+
+#ifdef CONFIG_KEYS
BTF_ID(func, bpf_lsm_key_free)
+#endif /* CONFIG_KEYS */
+
BTF_ID(func, bpf_lsm_mmap_file)
BTF_ID(func, bpf_lsm_netlink_send)
BTF_ID(func, bpf_lsm_path_notify)
BTF_ID(func, bpf_lsm_sb_statfs)
BTF_ID(func, bpf_lsm_sb_umount)
BTF_ID(func, bpf_lsm_settime)
+
+#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_socket_accept)
BTF_ID(func, bpf_lsm_socket_bind)
BTF_ID(func, bpf_lsm_socket_connect)
BTF_ID(func, bpf_lsm_socket_sendmsg)
BTF_ID(func, bpf_lsm_socket_shutdown)
BTF_ID(func, bpf_lsm_socket_socketpair)
+#endif /* CONFIG_SECURITY_NETWORK */
+
BTF_ID(func, bpf_lsm_syslog)
BTF_ID(func, bpf_lsm_task_alloc)
BTF_ID(func, bpf_lsm_task_getsecid)
goto out;
}
+ if (ctx.optlen < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
if (copy_from_user(ctx.optval, optval,
min(ctx.optlen, max_optlen)) != 0) {
ret = -EFAULT;
goto out;
}
- if (ctx.optlen > max_optlen) {
+ if (ctx.optlen > max_optlen || ctx.optlen < 0) {
ret = -EFAULT;
goto out;
}
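/*
 * Both hunks above reject a negative ctx.optlen before it is used as a copy
 * length: a BPF program can rewrite optlen, and a negative int turns into an
 * enormous value once it is converted to the unsigned size_t that the copy
 * helpers take.  Userspace demonstration of that conversion hazard (the
 * checked_copy_len() helper is invented for the example):
 */
#include <stddef.h>
#include <stdio.h>

/* Return a usable copy length, or -1 if the untrusted length is invalid. */
static long checked_copy_len(int optlen, size_t max_optlen)
{
	if (optlen < 0)
		return -1;		/* the check the patch adds */
	if ((size_t)optlen > max_optlen)
		return -1;
	return optlen;
}

int main(void)
{
	int bad_optlen = -1;

	/* Without the check, the implicit conversion yields a huge size. */
	printf("(size_t)%d = %zu\n", bad_optlen, (size_t)bad_optlen);

	printf("checked_copy_len(-1, 64)  -> %ld\n", checked_copy_len(-1, 64));
	printf("checked_copy_len(16, 64)  -> %ld\n", checked_copy_len(16, 64));
	printf("checked_copy_len(128, 64) -> %ld\n", checked_copy_len(128, 64));
	return 0;
}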
LIBBPF_A = $(obj)/libbpf.a
LIBBPF_OUT = $(abspath $(obj))
+# Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test
+# in tools/scripts/Makefile.include always succeeds when building the kernel
+# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg".
$(LIBBPF_A):
- $(Q)$(MAKE) -C $(LIBBPF_SRCS) OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
+ $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
-I $(srctree)/tools/lib/ -Wno-unused-result
atomic64_set(&map->sum_sq_unmap, 0);
atomic64_set(&map->loops, 0);
- for (i = 0; i < threads; i++)
+ for (i = 0; i < threads; i++) {
+ get_task_struct(tsk[i]);
wake_up_process(tsk[i]);
+ }
msleep_interruptible(map->bparam.seconds * 1000);
}
out:
+ for (i = 0; i < threads; i++)
+ put_task_struct(tsk[i]);
put_device(map->dev);
kfree(tsk);
return ret;
*/
static inline bool report_single_step(unsigned long work)
{
- if (!(work & SYSCALL_WORK_SYSCALL_EMU))
+ if (work & SYSCALL_WORK_SYSCALL_EMU)
return false;
return !!(current_thread_info()->flags & _TIF_SINGLESTEP);
return pi_state;
}
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
+ struct task_struct *new_owner)
+{
+ struct task_struct *old_owner = pi_state->owner;
+
+ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
+
+ if (old_owner) {
+ raw_spin_lock(&old_owner->pi_lock);
+ WARN_ON(list_empty(&pi_state->list));
+ list_del_init(&pi_state->list);
+ raw_spin_unlock(&old_owner->pi_lock);
+ }
+
+ if (new_owner) {
+ raw_spin_lock(&new_owner->pi_lock);
+ WARN_ON(!list_empty(&pi_state->list));
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+ raw_spin_unlock(&new_owner->pi_lock);
+ }
+}
+
static void get_pi_state(struct futex_pi_state *pi_state)
{
WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- struct task_struct *owner;
unsigned long flags;
raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
- owner = pi_state->owner;
- if (owner) {
- raw_spin_lock(&owner->pi_lock);
- list_del_init(&pi_state->list);
- raw_spin_unlock(&owner->pi_lock);
- }
- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+ pi_state_update_owner(pi_state, NULL);
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
}
* FUTEX_OWNER_DIED bit. See [4]
*
* [10] There is no transient state which leaves owner and user space
- * TID out of sync.
+ * TID out of sync. Except one error case where the kernel is denied
+ * write access to the user address, see fixup_pi_state_owner().
*
*
* Serialization and lifetime rules:
ret = -EINVAL;
}
- if (ret)
- goto out_unlock;
-
- /*
- * This is a point of no return; once we modify the uval there is no
- * going back and subsequent operations must not fail.
- */
-
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
-
- raw_spin_lock(&new_owner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &new_owner->pi_state_list);
- pi_state->owner = new_owner;
- raw_spin_unlock(&new_owner->pi_lock);
-
- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ if (!ret) {
+ /*
+ * This is a point of no return; once we modified the uval
+ * there is no going back and subsequent operations must
+ * not fail.
+ */
+ pi_state_update_owner(pi_state, new_owner);
+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ }
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(q->lock_ptr);
}
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *argowner)
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
{
struct futex_pi_state *pi_state = q->pi_state;
- u32 uval, curval, newval;
struct task_struct *oldowner, *newowner;
- u32 newtid;
- int ret, err = 0;
-
- lockdep_assert_held(q->lock_ptr);
-
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ u32 uval, curval, newval, newtid;
+ int err = 0;
oldowner = pi_state->owner;
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 0;
}
if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
- /* We got the lock after all, nothing to fix. */
- ret = 0;
- goto out_unlock;
+ /* We got the lock. pi_state is correct. Tell caller. */
+ return 1;
}
/*
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 1;
}
newowner = argowner;
}
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
- if (pi_state->owner != NULL) {
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
- }
+ pi_state_update_owner(pi_state, newowner);
- pi_state->owner = newowner;
-
- raw_spin_lock(&newowner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &newowner->pi_state_list);
- raw_spin_unlock(&newowner->pi_lock);
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
- return 0;
+ return argowner == current;
/*
* In order to reschedule or handle a page fault, we need to drop the
switch (err) {
case -EFAULT:
- ret = fault_in_user_writeable(uaddr);
+ err = fault_in_user_writeable(uaddr);
break;
case -EAGAIN:
cond_resched();
- ret = 0;
+ err = 0;
break;
default:
WARN_ON_ONCE(1);
- ret = err;
break;
}
/*
* Check if someone else fixed it for us:
*/
- if (pi_state->owner != oldowner) {
- ret = 0;
- goto out_unlock;
- }
+ if (pi_state->owner != oldowner)
+ return argowner == current;
- if (ret)
- goto out_unlock;
+ /* Retry if err was -EAGAIN or the fault in succeeded */
+ if (!err)
+ goto retry;
- goto retry;
+ /*
+ * fault_in_user_writeable() failed so user state is immutable. At
+ * best we can make the kernel state consistent but user state will
+ * most likely be hosed and any subsequent unlock operation will be
+ * rejected due to PI futex rule [10].
+ *
+ * Ensure that the rtmutex owner is also the pi_state owner despite
+ * the user space value claiming something different. There is no
+ * point in unlocking the rtmutex if current is the owner as it
+ * would need to wait until the next waiter has taken the rtmutex
+ * to guarantee consistent state. Keep it simple. Userspace asked
+ * for this wrecked state.
+ *
+ * The rtmutex has an owner - either current or some other
+ * task. See the EAGAIN loop above.
+ */
+ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
-out_unlock:
+ return err;
+}
+
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
+{
+ struct futex_pi_state *pi_state = q->pi_state;
+ int ret;
+
+ lockdep_assert_held(q->lock_ptr);
+
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ ret = __fixup_pi_state_owner(uaddr, q, argowner);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
}
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
- int ret = 0;
-
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
* stable state, anything else needs more attention.
*/
if (q->pi_state->owner != current)
- ret = fixup_pi_state_owner(uaddr, q, current);
- return ret ? ret : locked;
+ return fixup_pi_state_owner(uaddr, q, current);
+ return 1;
}
/*
* Another speculative read; pi_state->owner == current is unstable
* but needs our attention.
*/
- if (q->pi_state->owner == current) {
- ret = fixup_pi_state_owner(uaddr, q, NULL);
- return ret;
- }
+ if (q->pi_state->owner == current)
+ return fixup_pi_state_owner(uaddr, q, NULL);
/*
* Paranoia check. If we did not take the lock, then we should not be
- * the owner of the rt_mutex.
+ * the owner of the rt_mutex. Warn and establish consistent state.
*/
- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
- "pi-state %p\n", ret,
- q->pi_state->pi_mutex.owner,
- q->pi_state->owner);
- }
+ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
+ return fixup_pi_state_owner(uaddr, q, current);
- return ret;
+ return 0;
}
/**
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to;
- struct futex_pi_state *pi_state = NULL;
struct task_struct *exiting = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_owner() faulted and was unable to handle the fault, unlock
- * it and return the fault to userspace.
- */
- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
-
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
goto out;
out_unlock_put_key:
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to;
- struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
+ /*
+ * Adjust the return value. It's either -EFAULT or
+ * success (1) but the caller expects 0 for success.
+ */
+ ret = ret < 0 ? ret : 0;
}
} else {
struct rt_mutex *pi_mutex;
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle
- * the fault, unlock the rt_mutex and return the fault to
- * userspace.
- */
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
- select CONSTRUCTORS if !UML
+ select CONSTRUCTORS
default n
help
This option enables gcov-based code profiling (e.g. for code coverage
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
- lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
thaw_processes();
Restore_console:
pm_restore_console();
- unlock_system_sleep();
}
#endif
return !offset;
}
-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+/**
+ * kprobe_on_func_entry() -- check whether given address is function entry
+ * @addr: Target address
+ * @sym: Target symbol name
+ * @offset: The offset from the symbol or the address
+ *
+ * This checks whether the given @addr+@offset or @sym+@offset is on the
+ * function entry address or not.
+ * This returns 0 if it is the function entry, or -EINVAL if it is not.
+ * It also returns -ENOENT if the symbol or address lookup fails.
+ * Caller must pass @addr or @sym (either one must be NULL), or this
+ * returns -EINVAL.
+ */
+int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
if (IS_ERR(kp_addr))
- return false;
+ return PTR_ERR(kp_addr);
- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
- !arch_kprobe_on_func_entry(offset))
- return false;
+ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
+ return -ENOENT;
- return true;
+ if (!arch_kprobe_on_func_entry(offset))
+ return -EINVAL;
+
+ return 0;
}
int register_kretprobe(struct kretprobe *rp)
{
- int ret = 0;
+ int ret;
struct kretprobe_instance *inst;
int i;
void *addr;
- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
+ if (ret)
+ return ret;
+
+ /* If only rp->kp.addr is specified, check reregistering kprobes */
+ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
return -EINVAL;
if (kretprobe_blacklist_size) {
* possible because it belongs to the pi_state which is about to be freed
* and it is not longer visible to other tasks.
*/
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner)
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_set_owner(lock, NULL);
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
unsigned int flags, int error)
{
if (!error) {
- flush_swap_writer(handle);
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
+ flush_swap_writer(handle);
}
if (error)
* not counted in the return value.
*/
if (buf_size > 0)
- text[len] = 0;
+ r->text_buf[len] = 0;
return len;
}
}
if (t->ret_stack == NULL) {
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->curr_ret_stack = -1;
t->curr_ret_depth = -1;
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->ftrace_timestamp = 0;
/* make curr_ret_stack visible before we add the ret_stack */
/* non overwrite screws up the latency tracers */
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+ /* without pause, we will produce garbage if another latency occurs */
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
tr->max_latency = 0;
irqsoff_trace = tr;
{
int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+ int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
stop_irqsoff_tracer(tr, is_graph(tr));
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
ftrace_reset_array_ops(tr);
irqsoff_busy = false;
{
struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
- return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
+ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
+ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}
bool trace_kprobe_error_injectable(struct trace_event_call *call)
}
if (is_return)
flags |= TPARG_FL_RETURN;
- if (kprobe_on_func_entry(NULL, symbol, offset))
+ ret = kprobe_on_func_entry(NULL, symbol, offset);
+ if (ret == 0)
flags |= TPARG_FL_FENTRY;
- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
+ /* Defer the ENOENT case until register kprobe */
+ if (ret == -EINVAL && is_return) {
trace_probe_log_err(0, BAD_RETPROBE);
goto parse_error;
}
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
+
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset)
+{
+ struct alignment_assumption_data *data = _data;
+ unsigned long real_ptr;
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "alignment-assumption");
+
+ if (offset)
+ pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
+ align, offset, data->type->type_name);
+ else
+ pr_err("assumption of %lu byte alignment for pointer of type %s failed",
+ align, data->type->type_name);
+
+ real_ptr = ptr - offset;
+ pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
+ offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
+ real_ptr & (align - 1));
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
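/*
 * The new __ubsan_handle_alignment_assumption() above reports the pointer's
 * actual alignment as BIT(__ffs(real_ptr)) -- the lowest set bit of the
 * address -- and the misalignment as real_ptr & (align - 1).  Standalone
 * demonstration of those two expressions (the addresses are made-up
 * constants, and the zero-address case is simplified to 0 here):
 */
#include <stdio.h>

static unsigned long actual_alignment(unsigned long addr)
{
	/* Lowest set bit == largest power of two dividing the address. */
	return addr ? 1UL << __builtin_ctzl(addr) : 0;
}

int main(void)
{
	unsigned long addr = 0x1008;	/* made-up pointer value */
	unsigned long assumed = 32;	/* alignment the code assumed */

	printf("address 0x%lx is %lu-byte aligned, misalignment %lu bytes\n",
	       addr, actual_alignment(addr), addr & (assumed - 1));
	/* prints: address 0x1008 is 8-byte aligned, misalignment 8 bytes */
	return 0;
}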
struct type_descriptor *type;
};
+struct alignment_assumption_data {
+ struct source_location location;
+ struct source_location assumption_location;
+ struct type_descriptor *type;
+};
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;
{
unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
unsigned int nr_scanned = 0;
- unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
+ unsigned long low_pfn, min_pfn, highest = 0;
unsigned long nr_isolated = 0;
unsigned long distance;
struct page *page = NULL;
struct page *freepage;
unsigned long flags;
unsigned int order_scanned = 0;
+ unsigned long high_pfn = 0;
if (!area->nr_free)
continue;
XA_STATE(xas, &mapping->i_pages, offset);
int huge = PageHuge(page);
int error;
+ bool charged = false;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapBacked(page), page);
error = mem_cgroup_charge(page, current->mm, gfp);
if (error)
goto error;
+ charged = true;
}
gfp &= GFP_RECLAIM_MASK;
if (xas_error(&xas)) {
error = xas_error(&xas);
+ if (charged)
+ mem_cgroup_uncharge(page);
goto error;
}
{
spinlock_t *ptl;
struct mmu_notifier_range range;
- bool was_locked = false;
+ bool do_unlock_page = false;
pmd_t _pmd;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
VM_BUG_ON(freeze && !page);
if (page) {
VM_WARN_ON_ONCE(!PageLocked(page));
- was_locked = true;
if (page != pmd_page(*pmd))
goto out;
}
if (pmd_trans_huge(*pmd)) {
if (!page) {
page = pmd_page(*pmd);
- if (unlikely(!trylock_page(page))) {
- get_page(page);
- _pmd = *pmd;
- spin_unlock(ptl);
- lock_page(page);
- spin_lock(ptl);
- if (unlikely(!pmd_same(*pmd, _pmd))) {
- unlock_page(page);
+ /*
+ * An anonymous page must be locked, to ensure that a
+ * concurrent reuse_swap_page() sees stable mapcount;
+ * but reuse_swap_page() is not used on shmem or file,
+ * and page lock must not be taken when zap_pmd_range()
+ * calls __split_huge_pmd() while i_mmap_lock is held.
+ */
+ if (PageAnon(page)) {
+ if (unlikely(!trylock_page(page))) {
+ get_page(page);
+ _pmd = *pmd;
+ spin_unlock(ptl);
+ lock_page(page);
+ spin_lock(ptl);
+ if (unlikely(!pmd_same(*pmd, _pmd))) {
+ unlock_page(page);
+ put_page(page);
+ page = NULL;
+ goto repeat;
+ }
put_page(page);
- page = NULL;
- goto repeat;
}
- put_page(page);
+ do_unlock_page = true;
}
}
if (PageMlocked(page))
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
spin_unlock(ptl);
- if (!was_locked && page)
+ if (do_unlock_page)
unlock_page(page);
/*
* No need to double call mmu_notifier->invalidate_range() callback.
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static inline bool PageHugeFreed(struct page *head)
+{
+ return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, 0);
+}
+
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
+ SetPageHugeFreed(page);
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
+ ClearPageHugeFreed(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
*/
bool page_huge_active(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return PageHead(page) && PagePrivate(&page[1]);
+ return PageHeadHuge(page) && PagePrivate(&page[1]);
}
/* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
SetPagePrivate(&page[1]);
spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
+ ClearPageHugeFreed(page);
spin_unlock(&hugetlb_lock);
}
{
int rc = -EBUSY;
+retry:
/* Not to disrupt normal path by vainly holding hugetlb_lock */
if (!PageHuge(page))
return 0;
int nid = page_to_nid(head);
if (h->free_huge_pages - h->resv_huge_pages == 0)
goto out;
+
+ /*
+ * We should make sure that the page is already on the free list
+ * when it is dissolved.
+ */
+ if (unlikely(!PageHugeFreed(head))) {
+ spin_unlock(&hugetlb_lock);
+ cond_resched();
+
+ /*
+ * Theoretically, we should return -EBUSY when we
+ * encounter this race. In fact, we have a chance
+ * to successfully dissolve the page if we do a
+	 * retry, because the race window is quite small.
+	 * If we seize this opportunity, it is an optimization
+	 * for increasing the success rate of dissolving the page.
+ */
+ goto retry;
+ }
+
/*
* Move PageHWPoison flag from head page to the raw error page,
* which makes any subpages rather than the error page reusable.
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ int zeroed;
+
if ((--needed) < 0)
break;
/*
* This page is now managed by the hugetlb allocator and has
* no users -- drop the buddy allocator's reference.
*/
- VM_BUG_ON_PAGE(!put_page_testzero(page), page);
+ zeroed = put_page_testzero(page);
+ VM_BUG_ON_PAGE(!zeroed, page);
enqueue_huge_page(h, page);
}
free:
{
bool ret = true;
- VM_BUG_ON_PAGE(!PageHead(page), page);
spin_lock(&hugetlb_lock);
- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+ if (!PageHeadHuge(page) || !page_huge_active(page) ||
+ !get_page_unless_zero(page)) {
ret = false;
goto unlock;
}
static inline bool addr_has_metadata(const void *addr)
{
- return true;
+ return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
*
* Find @size free area aligned to @align in the specified range and node.
*
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
* Return:
* Found address on success, 0 on failure.
*/
phys_addr_t end, int nid,
enum memblock_flags flags)
{
- phys_addr_t kernel_end, ret;
-
/* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
end == MEMBLOCK_ALLOC_KASAN)
/* avoid allocating the first page */
start = max_t(phys_addr_t, start, PAGE_SIZE);
end = max(start, end);
- kernel_end = __pa_symbol(_end);
-
- /*
- * try bottom-up allocation only when bottom-up mode
- * is set and @end is above the kernel image.
- */
- if (memblock_bottom_up() && end > kernel_end) {
- phys_addr_t bottom_up_start;
-
- /* make sure we will allocate above the kernel */
- bottom_up_start = max(start, kernel_end);
- /* ok, try bottom-up allocation first */
- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
- size, align, nid, flags);
- if (ret)
- return ret;
-
- /*
- * we always limit bottom-up allocation above the kernel,
- * but top-down allocation doesn't have the limit, so
- * retrying top-down allocation may succeed when bottom-up
- * allocation failed.
- *
- * bottom-up allocation is expected to be fail very rarely,
- * so we use WARN_ONCE() here to see the stack trace if
- * fail happens.
- */
- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
- "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
- }
-
- return __memblock_find_range_top_down(start, end, size, align, nid,
- flags);
+ if (memblock_bottom_up())
+ return __memblock_find_range_bottom_up(start, end, size, align,
+ nid, flags);
+ else
+ return __memblock_find_range_top_down(start, end, size, align,
+ nid, flags);
}
/**
return -ENOSYS;
}
+ if (page_count(hpage) == 1) {
+ /* page was freed from under us. So we are done. */
+ putback_active_hugepage(hpage);
+ return MIGRATEPAGE_SUCCESS;
+ }
+
new_hpage = get_new_page(hpage, private);
if (!new_hpage)
return -ENOMEM;
* Initialize all valid struct pages in the range [spfn, epfn) and mark them
* PageReserved(). Return the number of struct pages that were initialized.
*/
-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
- int zone, int nid)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
{
- unsigned long pfn, zone_spfn, zone_epfn;
+ unsigned long pfn;
u64 pgcnt = 0;
- zone_spfn = arch_zone_lowest_possible_pfn[zone];
- zone_epfn = arch_zone_highest_possible_pfn[zone];
-
- spfn = clamp(spfn, zone_spfn, zone_epfn);
- epfn = clamp(epfn, zone_spfn, zone_epfn);
-
for (pfn = spfn; pfn < epfn; pfn++) {
if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
+ pageblock_nr_pages - 1;
continue;
}
-
- __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+ /*
+ * Use a fake node/zone (0) for now. Some of these pages
+ * (in memblock.reserved but not in memblock.memory) will
+ * get re-initialized via reserve_bootmem_region() later.
+ */
+ __init_single_page(pfn_to_page(pfn), pfn, 0, 0);
__SetPageReserved(pfn_to_page(pfn));
pgcnt++;
}
}
/*
- * Only struct pages that correspond to ranges defined by memblock.memory
- * are zeroed and initialized by going through __init_single_page() during
- * memmap_init().
- *
- * But, there could be struct pages that correspond to holes in
- * memblock.memory. This can happen because of the following reasons:
- * - phyiscal memory bank size is not necessarily the exact multiple of the
- * arbitrary section size
- * - early reserved memory may not be listed in memblock.memory
- * - memory layouts defined with memmap= kernel parameter may not align
- * nicely with memmap sections
+ * Only struct pages that are backed by physical memory are zeroed and
+ * initialized by going through __init_single_page(). But, there are some
+ * struct pages which are reserved in memblock allocator and their fields
+ * may be accessed (for example page_to_pfn() on some configuration accesses
+ * flags). We must explicitly initialize those struct pages.
*
- * Explicitly initialize those struct pages so that:
- * - PG_Reserved is set
- * - zone link is set accorging to the architecture constrains
- * - node is set to node id of the next populated region except for the
- * trailing hole where last node id is used
+ * This function also addresses a similar issue where struct pages are left
+ * uninitialized because the physical address range is not covered by
+ * memblock.memory or memblock.reserved. That could happen when memblock
+ * layout is manually configured via memmap=, or when the highest physical
+ * address (max_pfn) does not end on a section boundary.
*/
-static void __init init_zone_unavailable_mem(int zone)
+static void __init init_unavailable_mem(void)
{
- unsigned long start, end;
- int i, nid;
- u64 pgcnt;
- unsigned long next = 0;
+ phys_addr_t start, end;
+ u64 i, pgcnt;
+ phys_addr_t next = 0;
/*
- * Loop through holes in memblock.memory and initialize struct
- * pages corresponding to these holes
+ * Loop through unavailable ranges not covered by memblock.memory.
*/
pgcnt = 0;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
+ for_each_mem_range(i, &start, &end) {
if (next < start)
- pgcnt += init_unavailable_range(next, start, zone, nid);
+ pgcnt += init_unavailable_range(PFN_DOWN(next),
+ PFN_UP(start));
next = end;
}
/*
- * Last section may surpass the actual end of memory (e.g. we can
- * have 1Gb section and 512Mb of RAM pouplated).
- * Make sure that memmap has a well defined state in this case.
+ * Early sections always have a fully populated memmap for the whole
+ * section - see pfn_valid(). If the last section has holes at the
+ * end and that section is marked "online", the memmap will be
+ * considered initialized. Make sure that memmap has a well defined
+ * state.
*/
- end = round_up(max_pfn, PAGES_PER_SECTION);
- pgcnt += init_unavailable_range(next, end, zone, nid);
+ pgcnt += init_unavailable_range(PFN_DOWN(next),
+ round_up(max_pfn, PAGES_PER_SECTION));
/*
* Struct pages that do not have backing memory. This could be because
* firmware is using some of this memory, or for some other reasons.
*/
if (pgcnt)
- pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
-}
-
-static void __init init_unavailable_mem(void)
-{
- int zone;
-
- for (zone = 0; zone < ZONE_MOVABLE; zone++)
- init_zone_unavailable_mem(zone);
+ pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
}
#else
static inline void __init init_unavailable_mem(void)
s->kobj.kset = kset;
err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
- if (err) {
- kobject_put(&s->kobj);
+ if (err)
goto out;
- }
err = sysfs_create_group(&s->kobj, &slab_attr_group);
if (err)
int br_mrp_ring_port_open(struct net_device *dev, u8 loc);
int br_mrp_in_port_open(struct net_device *dev, u8 loc);
+/* MRP protocol data units */
+struct br_mrp_tlv_hdr {
+ __u8 type;
+ __u8 length;
+};
+
+struct br_mrp_common_hdr {
+ __be16 seq_id;
+ __u8 domain[MRP_DOMAIN_UUID_LENGTH];
+};
+
+struct br_mrp_ring_test_hdr {
+ __be16 prio;
+ __u8 sa[ETH_ALEN];
+ __be16 port_role;
+ __be16 state;
+ __be16 transitions;
+ __be32 timestamp;
+} __attribute__((__packed__));
+
+struct br_mrp_in_test_hdr {
+ __be16 id;
+ __u8 sa[ETH_ALEN];
+ __be16 port_role;
+ __be16 state;
+ __be16 transitions;
+ __be32 timestamp;
+} __attribute__((__packed__));
+
#endif /* _BR_PRIVATE_MRP_H */
old = neigh->nud_state;
err = -EPERM;
- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
- (old & (NUD_NOARP | NUD_PERMANENT)))
- goto out;
if (neigh->dead) {
NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
+ new = old;
goto out;
}
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ (old & (NUD_NOARP | NUD_PERMANENT)))
+ goto out;
ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify);
fld.saddr = dnet_select_source(dev_out, 0,
RT_SCOPE_HOST);
if (!fld.daddr)
- goto out;
+ goto done;
}
fld.flowidn_oif = LOOPBACK_IFINDEX;
res.type = RTN_LOCAL;
u8 net_id; /* for PRP, it occupies most significant 3 bits
* of lan_id
*/
- unsigned char sup_multicast_addr[ETH_ALEN];
+ unsigned char sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
+ /* Align to u16 boundary to avoid unaligned access
+ * in ether_addr_equal
+ */
#ifdef CONFIG_DEBUG_FS
struct dentry *node_tbl_root;
#endif
}
dev->needed_headroom = t_hlen + hlen;
- mtu -= (dev->hard_header_len + t_hlen);
+ mtu -= t_hlen;
if (mtu < IPV4_MIN_MTU)
mtu = IPV4_MIN_MTU;
nt = netdev_priv(dev);
t_hlen = nt->hlen + sizeof(struct iphdr);
dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ dev->max_mtu = IP_MAX_MTU - t_hlen;
ip_tunnel_add(itn, nt);
return nt;
int mtu;
tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
- pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
+ pkt_size = skb->len - tunnel_hlen;
if (df)
- mtu = dst_mtu(&rt->dst) - dev->hard_header_len
- - sizeof(struct iphdr) - tunnel_hlen;
+ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
else
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ int max_mtu = IP_MAX_MTU - t_hlen;
if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
mtu = ip_tunnel_bind_dev(dev);
if (tb[IFLA_MTU]) {
- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
+ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
- (unsigned int)(max - sizeof(struct iphdr)));
+ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
}
err = dev_set_mtu(dev, mtu);
} else if (tcp_is_rack(sk)) {
u32 prior_retrans = tp->retrans_out;
- tcp_rack_mark_lost(sk);
+ if (tcp_rack_mark_lost(sk))
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
if (prior_retrans > tp->retrans_out)
*ack_flag |= FLAG_LOST_RETRANS;
}
} else {
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- when, TCP_RTO_MAX);
+ when = tcp_clamp_probe0_to_user_timeout(sk, when);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
}
}
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
- /* If needed, reset TLP/RTO timer; RACK may later override this. */
- if (flag & FLAG_SET_XMIT_TIMER)
- tcp_set_xmit_timer(sk);
if (tcp_ack_is_dubious(sk, flag)) {
if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
&rexmit);
}
+	/* If needed, reset TLP/RTO timer when RACK doesn't set it. */
+ if (flag & FLAG_SET_XMIT_TIMER)
+ tcp_set_xmit_timer(sk);
+
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
sk_dst_confirm(sk);
*/
timeout = TCP_RESOURCE_PROBE_INTERVAL;
}
+
+ timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
}
}
}
-void tcp_rack_mark_lost(struct sock *sk)
+bool tcp_rack_mark_lost(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 timeout;
if (!tp->rack.advanced)
- return;
+ return false;
/* Reset the advanced flag to avoid unnecessary queue scanning */
tp->rack.advanced = 0;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
+ return !!timeout;
}
/* Record the most recently (re)sent time among the (s)acked packets
return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 remaining;
+ s32 elapsed;
+
+ if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
+ return when;
+
+ elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
+ if (unlikely(elapsed < 0))
+ elapsed = 0;
+ remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
+ remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
+
+ return min_t(u32, remaining, when);
+}
+
/**
* tcp_write_err() - close socket and save error info
* @sk: The socket the error has appeared on.
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
+static void __udpv4_gso_segment_csum(struct sk_buff *seg,
+ __be32 *oldip, __be32 *newip,
+ __be16 *oldport, __be16 *newport)
+{
+ struct udphdr *uh;
+ struct iphdr *iph;
+
+ if (*oldip == *newip && *oldport == *newport)
+ return;
+
+ uh = udp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ if (uh->check) {
+ inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip,
+ true);
+ inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport,
+ false);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ *oldport = *newport;
+
+ csum_replace4(&iph->check, *oldip, *newip);
+ *oldip = *newip;
+}
+
+static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+ struct sk_buff *seg;
+ struct udphdr *uh, *uh2;
+ struct iphdr *iph, *iph2;
+
+ seg = segs;
+ uh = udp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) &&
+ (udp_hdr(seg)->source == udp_hdr(seg->next)->source) &&
+ (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
+ (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr))
+ return segs;
+
+ while ((seg = seg->next)) {
+ uh2 = udp_hdr(seg);
+ iph2 = ip_hdr(seg);
+
+ __udpv4_gso_segment_csum(seg,
+ &iph2->saddr, &iph->saddr,
+ &uh2->source, &uh->source);
+ __udpv4_gso_segment_csum(seg,
+ &iph2->daddr, &iph->daddr,
+ &uh2->dest, &uh->dest);
+ }
+
+ return segs;
+}
+
static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
- netdev_features_t features)
+ netdev_features_t features,
+ bool is_ipv6)
{
unsigned int mss = skb_shinfo(skb)->gso_size;
udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
- return skb;
+ return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
}
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
- netdev_features_t features)
+ netdev_features_t features, bool is_ipv6)
{
struct sock *sk = gso_skb->sk;
unsigned int sum_truesize = 0;
__be16 newlen;
if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
- return __udp_gso_segment_list(gso_skb, features);
+ return __udp_gso_segment_list(gso_skb, features, is_ipv6);
mss = skb_shinfo(gso_skb)->gso_size;
if (gso_skb->len <= sizeof(*uh) + mss)
goto out;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- return __udp_gso_segment(skb, features);
+ return __udp_gso_segment(skb, features, false);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- return __udp_gso_segment(skb, features);
+ return __udp_gso_segment(skb, features, true);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
break;
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
if (!ealg->pfkey_supported)
continue;
- if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+ if (!(ealg_tmpl_set(t, ealg)))
continue;
for (k = 1; ; k++) {
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
}
timer_setup(&lapb->t1timer, NULL, 0);
timer_setup(&lapb->t2timer, NULL, 0);
+ lapb->t1timer_stop = true;
+ lapb->t2timer_stop = true;
lapb->t1 = LAPB_DEFAULT_T1;
lapb->t2 = LAPB_DEFAULT_T2;
lapb->mode = LAPB_DEFAULT_MODE;
lapb->window = LAPB_DEFAULT_WINDOW;
lapb->state = LAPB_STATE_0;
+
+ spin_lock_init(&lapb->lock);
refcount_set(&lapb->refcnt, 1);
out:
return lapb;
goto out;
lapb_put(lapb);
+ /* Wait for other refs to "lapb" to drop */
+ while (refcount_read(&lapb->refcnt) > 2)
+ usleep_range(1, 10);
+
+ spin_lock_bh(&lapb->lock);
+
lapb_stop_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb_clear_queues(lapb);
+ spin_unlock_bh(&lapb->lock);
+
+ /* Wait for running timers to stop */
+ del_timer_sync(&lapb->t1timer);
+ del_timer_sync(&lapb->t2timer);
+
__lapb_remove_cb(lapb);
lapb_put(lapb);
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
parms->t1 = lapb->t1 / HZ;
parms->t2 = lapb->t2 / HZ;
parms->n2 = lapb->n2;
else
parms->t2timer = (lapb->t2timer.expires - jiffies) / HZ;
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
rc = LAPB_OK;
out:
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
rc = LAPB_INVALUE;
if (parms->t1 < 1 || parms->t2 < 1 || parms->n2 < 1)
goto out_put;
rc = LAPB_OK;
out_put:
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
rc = LAPB_OK;
if (lapb->state == LAPB_STATE_1)
goto out_put;
rc = LAPB_OK;
out_put:
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
}
EXPORT_SYMBOL(lapb_connect_request);
-int lapb_disconnect_request(struct net_device *dev)
+static int __lapb_disconnect_request(struct lapb_cb *lapb)
{
- struct lapb_cb *lapb = lapb_devtostruct(dev);
- int rc = LAPB_BADTOKEN;
-
- if (!lapb)
- goto out;
-
switch (lapb->state) {
case LAPB_STATE_0:
- rc = LAPB_NOTCONNECTED;
- goto out_put;
+ return LAPB_NOTCONNECTED;
case LAPB_STATE_1:
lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
- rc = LAPB_NOTCONNECTED;
- goto out_put;
+ return LAPB_NOTCONNECTED;
case LAPB_STATE_2:
- rc = LAPB_OK;
- goto out_put;
+ return LAPB_OK;
}
lapb_clear_queues(lapb);
lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
- rc = LAPB_OK;
-out_put:
+ return LAPB_OK;
+}
+
+int lapb_disconnect_request(struct net_device *dev)
+{
+ struct lapb_cb *lapb = lapb_devtostruct(dev);
+ int rc = LAPB_BADTOKEN;
+
+ if (!lapb)
+ goto out;
+
+ spin_lock_bh(&lapb->lock);
+
+ rc = __lapb_disconnect_request(lapb);
+
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
rc = LAPB_NOTCONNECTED;
if (lapb->state != LAPB_STATE_3 && lapb->state != LAPB_STATE_4)
goto out_put;
lapb_kick(lapb);
rc = LAPB_OK;
out_put:
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
int rc = LAPB_BADTOKEN;
if (lapb) {
+ spin_lock_bh(&lapb->lock);
lapb_data_input(lapb, skb);
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
rc = LAPB_OK;
}
if (!lapb)
return NOTIFY_DONE;
+ spin_lock_bh(&lapb->lock);
+
switch (event) {
case NETDEV_UP:
lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name);
break;
case NETDEV_GOING_DOWN:
if (netif_carrier_ok(dev))
- lapb_disconnect_request(dev);
+ __lapb_disconnect_request(lapb);
break;
case NETDEV_DOWN:
lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name);
break;
}
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
return NOTIFY_DONE;
}
skb = skb_dequeue(&lapb->write_queue);
do {
- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+ skbn = skb_copy(skb, GFP_ATOMIC);
+ if (!skbn) {
skb_queue_head(&lapb->write_queue, skb);
break;
}
lapb->t1timer.function = lapb_t1timer_expiry;
lapb->t1timer.expires = jiffies + lapb->t1;
+ lapb->t1timer_stop = false;
add_timer(&lapb->t1timer);
}
lapb->t2timer.function = lapb_t2timer_expiry;
lapb->t2timer.expires = jiffies + lapb->t2;
+ lapb->t2timer_stop = false;
add_timer(&lapb->t2timer);
}
void lapb_stop_t1timer(struct lapb_cb *lapb)
{
+ lapb->t1timer_stop = true;
del_timer(&lapb->t1timer);
}
void lapb_stop_t2timer(struct lapb_cb *lapb)
{
+ lapb->t2timer_stop = true;
del_timer(&lapb->t2timer);
}
{
struct lapb_cb *lapb = from_timer(lapb, t, t2timer);
+ spin_lock_bh(&lapb->lock);
+ if (timer_pending(&lapb->t2timer)) /* A new timer has been set up */
+ goto out;
+ if (lapb->t2timer_stop) /* The timer has been stopped */
+ goto out;
+
if (lapb->condition & LAPB_ACK_PENDING_CONDITION) {
lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
lapb_timeout_response(lapb);
}
+
+out:
+ spin_unlock_bh(&lapb->lock);
}
static void lapb_t1timer_expiry(struct timer_list *t)
{
struct lapb_cb *lapb = from_timer(lapb, t, t1timer);
+ spin_lock_bh(&lapb->lock);
+ if (timer_pending(&lapb->t1timer)) /* A new timer has been set up */
+ goto out;
+ if (lapb->t1timer_stop) /* The timer has been stopped */
+ goto out;
+
switch (lapb->state) {
/*
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
if (lapb->mode & LAPB_EXTENDED) {
lapb->state = LAPB_STATE_0;
lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
lapb_stop_t2timer(lapb);
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
lapb_requeue_frames(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
lapb_transmit_frmr(lapb);
}
lapb_start_t1timer(lapb);
+
+out:
+ spin_unlock_bh(&lapb->lock);
}
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
ret = drv_sta_add(local, sdata, &sta->sta);
- if (ret == 0)
+ if (ret == 0) {
sta->uploaded = true;
+ if (rcu_access_pointer(sta->sta.rates))
+ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
+ }
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
drv_sta_remove(local, sdata, &sta->sta);
IEEE80211_QUEUE_STOP_REASON_FLUSH,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
IEEE80211_QUEUE_STOP_REASONS,
};
if (ret)
return ret;
+ ieee80211_stop_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
+ synchronize_net();
+
ieee80211_do_stop(sdata, false);
ieee80211_teardown_sdata(sdata);
err = ieee80211_do_open(&sdata->wdev, false);
WARN(err, "type change: do_open returned %d", err);
+ ieee80211_wake_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
return ret;
}
if (old)
kfree_rcu(old, rcu_head);
- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+ if (sta->uploaded)
+ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
}
if (wide_bw_chansw_ie) {
+ u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
struct ieee80211_vht_operation vht_oper = {
.chan_width =
wide_bw_chansw_ie->new_channel_width,
.center_freq_seg0_idx =
wide_bw_chansw_ie->new_center_freq_seg0,
- .center_freq_seg1_idx =
- wide_bw_chansw_ie->new_center_freq_seg1,
+ .center_freq_seg1_idx = new_seg1,
/* .basic_mcs_set doesn't matter */
};
- struct ieee80211_ht_operation ht_oper = {};
+ struct ieee80211_ht_operation ht_oper = {
+ .operation_mode =
+ cpu_to_le16(new_seg1 <<
+ IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
+ };
/* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
* to the previously parsed chandef
kfree(elem);
}
-static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
- struct nft_set *set,
- struct nft_expr *expr_array[])
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_expr *expr_array[])
{
struct nft_expr *expr;
int err, i, k;
err = -EOPNOTSUPP;
goto err_expr_free;
}
+ } else if (set->num_exprs > 0) {
+ err = nft_set_elem_expr_clone(ctx, set, priv->expr_array);
+ if (err < 0)
+ return err;
+
+ priv->num_exprs = set->num_exprs;
}
nft_set_ext_prepare(&priv->tmpl);
nft_dynset_ext_add_expr(priv);
if (set->flags & NFT_SET_TIMEOUT) {
- if (timeout || set->timeout)
+ if (timeout || set->timeout) {
+ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
+ }
}
priv->timeout = timeout;
nf_jiffies64_to_msecs(priv->timeout),
NFTA_DYNSET_PAD))
goto nla_put_failure;
- if (priv->num_exprs == 1) {
- if (nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr_array[0]))
- goto nla_put_failure;
- } else if (priv->num_exprs > 1) {
- struct nlattr *nest;
-
- nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
- if (!nest)
- goto nla_put_failure;
-
- for (i = 0; i < priv->num_exprs; i++) {
- if (nft_expr_dump(skb, NFTA_LIST_ELEM,
- priv->expr_array[i]))
+ if (priv->set->num_exprs == 0) {
+ if (priv->num_exprs == 1) {
+ if (nft_expr_dump(skb, NFTA_DYNSET_EXPR,
+ priv->expr_array[0]))
goto nla_put_failure;
+ } else if (priv->num_exprs > 1) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+ for (i = 0; i < priv->num_exprs; i++) {
+ if (nft_expr_dump(skb, NFTA_LIST_ELEM,
+ priv->expr_array[i]))
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
}
- nla_nest_end(skb, nest);
}
if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
goto nla_put_failure;
if (!dev->polling) {
device_unlock(&dev->dev);
+ nfc_put_device(dev);
return -EINVAL;
}
if (addr->target_idx > dev->target_next_idx - 1 ||
addr->target_idx < dev->target_next_idx - dev->n_targets) {
rc = -EINVAL;
- goto error;
+ goto put_dev;
}
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
if (args->nr_local == 0)
return -EINVAL;
+ if (args->nr_local > UIO_MAXIOV)
+ return -EMSGSIZE;
+
iov->iov = kcalloc(args->nr_local,
sizeof(struct rds_iovec),
GFP_KERNEL);
goto error_security;
}
- ret = register_pernet_subsys(&rxrpc_net_ops);
+ ret = register_pernet_device(&rxrpc_net_ops);
if (ret)
goto error_pernet;
error_sock:
proto_unregister(&rxrpc_proto);
error_proto:
- unregister_pernet_subsys(&rxrpc_net_ops);
+ unregister_pernet_device(&rxrpc_net_ops);
error_pernet:
rxrpc_exit_security();
error_security:
unregister_key_type(&key_type_rxrpc);
sock_unregister(PF_RXRPC);
proto_unregister(&rxrpc_proto);
- unregister_pernet_subsys(&rxrpc_net_ops);
+ unregister_pernet_device(&rxrpc_net_ops);
ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
tail = b->peer_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_peer *peer = b->peer_backlog[tail];
+ rxrpc_put_local(peer->local);
kfree(peer);
tail = (tail + 1) & (size - 1);
}
#include <linux/uaccess.h>
#include <linux/hashtable.h>
+#include "auth_gss_internal.h"
#include "../netns.h"
#include <trace/events/rpcgss.h>
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, size_t len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static inline const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
-{
- const void *q;
- unsigned int len;
-
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- dest->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(dest->data == NULL))
- return ERR_PTR(-ENOMEM);
- dest->len = len;
- return q;
-}
-
static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
--- /dev/null
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * linux/net/sunrpc/auth_gss/auth_gss_internal.h
+ *
+ * Internal definitions for RPCSEC_GSS client authentication
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ */
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/sunrpc/xdr.h>
+
+static inline const void *
+simple_get_bytes(const void *p, const void *end, void *res, size_t len)
+{
+ const void *q = (const void *)((const char *)p + len);
+ if (unlikely(q > end || q < p))
+ return ERR_PTR(-EFAULT);
+ memcpy(res, p, len);
+ return q;
+}
+
+static inline const void *
+simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+{
+ const void *q;
+ unsigned int len;
+
+ p = simple_get_bytes(p, end, &len, sizeof(len));
+ if (IS_ERR(p))
+ return p;
+ q = (const void *)((const char *)p + len);
+ if (unlikely(q > end || q < p))
+ return ERR_PTR(-EFAULT);
+ if (len) {
+ dest->data = kmemdup(p, len, GFP_NOFS);
+ if (unlikely(dest->data == NULL))
+ return ERR_PTR(-ENOMEM);
+ } else
+ dest->data = NULL;
+ dest->len = len;
+ return q;
+}
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/gss_krb5_enctypes.h>
+#include "auth_gss_internal.h"
+
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
return NULL;
}
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, int len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
-{
- const void *q;
- unsigned int len;
-
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- res->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(res->data == NULL))
- return ERR_PTR(-ENOMEM);
- res->len = len;
- return q;
-}
-
static inline const void *
get_key(const void *p, const void *end,
struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
unsigned int offset, len, remaining;
struct bio_vec *bvec;
- bvec = xdr->bvec;
- offset = xdr->page_base;
+ bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
+ offset = offset_in_page(xdr->page_base);
remaining = xdr->page_len;
flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
while (remaining > 0) {
if (remaining <= PAGE_SIZE && tail->iov_len == 0)
flags = 0;
- len = min(remaining, bvec->bv_len);
+
+ len = min(remaining, bvec->bv_len - offset);
ret = kernel_sendpage(sock, bvec->bv_page,
bvec->bv_offset + offset,
len, flags);
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
if (check_cb(dev)) {
- /* This flag is only checked if the return value is success. */
- port_obj_info->handled = true;
- return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
- extack);
+ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+ extack);
+ if (err != -EOPNOTSUPP)
+ port_obj_info->handled = true;
+ return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the
int err = -EOPNOTSUPP;
if (check_cb(dev)) {
- /* This flag is only checked if the return value is success. */
- port_obj_info->handled = true;
- return del_cb(dev, port_obj_info->obj);
+ err = del_cb(dev, port_obj_info->obj);
+ if (err != -EOPNOTSUPP)
+ port_obj_info->handled = true;
+ return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the
int err = -EOPNOTSUPP;
if (check_cb(dev)) {
- port_attr_info->handled = true;
- return set_cb(dev, port_attr_info->attr,
- port_attr_info->trans);
+ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
+ if (err != -EOPNOTSUPP)
+ port_attr_info->handled = true;
+ return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
} else if (sock->type == SOCK_STREAM) {
- const struct vsock_transport *transport = vsk->transport;
+ const struct vsock_transport *transport;
+
lock_sock(sk);
+ transport = vsk->transport;
+
/* Listening sockets that have connections in their accept
* queue can be read.
*/
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
lock_sock(sk);
+ transport = vsk->transport;
+
err = vsock_auto_bind(vsk);
if (err)
goto out;
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
lock_sock(sk);
+ transport = vsk->transport;
+
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
COPY_IN(val);
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
total_written = 0;
err = 0;
lock_sock(sk);
+ transport = vsk->transport;
+
/* Callers should not provide a destination with stream sockets. */
if (msg->msg_namelen) {
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
err = 0;
lock_sock(sk);
+ transport = vsk->transport;
+
if (!transport || sk->sk_state != TCP_ESTABLISHED) {
/* Recvmsg is supposed to return 0 if a peer performs an
* orderly shutdown. Differentiate between that case and when a
int call_commit_handler(struct net_device *dev)
{
#ifdef CONFIG_WIRELESS_EXT
- if ((netif_running(dev)) &&
- (dev->wireless_handlers->standard[0] != NULL))
+ if (netif_running(dev) &&
+ dev->wireless_handlers &&
+ dev->wireless_handlers->standard[0])
/* Call the commit handler on the driver */
return dev->wireless_handlers->standard[0](dev, NULL,
NULL, NULL);
/* only the first xfrm gets the encap type */
encap_type = 0;
- if (async && x->repl->recheck(x, skb, seq)) {
+ if (x->repl->recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
const xfrm_address_t *b,
u8 prefixlen, u16 family)
{
+ u32 ma, mb, mask;
unsigned int pdw, pbi;
int delta = 0;
switch (family) {
case AF_INET:
- if (sizeof(long) == 4 && prefixlen == 0)
- return ntohl(a->a4) - ntohl(b->a4);
- return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
- (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
+ if (prefixlen == 0)
+ return 0;
+ mask = ~0U << (32 - prefixlen);
+ ma = ntohl(a->a4) & mask;
+ mb = ntohl(b->a4) & mask;
+ if (ma < mb)
+ delta = -1;
+ else if (ma > mb)
+ delta = 1;
+ break;
case AF_INET6:
pdw = prefixlen >> 5;
pbi = prefixlen & 0x1f;
return delta;
}
if (pbi) {
- u32 mask = ~0u << (32 - pbi);
-
- delta = (ntohl(a->a6[pdw]) & mask) -
- (ntohl(b->a6[pdw]) & mask);
+ mask = ~0U << (32 - pbi);
+ ma = ntohl(a->a6[pdw]) & mask;
+ mb = ntohl(b->a6[pdw]) & mask;
+ if (ma < mb)
+ delta = -1;
+ else if (ma > mb)
+ delta = 1;
}
break;
default:
xflo.flags = flags;
/* To accelerate a bit... */
- if ((dst_orig->flags & DST_NOXFRM) ||
- !net->xfrm.policy_count[XFRM_POLICY_OUT])
+ if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
+ !net->xfrm.policy_count[XFRM_POLICY_OUT]))
goto nopol;
xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
# scripts contains sources for various helper programs used throughout
# the kernel for the build process.
+CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
+CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null)
+
hostprogs-always-$(CONFIG_BUILD_BIN2C) += bin2c
hostprogs-always-$(CONFIG_KALLSYMS) += kallsyms
hostprogs-always-$(BUILD_C_RECORDMCOUNT) += recordmcount
HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
-HOSTLDLIBS_sign-file = -lcrypto
-HOSTLDLIBS_extract-cert = -lcrypto
+HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
+HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
+HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
ifdef CONFIG_UNWINDER_ORC
ifeq ($(ARCH),x86_64)
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2004 Matt Mackall <mpm@selenic.com>
#
}
}
-# discourage the use of boolean for type definition attributes of Kconfig options
- if ($realfile =~ /Kconfig/ &&
- $line =~ /^\+\s*\bboolean\b/) {
- WARN("CONFIG_TYPE_BOOLEAN",
- "Use of boolean is deprecated, please use bool instead.\n" . $herecurr);
- }
-
if (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&
($line =~ /\+(EXTRA_[A-Z]+FLAGS).*/)) {
my $flag = $1;
-#!/usr/bin/env python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) Google LLC, 2018
-#!/usr/bin/env python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) Google LLC, 2020
-#!/usr/bin/env python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# diffconfig - a tool to compare .config files.
fi
fi
-# For scripts/gcc-plugin.sh
+# To set GCC_PLUGINS
if arg_contain -print-file-name=plugin "$@"; then
plugin_dir=$(mktemp -d)
- sed -n 's/.*#include "\(.*\)"/\1/p' $(dirname $0)/../gcc-plugins/gcc-common.h |
- while read header
- do
- mkdir -p $plugin_dir/include/$(dirname $header)
- touch $plugin_dir/include/$header
- done
+ mkdir -p $plugin_dir/include
+ touch $plugin_dir/include/plugin-version.h
echo $plugin_dir
exit 0
-#!/usr/bin/env python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
#
# This determines how many parallel tasks "make" is expecting, as it is
"__crc_", /* modversions */
"__efistub_", /* arm64 EFI stub namespace */
"__kvm_nvhe_", /* arm64 non-VHE KVM namespace */
+ "__AArch64ADRPThunk_", /* arm64 lld */
+ "__ARMV5PILongThunk_", /* arm lld */
+ "__ARMV7PILongThunk_",
+ "__ThumbV7PILongThunk_",
+ "__LA25Thunk_", /* mips lld */
+ "__microLA25Thunk_",
NULL
};
# As a final fallback before giving up, check if $HOSTCC knows of a default
# ncurses installation (e.g. from a vendor-specific sysroot).
-if echo '#include <ncurses.h>' | "${HOSTCC}" -E - >/dev/null 2>&1; then
+if echo '#include <ncurses.h>' | ${HOSTCC} -E - >/dev/null 2>&1; then
echo cflags=\"-D_GNU_SOURCE\"
echo libs=\"-lncurses\"
exit 0
{
int size, ret;
kuid_t kroot;
+ u32 nsmagic, magic;
uid_t root, mappedroot;
char *tmpbuf = NULL;
struct vfs_cap_data *cap;
- struct vfs_ns_cap_data *nscap;
+ struct vfs_ns_cap_data *nscap = NULL;
struct dentry *dentry;
struct user_namespace *fs_ns;
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
if (is_v2header((size_t) ret, cap)) {
- /* If this is sizeof(vfs_cap_data) then we're ok with the
- * on-disk value, so return that. */
- if (alloc)
- *buffer = tmpbuf;
- else
- kfree(tmpbuf);
- return ret;
- } else if (!is_v3header((size_t) ret, cap)) {
- kfree(tmpbuf);
- return -EINVAL;
+ root = 0;
+ } else if (is_v3header((size_t) ret, cap)) {
+ nscap = (struct vfs_ns_cap_data *) tmpbuf;
+ root = le32_to_cpu(nscap->rootid);
+ } else {
+ size = -EINVAL;
+ goto out_free;
}
- nscap = (struct vfs_ns_cap_data *) tmpbuf;
- root = le32_to_cpu(nscap->rootid);
kroot = make_kuid(fs_ns, root);
/* If the root kuid maps to a valid uid in current ns, then return
* this as a nscap. */
mappedroot = from_kuid(current_user_ns(), kroot);
if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+ size = sizeof(struct vfs_ns_cap_data);
if (alloc) {
- *buffer = tmpbuf;
+ if (!nscap) {
+ /* v2 -> v3 conversion */
+ nscap = kzalloc(size, GFP_ATOMIC);
+ if (!nscap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
+ nsmagic = VFS_CAP_REVISION_3;
+ magic = le32_to_cpu(cap->magic_etc);
+ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+ nscap->magic_etc = cpu_to_le32(nsmagic);
+ } else {
+ /* use allocated v3 buffer */
+ tmpbuf = NULL;
+ }
nscap->rootid = cpu_to_le32(mappedroot);
- } else
- kfree(tmpbuf);
- return size;
+ *buffer = nscap;
+ }
+ goto out_free;
}
if (!rootid_owns_currentns(kroot)) {
- kfree(tmpbuf);
- return -EOPNOTSUPP;
+ size = -EOVERFLOW;
+ goto out_free;
}
/* This comes from a parent namespace. Return as a v2 capability */
size = sizeof(struct vfs_cap_data);
if (alloc) {
- *buffer = kmalloc(size, GFP_ATOMIC);
- if (*buffer) {
- struct vfs_cap_data *cap = *buffer;
- __le32 nsmagic, magic;
+ if (nscap) {
+ /* v3 -> v2 conversion */
+ cap = kzalloc(size, GFP_ATOMIC);
+ if (!cap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
magic = VFS_CAP_REVISION_2;
nsmagic = le32_to_cpu(nscap->magic_etc);
if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
cap->magic_etc = cpu_to_le32(magic);
} else {
- size = -ENOMEM;
+ /* use unconverted v2 */
+ tmpbuf = NULL;
}
+ *buffer = cap;
}
+out_free:
kfree(tmpbuf);
return size;
}
continue;
/*
- * The 'deps' array includes maximum three dependencies
- * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
+	 * The 'deps' array includes a maximum of four dependencies
+ * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fifth
* member of this array is a sentinel and should be
* negative value.
*
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0xa0c8,
},
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x43c8,
+ },
#endif
/* Elkhart Lake */
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
static const struct snd_pci_quirk vt2002p_fixups[] = {
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
- SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
+ SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
{}
};
static const struct dmi_system_id rn_acp_quirk_table[] = {
{
- /* Lenovo IdeaPad Flex 5 14ARE05, IdeaPad 5 15ARE05 */
+ /* Lenovo IdeaPad S340-14API */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81NB"),
+ }
+ },
+ {
+ /* Lenovo IdeaPad Flex 5 14ARE05 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81X2"),
+ }
+ },
+ {
+ /* Lenovo IdeaPad 5 15ARE05 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81YQ"),
}
},
{
.ops = &ak4458_dai_ops,
};
-static void ak4458_power_off(struct ak4458_priv *ak4458)
+static void ak4458_reset(struct ak4458_priv *ak4458, bool active)
{
if (ak4458->reset_gpiod) {
- gpiod_set_value_cansleep(ak4458->reset_gpiod, 0);
- usleep_range(1000, 2000);
- }
-}
-
-static void ak4458_power_on(struct ak4458_priv *ak4458)
-{
- if (ak4458->reset_gpiod) {
- gpiod_set_value_cansleep(ak4458->reset_gpiod, 1);
+ gpiod_set_value_cansleep(ak4458->reset_gpiod, active);
usleep_range(1000, 2000);
}
}
if (ak4458->mute_gpiod)
gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
- ak4458_power_on(ak4458);
+ ak4458_reset(ak4458, false);
ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
0x80, 0x80); /* ACKS bit = 1; 10000000 */
{
struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
- ak4458_power_off(ak4458);
+ ak4458_reset(ak4458, true);
}
#ifdef CONFIG_PM
regcache_cache_only(ak4458->regmap, true);
- ak4458_power_off(ak4458);
+ ak4458_reset(ak4458, true);
if (ak4458->mute_gpiod)
gpiod_set_value_cansleep(ak4458->mute_gpiod, 0);
if (ak4458->mute_gpiod)
gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
- ak4458_power_off(ak4458);
- ak4458_power_on(ak4458);
+ ak4458_reset(ak4458, true);
+ ak4458_reset(ak4458, false);
regcache_cache_only(ak4458->regmap, false);
regcache_mark_dirty(ak4458->regmap);
unsigned int alg)
{
struct wm_coeff_ctl *pos, *rslt = NULL;
+ const char *fw_txt = wm_adsp_fw_text[dsp->fw];
list_for_each_entry(pos, &dsp->ctl_list, list) {
if (!pos->subname)
continue;
if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+ strncmp(pos->fw_name, fw_txt,
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0 &&
pos->alg_region.alg == alg &&
pos->alg_region.type == type) {
rslt = pos;
.driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
SOF_RT715_DAI_ID_FIX),
},
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
+ },
+ .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+ SOF_RT715_DAI_ID_FIX |
+ SOF_SDW_FOUR_SPK),
+ },
{
.callback = sof_sdw_quirk_cb,
.matches = {
list_for_each_entry(dobj, &component->dobj_list, list) {
struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
- struct soc_enum *se =
- (struct soc_enum *)kcontrol->private_value;
- char **texts = dobj->control.dtexts;
+ struct soc_enum *se;
+ char **texts;
char chan_text[4];
- if (dobj->type != SND_SOC_DOBJ_ENUM ||
- dobj->control.kcontrol->put !=
- skl_tplg_multi_config_set_dmic)
+ if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
+ kcontrol->put != skl_tplg_multi_config_set_dmic)
continue;
+
+ se = (struct soc_enum *)kcontrol->private_value;
+ texts = dobj->control.dtexts;
sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
for (i = 0; i < se->items; i++) {
- struct snd_ctl_elem_value val;
+ struct snd_ctl_elem_value val = {};
if (strstr(texts[i], chan_text)) {
val.value.enumerated.item[0] = i;
.dpcm_playback = 1,
.ignore_suspend = 1,
.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+ .ignore = 1,
.init = mt8183_da7219_max98357_hdmi_init,
SND_SOC_DAILINK_REG(tdm),
},
}
}
- if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+ if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
dai_link->codecs->of_node = hdmi_codec;
+ dai_link->ignore = 0;
+ }
if (!dai_link->platforms->name)
dai_link->platforms->of_node = platform_node;
.ignore_suspend = 1,
.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
.ops = &mt8183_mt6358_tdm_ops,
+ .ignore = 1,
.init = mt8183_mt6358_ts3a227_max98357_hdmi_init,
SND_SOC_DAILINK_REG(tdm),
},
SND_SOC_DAIFMT_CBM_CFM;
}
- if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+ if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
dai_link->codecs->of_node = hdmi_codec;
+ dai_link->ignore = 0;
+ }
if (!dai_link->platforms->name)
dai_link->platforms->of_node = platform_node;
.startup = mt8192_mt6359_rt1015_rt5682_cap1_startup,
};
+static int
+mt8192_mt6359_rt5682_startup(struct snd_pcm_substream *substream)
+{
+ static const unsigned int channels[] = {
+ 1, 2
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+ };
+ static const unsigned int rates[] = {
+ 48000
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list channels failed\n");
+ return ret;
+ }
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8192_mt6359_rt5682_ops = {
+ .startup = mt8192_mt6359_rt5682_startup,
+};
+
/* FE */
SND_SOC_DAILINK_DEFS(playback1,
DAILINK_COMP_ARRAY(COMP_CPU("DL1")),
SND_SOC_DPCM_TRIGGER_PRE},
.dynamic = 1,
.dpcm_playback = 1,
+ .ops = &mt8192_mt6359_rt5682_ops,
SND_SOC_DAILINK_REG(playback3),
},
{
SND_SOC_DPCM_TRIGGER_PRE},
.dynamic = 1,
.dpcm_capture = 1,
+ .ops = &mt8192_mt6359_rt5682_ops,
SND_SOC_DAILINK_REG(capture2),
},
{
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_probe);
+static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct lpass_variant *variant = drvdata->variant;
+ int id = args->args[0];
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < variant->num_dai; i++) {
+ if (variant->dai_driver[i].id == id) {
+ *dai_name = variant->dai_driver[i].name;
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
.name = "lpass-cpu",
+ .of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
};
static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
.micmode = REG_FIELD_ID(0x0010, 4, 7, 5, 0x4),
.micmono = REG_FIELD_ID(0x0010, 3, 3, 5, 0x4),
.wssrc = REG_FIELD_ID(0x0010, 2, 2, 5, 0x4),
- .bitwidth = REG_FIELD_ID(0x0010, 0, 0, 5, 0x4),
+ .bitwidth = REG_FIELD_ID(0x0010, 0, 1, 5, 0x4),
.rdma_dyncclk = REG_FIELD_ID(0x6000, 12, 12, 4, 0x1000),
.rdma_bursten = REG_FIELD_ID(0x6000, 11, 11, 4, 0x1000),
#define LPAIF_WRDMAPERCNT_REG(v, chan) LPAIF_WRDMA_REG_ADDR(v, 0x14, (chan))
#define LPAIF_INTFDMA_REG(v, chan, reg, dai_id) \
- ((v->dai_driver[dai_id].id == LPASS_DP_RX) ? \
+ ((dai_id == LPASS_DP_RX) ? \
LPAIF_HDMI_RDMA##reg##_REG(v, chan) : \
LPAIF_RDMA##reg##_REG(v, chan))
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
ret = regmap_fields_write(dmactl->intf, id,
LPAIF_DMACTL_AUDINTF(dma_port));
if (ret) {
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
val_irqclr = LPAIF_IRQ_ALL(ch);
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
val_mask = LPAIF_IRQ_ALL(ch);
val_irqen = 0;
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
map = drvdata->lpaif_map;
reg = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
val = 0;
#include "lpass.h"
static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
- [MI2S_PRIMARY] = {
+ {
.id = MI2S_PRIMARY,
.name = "Primary MI2S",
.playback = {
},
.probe = &asoc_qcom_lpass_cpu_dai_probe,
.ops = &asoc_qcom_lpass_cpu_dai_ops,
- },
-
- [MI2S_SECONDARY] = {
+ }, {
.id = MI2S_SECONDARY,
.name = "Secondary MI2S",
.playback = {
},
.probe = &asoc_qcom_lpass_cpu_dai_probe,
.ops = &asoc_qcom_lpass_cpu_dai_ops,
- },
- [LPASS_DP_RX] = {
+ }, {
.id = LPASS_DP_RX,
.name = "Hdmi",
.playback = {
.rdma_channels = 5,
.hdmi_rdma_reg_base = 0x64000,
.hdmi_rdma_reg_stride = 0x1000,
- .hdmi_rdma_channels = 4,
+ .hdmi_rdma_channels = 3,
.dmactl_audif_start = 1,
.wrdma_reg_base = 0x18000,
.wrdma_reg_stride = 0x1000,
#include <linux/compiler.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <dt-bindings/sound/sc7180-lpass.h>
+#include <dt-bindings/sound/qcom,lpass.h>
#include "lpass-hdmi.h"
#define LPASS_AHBIX_CLOCK_FREQUENCY 131072000
{
struct snd_soc_dai_driver *dai_drv =
container_of(dobj, struct snd_soc_dai_driver, dobj);
- struct snd_soc_dai *dai;
+ struct snd_soc_dai *dai, *_dai;
if (pass != SOC_TPLG_PASS_PCM_DAI)
return;
if (dobj->ops && dobj->ops->dai_unload)
dobj->ops->dai_unload(comp, dobj);
- for_each_component_dais(comp, dai)
+ for_each_component_dais_safe(comp, dai, _dai)
if (dai->driver == dai_drv)
- dai->driver = NULL;
+ snd_soc_unregister_dai(dai);
list_del(&dobj->list);
}
return -EINVAL;
se->dobj.control.dvalues = devm_kcalloc(tplg->dev, le32_to_cpu(ec->items),
- sizeof(u32),
+ sizeof(*se->dobj.control.dvalues),
GFP_KERNEL);
if (!se->dobj.control.dvalues)
return -ENOMEM;
list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
/* register the DAI to the component */
- dai = devm_snd_soc_register_dai(tplg->dev, tplg->comp, dai_drv, false);
+ dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
if (!dai)
return -ENOMEM;
ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
if (ret != 0) {
dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
+ snd_soc_unregister_dai(dai);
return ret;
}
config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK
bool "SOF support for SoundWire"
- depends on SOUNDWIRE && ACPI
+ depends on ACPI
help
This adds support for SoundWire with Sound Open Firmware
for Intel(R) platforms.
config SND_SOC_SOF_INTEL_SOUNDWIRE
tristate
+ select SOUNDWIRE
select SOUNDWIRE_INTEL
help
This option is not user-selectable but automagically handled by
if (!id)
return -ENODEV;
- ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
- if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
- dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
- return -ENODEV;
+ if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+ ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
}
-
dev_dbg(dev, "ACPI DSP detected");
sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
const struct snd_sof_dsp_ops *ops;
int ret;
- ret = snd_intel_dsp_driver_probe(pci);
- if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
- dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
- return -ENODEV;
+ if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+ ret = snd_intel_dsp_driver_probe(pci);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
}
dev_dbg(&pci->dev, "PCI DSP detected");
unsigned int nr_rates;
int i, err;
+	/* Performing the rate verification may lead to unexpected USB bus
+	 * behavior afterwards for some unknown reason. Do this only for the
+ * known devices.
+ */
+ switch (USB_ID_VENDOR(chip->usb_id)) {
+ case 0x07fd: /* MOTU */
+ break;
+ default:
+ return 0; /* don't perform the validation as default */
+ }
+
table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
# 1: iptables -m policy rule count != 0
rval=$1
ip=$2
- lret=0
+ local lret=0
ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null
return 0
}
+# insert non-overlapping policies in a random order and check that
+# all of them can be fetched using the traffic selectors.
+check_random_order()
+{
+ local ns=$1
+ local log=$2
+
+ for i in $(seq 100); do
+ ip -net $ns xfrm policy flush
+ for j in $(seq 0 16 255 | sort -R); do
+ ip -net $ns xfrm policy add dst $j.0.0.0/24 dir out priority 10 action allow
+ done
+ for j in $(seq 0 16 255); do
+ if ! ip -net $ns xfrm policy get dst $j.0.0.0/24 dir out > /dev/null; then
+ echo "FAIL: $log" 1>&2
+ return 1
+ fi
+ done
+ done
+
+ for i in $(seq 100); do
+ ip -net $ns xfrm policy flush
+ for j in $(seq 0 16 255 | sort -R); do
+ local addr=$(printf "e000:0000:%02x00::/56" $j)
+ ip -net $ns xfrm policy add dst $addr dir out priority 10 action allow
+ done
+ for j in $(seq 0 16 255); do
+ local addr=$(printf "e000:0000:%02x00::/56" $j)
+ if ! ip -net $ns xfrm policy get dst $addr dir out > /dev/null; then
+ echo "FAIL: $log" 1>&2
+ return 1
+ fi
+ done
+ done
+
+ ip -net $ns xfrm policy flush
+
+ echo "PASS: $log"
+ return 0
+}
+
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
check_hthresh_repeat "policies with repeated htresh change"
+check_random_order ns3 "policies inserted in random order"
+
for i in 1 2 3 4;do ip netns del ns$i;done
exit $ret
return -EINVAL;
/* We can read the guest memory with __xxx_user() later on. */
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+ (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;