- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
+ - 'drm_for_each_crtc_reverse'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_member'
- - 'for_each_mem_region'
- - 'for_each_memblock_type'
- 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range'
- '__for_each_mem_range'
- 'for_each_mem_range'
- '__for_each_mem_range_rev'
- 'for_each_mem_range_rev'
+ - 'for_each_mem_region'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- - 'for_each_rtd_codec_dais_rollback'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- - 'for_each_rtd_cpu_dais_rollback'
- 'for_each_rtd_dais'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_unicast_dest_pgid'
+ - 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
+ - 'hlist_for_each_entry_srcu'
- '__hlist_for_each_rcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
+ - 'list_for_each_entry_srcu'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_safe'
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
+ - 'pcl_for_each_chunk'
+ - 'pcl_for_each_segment'
- 'pcm_for_each_format'
- 'ping_portaddr_for_each_entry'
- 'plist_for_each'
#
# Please keep this list dictionary sorted.
#
-# This comment is parsed by git-shortlog:
-# repo-abbrev: /pub/scm/linux/kernel/git/
-#
Aaron Durbin <adurbin@google.com>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Mark Brown <broonie@sirena.org.uk>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
+Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Provide a place in sysfs for the device link objects in the
kernel at any given time. The name of a device link directory,
denoted as ... above, is of the form <supplier>--<consumer>
- where <supplier> is the supplier device name and <consumer> is
- the consumer device name.
+ where <supplier> is the supplier bus:device name and <consumer>
+ is the consumer bus:device name.
What: /sys/class/devlink/.../auto_remove_on
Date: May 2020
Description:
The /sys/devices/.../consumer:<consumer> are symlinks to device
links where this device is the supplier. <consumer> denotes the
- name of the consumer in that device link. There can be zero or
- more of these symlinks for a given device.
+ name of the consumer in that device link and is of the form
+ bus:device name. There can be zero or more of these symlinks
+ for a given device.
Description:
The /sys/devices/.../supplier:<supplier> are symlinks to device
links where this device is the consumer. <supplier> denotes the
- name of the supplier in that device link. There can be zero or
- more of these symlinks for a given device.
+ name of the supplier in that device link and is of the form
+ bus:device name. There can be zero or more of these symlinks
+ for a given device.
Contact: Subhash Jadavani <subhashj@codeaurora.org>
Description: This entry could be used to set or show the UFS device
runtime power management level. The current driver
- implementation supports 6 levels with next target states:
+		implementation supports 7 levels with the following target states:
== ====================================================
- 0 an UFS device will stay active, an UIC link will
+ 0 UFS device will stay active, UIC link will
stay active
- 1 an UFS device will stay active, an UIC link will
+ 1 UFS device will stay active, UIC link will
hibernate
- 2 an UFS device will moved to sleep, an UIC link will
+ 2 UFS device will be moved to sleep, UIC link will
stay active
- 3 an UFS device will moved to sleep, an UIC link will
+ 3 UFS device will be moved to sleep, UIC link will
hibernate
- 4 an UFS device will be powered off, an UIC link will
+ 4 UFS device will be powered off, UIC link will
hibernate
- 5 an UFS device will be powered off, an UIC link will
+ 5 UFS device will be powered off, UIC link will
be powered off
+ 6 UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note that deep sleep might not
+		    be supported, in which case this value will not be
+		    accepted
== ====================================================
What: /sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
Contact: Subhash Jadavani <subhashj@codeaurora.org>
Description: This entry could be used to set or show the UFS device
system power management level. The current driver
- implementation supports 6 levels with next target states:
+		implementation supports 7 levels with the following target states:
== ====================================================
- 0 an UFS device will stay active, an UIC link will
+ 0 UFS device will stay active, UIC link will
stay active
- 1 an UFS device will stay active, an UIC link will
+ 1 UFS device will stay active, UIC link will
hibernate
- 2 an UFS device will moved to sleep, an UIC link will
+ 2 UFS device will be moved to sleep, UIC link will
stay active
- 3 an UFS device will moved to sleep, an UIC link will
+ 3 UFS device will be moved to sleep, UIC link will
hibernate
- 4 an UFS device will be powered off, an UIC link will
+ 4 UFS device will be powered off, UIC link will
hibernate
- 5 an UFS device will be powered off, an UIC link will
+ 5 UFS device will be powered off, UIC link will
be powered off
+ 6 UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note that deep sleep might not
+		    be supported, in which case this value will not be
+		    accepted
== ====================================================
What: /sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
The bitmap flush interval in milliseconds. The metadata buffers
are synchronized when this interval expires.
+allow_discards
+ Allow block discard requests (a.k.a. TRIM) for the integrity device.
+ Discards are only allowed to devices using internal hash.
+
fix_padding
Use a smaller padding of the tag area that is more
space-efficient. If this option is not present, large padding is
used - that is for compatibility with older kernels.
-allow_discards
- Allow block discard requests (a.k.a. TRIM) for the integrity device.
- Discards are only allowed to devices using internal hash.
+legacy_recalculate
+ Allow recalculating of volumes with HMAC keys. This is disabled by
+ default for security reasons - an attacker could modify the volume,
+ set recalc_sector to zero, and the kernel would not detect the
+ modification.
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
allow_discards can be changed when reloading the target (load an inactive
and RK3399 SoCs. The driver is located under drivers/staging/media/rkisp1
and uses the Media-Controller API.
+Revisions
+=========
+
+Multiple smaller revisions of this ISP exist, introduced in later SoCs. The
+known revisions are listed in the enum :c:type:`rkisp1_cif_isp_version` in
+the UAPI, and the revision of the ISP inside the running SoC can be read
+from the hw_revision field of struct media_device_info, as returned by the
+MEDIA_IOC_DEVICE_INFO ioctl (see the sketch below).
+
+Versions in use are:
+
+- RKISP1_V10: used at least in rk3288 and rk3399
+- RKISP1_V11: declared in the original vendor code, but not used
+- RKISP1_V12: used at least in rk3326 and px30
+- RKISP1_V13: used at least in rk1808
+
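+A minimal userspace sketch for reading the revision is shown below; the
+/dev/media0 path is an assumption and depends on the system.
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/media.h>
+
+    int print_isp_revision(void)
+    {
+            struct media_device_info info = {0};
+            int fd = open("/dev/media0", O_RDONLY);
+            int ret = -1;
+
+            if (fd < 0)
+                    return -1;
+            if (ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) == 0) {
+                    /* hw_revision holds the RKISP1_V* value listed above. */
+                    printf("ISP hw_revision: 0x%x\n", info.hw_revision);
+                    ret = 0;
+            }
+            close(fd);
+            return ret;
+    }
+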
Topology
========
.. _rkisp1_topology_graph:
~~~~~~~~~~~~~~~~~~
This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.
+``objtool`` requires that all code be contained in an ELF symbol. Symbol
+names that have a ``.L`` prefix do not emit symbol table entries. ``.L``
+prefixed symbols can be used within a code region, but should be avoided for
+denoting a range of code via ``SYM_*_START/END`` annotations.
+
* ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the
most frequent markings**. They are used for functions with standard calling
conventions -- global and local. Like in C, they both align the functions to
boot parameters that allow disabling KASAN completely or otherwise controlling
particular KASAN features.
-The things that can be controlled are:
+- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
-1. Whether KASAN is enabled at all.
-2. Whether KASAN collects and saves alloc/free stacks.
-3. Whether KASAN panics on a detected bug or not.
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables collection of alloc
+  and free stack traces (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``,
+  otherwise ``off``).
-The ``kasan.mode`` boot parameter allows to choose one of three main modes:
-
-- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
-- ``kasan.mode=prod`` - only essential production features are enabled
-- ``kasan.mode=full`` - all KASAN features are enabled
-
-The chosen mode provides default control values for the features mentioned
-above. However it's also possible to override the default values by providing:
-
-- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
- (default: ``on`` for ``mode=full``,
- otherwise ``off``)
-- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
- (default: ``report``)
-
-If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
-``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
+- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
+ report or also panic the kernel (default: ``report``).
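+
+For example, to keep KASAN enabled but skip stack trace collection and panic
+on the first report, one might boot with::
+
+    kasan=on kasan.stacktrace=off kasan.fault=panic
+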
For developers
~~~~~~~~~~~~~~
* E.g. if we wanted to also test ``sha256sum``, we could add a ``sha256``
field and reuse ``cases``.
+* be converted to a "parameterized test", see below.
+
+Parameterized Testing
+~~~~~~~~~~~~~~~~~~~~~
+
+The table-driven testing pattern is common enough that KUnit has special
+support for it.
+
+Reusing the same ``cases`` array from above, we can write the test as a
+"parameterized test" with the following.
+
+.. code-block:: c
+
+ // This is copy-pasted from above.
+ struct sha1_test_case {
+ const char *str;
+ const char *sha1;
+ };
+ struct sha1_test_case cases[] = {
+ {
+ .str = "hello world",
+ .sha1 = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed",
+ },
+ {
+ .str = "hello world!",
+ .sha1 = "430ce34d020724ed75a196dfc2ad67c77772d169",
+ },
+ };
+
+ // Need a helper function to generate a name for each test case.
+ static void case_to_desc(const struct sha1_test_case *t, char *desc)
+ {
+ strcpy(desc, t->str);
+ }
+ // Creates `sha1_gen_params()` to iterate over `cases`.
+ KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc);
+
+ // Looks no different from a normal test.
+ static void sha1_test(struct kunit *test)
+ {
+ // This function can just contain the body of the for-loop.
+ // The former `cases[i]` is accessible under test->param_value.
+ char out[40];
+ struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
+
+ sha1sum(test_param->str, out);
+ KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+ "sha1sum(%s)", test_param->str);
+ }
+
+ // Instead of KUNIT_CASE, we use KUNIT_CASE_PARAM and pass in the
+ // function declared by KUNIT_ARRAY_PARAM.
+ static struct kunit_case sha1_test_cases[] = {
+ KUNIT_CASE_PARAM(sha1_test, sha1_gen_params),
+ {}
+ };
+
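+The parameterized test is then registered like any other KUnit test; a
+minimal sketch of the suite registration (the suite name is illustrative):
+
+.. code-block:: c
+
+	static struct kunit_suite sha1_test_suite = {
+		.name = "sha1",
+		.test_cases = sha1_test_cases,
+	};
+	kunit_test_suite(sha1_test_suite);
+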
.. _kunit-on-non-uml:
KUnit on non-UML architectures
by this cpu (see ./idle-states.yaml).
capacity-dmips-mhz:
- $ref: '/schemas/types.yaml#/definitions/uint32'
description:
u32 value representing CPU capacity (see ./cpu-capacity.txt) in
DMIPS/MHz, relative to highest capacity-dmips-mhz
documents on how to describe the way the sii902x device is
connected to the rest of the audio system:
Documentation/devicetree/bindings/sound/simple-card.yaml
- Documentation/devicetree/bindings/sound/audio-graph-card.txt
+ Documentation/devicetree/bindings/sound/audio-graph-card.yaml
Note: In case of the audio-graph-card binding the used port
index should be 3.
For a description of the display interface sink function blocks, see
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt and
-Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
+Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml.
Required properties (all function blocks):
- compatible: "mediatek,<chip>-disp-<function>", one of
"mediatek,<chip>-disp-wdma"
the supported chips are mt2701, mt8167 and mt8173.
- larb: Should contain a phandle pointing to the local arbiter device as defined
- in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
- iommus: Should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
for details.
wlf,micd-timeout-ms:
description:
Timeout for microphone detection, specified in milliseconds.
- $ref: "/schemas/types.yaml#/definitions/uint32"
wlf,micd-force-micbias:
description:
description:
      This property controls the Accumulation Dead band, which sets the
      level of current below which no accumulation takes place.
- $ref: /schemas/types.yaml#/definitions/uint32
maximum: 255
default: 0
description: |
Temperature sensor trimming factor. It can be used to manually adjust the
temperature measurements within 7.130 degrees Celsius.
- maxItems: 1
- items:
- default: 0
- minimum: 0
- maximum: 7130
+ default: 0
+ minimum: 0
+ maximum: 7130
additionalProperties: false
ti,bus-range-microvolt:
description: |
This is the operating range of the bus voltage in microvolt
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [16000000, 32000000]
default: 32000000
i2c-gpio,delay-us:
description: delay between GPIO operations (may depend on each platform)
- $ref: /schemas/types.yaml#/definitions/uint32
i2c-gpio,timeout-ms:
description: timeout to get data
- $ref: /schemas/types.yaml#/definitions/uint32
# Deprecated properties, do not use in new device tree sources:
gpios:
default: 400000
i2c-sda-hold-time-ns:
- maxItems: 1
description: |
The property should contain the SDA hold time in nanoseconds. This option
is only supported in hardware blocks version 1.11a or newer or on
Microsemi SoCs.
i2c-scl-falling-time-ns:
- maxItems: 1
description: |
The property should contain the SCL falling time in nanoseconds.
This value is used to compute the tLOW period.
default: 300
i2c-sda-falling-time-ns:
- maxItems: 1
description: |
The property should contain the SDA falling time in nanoseconds.
This value is used to compute the tHIGH period.
properties:
compatible:
enum:
- - bosch,bmc150
- - bosch,bmi055
+ - bosch,bmc150_accel
+ - bosch,bmi055_accel
- bosch,bma255
- bosch,bma250e
- bosch,bma222
type: boolean
bipolar:
- description: see Documentation/devicetree/bindings/iio/adc/adc.txt
+ description: see Documentation/devicetree/bindings/iio/adc/adc.yaml
type: boolean
required:
maxItems: 1
shunt-resistor-micro-ohms:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
Value in micro Ohms of the shunt resistor connected between the RS+ and
RS- inputs, across which the current is measured. Value needed to compute
Resolution (bits) to use for conversions:
- can be 6, 8, 10 or 12 on stm32f4
- can be 8, 10, 12, 14 or 16 on stm32h7 and stm32mp1
- $ref: /schemas/types.yaml#/definitions/uint32
st,adc-channels:
description: |
const: 1
ti,channel0-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Channel 0 current in uA.
enum:
- 0
- 20
ti,channel3-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Channel 3 current in uA.
enum:
- 0
two properties must be present:
adi,range-microvolt:
- $ref: /schemas/types.yaml#/definitions/int32-array
description: |
Voltage output range specified as <minimum, maximum>
- enum:
- - [[0, 5000000]]
- - [[0, 10000000]]
- - [[-5000000, 5000000]]
- - [[-10000000, 10000000]]
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [5000000, 10000000]
+ - items:
+ - const: -5000000
+ - const: 5000000
+ - items:
+ - const: -10000000
+ - const: 10000000
adi,range-microamp:
- $ref: /schemas/types.yaml#/definitions/int32-array
description: |
Current output range specified as <minimum, maximum>
- enum:
- - [[0, 20000]]
- - [[0, 24000]]
- - [[4, 24000]]
- - [[-20000, 20000]]
- - [[-24000, 24000]]
- - [[-1000, 22000]]
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [20000, 24000]
+ - items:
+ - const: 4
+ - const: 24000
+ - items:
+ - const: -20000
+ - const: 20000
+ - items:
+ - const: -24000
+ - const: 24000
+ - items:
+ - const: -1000
+ - const: 22000
reset-gpios: true
adi,dc-dc-ilim-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [150000, 200000, 250000, 300000, 350000, 400000]
description: |
The dc-to-dc converter current limit.
description: Connected to ADC_RDY pin.
maxim,led-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
description: |
- compatible: "adc-keys"
- io-channels: Phandle to an ADC channel
- io-channel-names = "buttons";
- - keyup-threshold-microvolt: Voltage at which all the keys are considered up.
+	- keyup-threshold-microvolt: Voltage at or above which all the keys are
+	  considered up.
Optional properties:
- poll-interval: Poll interval time in milliseconds
Required subnode-properties:
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
- - press-threshold-microvolt: Voltage ADC input when this key is pressed.
+	- press-threshold-microvolt: Voltage at or above which this key is
+	  considered pressed.
+
+No two values of press-threshold-microvolt may be the same.
+All values of press-threshold-microvolt must be less than
+keyup-threshold-microvolt.
Example:
press-threshold-microvolt = <500000>;
};
};
+
++--------------------------------+------------------------+
+| 2000000 <= value               | no key pressed         |
++--------------------------------+------------------------+
+| 1500000 <= value < 2000000     | KEY_VOLUMEUP pressed   |
++--------------------------------+------------------------+
+| 1000000 <= value < 1500000     | KEY_VOLUMEDOWN pressed |
++--------------------------------+------------------------+
+| 500000 <= value < 1000000      | KEY_ENTER pressed      |
++--------------------------------+------------------------+
+| value < 500000                 | no key pressed         |
++--------------------------------+------------------------+
- goodix,gt927
- goodix,gt9271
- goodix,gt928
+ - goodix,gt9286
- goodix,gt967
reg:
touchscreen-x-mm:
description: horizontal length in mm of the touchscreen
- $ref: /schemas/types.yaml#/definitions/uint32
touchscreen-y-mm:
description: vertical length in mm of the touchscreen
- $ref: /schemas/types.yaml#/definitions/uint32
dependencies:
touchscreen-size-x: [ touchscreen-size-y ]
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/richtek,rt8515.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT8515 1.5A dual channel LED driver
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The Richtek RT8515 is a dual channel (two mode) LED driver that
+ supports driving a white LED in flash or torch mode. The maximum
+ current for each mode is defined in hardware using two resistors
+ RFS and RTS.
+
+properties:
+ compatible:
+ const: richtek,rt8515
+
+ enf-gpios:
+ maxItems: 1
+ description: A connection to the 'ENF' (enable flash) pin.
+
+ ent-gpios:
+ maxItems: 1
+ description: A connection to the 'ENT' (enable torch) pin.
+
+ richtek,rfs-ohms:
+ minimum: 7680
+ maximum: 367000
+ description: The resistance value of the RFS resistor. This
+      resistor limits the maximum flash current. It must be set
+      for the property flash-max-microamp to work; the RFS resistor
+ defines the range of the dimmer setting (brightness) of the
+ flash LED.
+
+ richtek,rts-ohms:
+ minimum: 7680
+ maximum: 367000
+ description: The resistance value of the RTS resistor. This
+      resistor limits the maximum torch current. It must be set
+      for the property torch-max-microamp to work; the RTS resistor
+ defines the range of the dimmer setting (brightness) of the
+ torch LED.
+
+ led:
+ type: object
+ $ref: common.yaml#
+ properties:
+ function: true
+ color: true
+ flash-max-timeout-us: true
+
+ flash-max-microamp:
+ maximum: 700000
+ description: The maximum current for flash mode
+ is hardwired to the component using the RFS resistor to
+ ground. The maximum hardware current setting is calculated
+ according to the formula Imax = 5500 / RFS. The lowest
+ allowed resistance value is 7.86 kOhm giving an absolute
+ maximum current of 700mA. By setting this attribute in
+ the device tree, you can further restrict the maximum
+ current below the hardware limit. This requires the RFS
+ to be defined as it defines the maximum range.
+
+ led-max-microamp:
+ maximum: 700000
+ description: The maximum current for torch mode
+ is hardwired to the component using the RTS resistor to
+ ground. The maximum hardware current setting is calculated
+ according to the formula Imax = 5500 / RTS. The lowest
+ allowed resistance value is 7.86 kOhm giving an absolute
+ maximum current of 700mA. By setting this attribute in
+ the device tree, you can further restrict the maximum
+ current below the hardware limit. This requires the RTS
+ to be defined as it defines the maximum range.
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - ent-gpios
+ - enf-gpios
+ - led
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "richtek,rt8515";
+ enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ richtek,rfs-ohms = <16000>;
+ richtek,rts-ohms = <100000>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ flash-max-microamp = <150000>;
+ led-max-microamp = <25000>;
+ };
+ };
+
+...
- power-domains: a phandle to the power domain, see
Documentation/devicetree/bindings/power/power_domain.txt for details.
- mediatek,larb: must contain the local arbiters in the current Socs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
- power-domains: a phandle to the power domain, see
Documentation/devicetree/bindings/power/power_domain.txt for details.
- mediatek,larb: must contain the local arbiters in the current SoCs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
for details.
- mediatek,larb: must contain the local arbiters in the current Socs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
Example:
waiting for I/O signalling and card power supply to be stable,
      regardless of whether pwrseq-simple is used. Defaults to 10ms if
      not available.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 10
supports-cqe:
description:
Delay in ms after powering the card and de-asserting the
reset-gpios (if any).
- $ref: /schemas/types.yaml#/definitions/uint32
power-off-delay-us:
description:
Delay in us after asserting the reset-gpios (if any)
during power off of the card.
- $ref: /schemas/types.yaml#/definitions/uint32
required:
- compatible
such as flow control thresholds.
rx-internal-delay-ps:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
RGMII Receive Clock Delay defined in pico seconds.
This is used for controllers that have configurable RX internal delays.
is used for components that can have configurable fifo sizes.
tx-internal-delay-ps:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
RGMII Transmit Clock Delay defined in pico seconds.
This is used for controllers that have configurable TX internal delays.
Triplet of delays. The 1st cell is reset pre-delay in micro
seconds. The 2nd cell is reset pulse in micro seconds. The 3rd
cell is reset post-delay in micro seconds.
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 3
maxItems: 3
for each of the battery capacity lookup table.
operating-range-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: operating temperature range of a battery
items:
- description: minimum temperature at which battery can operate
- description: maximum temperature at which battery can operate
ambient-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: safe range of ambient temperature
items:
- description: alert when ambient temperature is lower than this value
- description: alert when ambient temperature is higher than this value
alert-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: safe range of battery temperature
items:
- description: alert when battery temperature is lower than this value
maxItems: 1
input-current-limit-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Maximum input current in micro Amps.
minimum: 50000
maximum: 500000
description: IRQ line information.
dlg,irq-polling-delay-passive-ms:
- $ref: "/schemas/types.yaml#/definitions/uint32"
minimum: 1000
maximum: 10000
description: |
startup-delay-us:
description: startup time in microseconds
- $ref: /schemas/types.yaml#/definitions/uint32
off-on-delay-us:
description: off delay time in microseconds
- $ref: /schemas/types.yaml#/definitions/uint32
enable-active-high:
description:
1: chargeable
quartz-load-femtofarads:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
The capacitive load of the quartz(x-tal), expressed in femto
Farad (fF). The default value shall be listed (if optional),
deprecated: true
trickle-resistor-ohms:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
Selected resistor for trickle charger. Should be given
if trickle charger should be enabled.
description:
Rate at which poll occurs when auto-poll is set.
default 100ms.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 100
poll-timeout-ms:
description:
Poll timeout when auto-poll is set, default
3000ms.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 3000
required:
title: Mediatek MT8192 with MT6359, RT1015 and RT5682 ASoC sound card driver
maintainers:
- - Jiaxin Yu <jiaxin.yu@mediatek.com>
- - Shane Chien <shane.chien@mediatek.com>
+ - Jiaxin Yu <jiaxin.yu@mediatek.com>
+ - Shane Chien <shane.chien@mediatek.com>
description:
This binding describes the MT8192 sound card.
values of 2k, 4k or 8k. If set to 0 it will be off. If this node is not
    mentioned or if the value is unknown, then the micbias resistor is set to
4k.
- $ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 0, 2, 4, 8 ]
micbias-voltage-m-volts:
description: The bias voltage to be used in mVolts. The voltage can take
    values from 1.25V to 3V in 250mV steps. If this node is not mentioned
or the value is unknown, then the value is set to 1.25V.
- $ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000 ]
lrclk-strength:
reg:
description: module registers
+ ranges: true
+
power-domains:
description:
PM domain provider node and an args specifier containing
'#size-cells':
const: 2
+ dma-coherent: true
+
patternProperties:
"^usb@":
type: object
pattern: "^watchdog(@.*|-[0-9a-f])?$"
timeout-sec:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
Contains the watchdog timeout in seconds.
The advantage of mounting with the "volatile" option is that all forms of
sync calls to the upper filesystem are omitted.
+In order to avoid giving a false sense of safety, the syncfs (and fsync)
+semantics of volatile mounts are slightly different from those of the rest of
+the VFS. If any writeback error occurs on the upperdir's filesystem after a
+volatile mount takes place, all sync functions will return an error. Once this
+condition is reached, the filesystem will not recover, and every subsequent sync
+call will return an error, even if the upperdir has not experienced a new error
+since the last sync call.
+
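+As a sketch, userspace can observe this sticky error state with a plain
+syncfs(2) call on a file descriptor opened anywhere on the overlay; the
+mount point path used here is illustrative::
+
+	#define _GNU_SOURCE
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int check_volatile_overlay(void)
+	{
+		int fd = open("/mnt/overlay", O_RDONLY);
+
+		if (fd < 0)
+			return -1;
+		/* Keeps failing once a writeback error has been recorded. */
+		if (syncfs(fd) < 0)
+			perror("syncfs");
+		close(fd);
+		return 0;
+	}
+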
When overlay is mounted with "volatile" option, the directory
"$workdir/work/incompat/volatile" is created. During next mount, overlay
checks for this directory and refuses to mount if present. This is a strong
If non-zero, the message will be sent with the primary address of
the interface that received the packet that caused the icmp error.
- This is the behaviour network many administrators will expect from
+ This is the behaviour many network administrators will expect from
a router. And it can make debugging complicated network layouts
much easier.
``conf/default/*``:
Change the interface-specific default settings.
+	These settings are used when creating new interfaces.
+
``conf/all/*``:
Change all the interface-specific settings.
[XXX: Other special features than forwarding?]
+conf/all/disable_ipv6 - BOOLEAN
+	Changing this value is the same as changing ``conf/default/disable_ipv6``
+ setting and also all per-interface ``disable_ipv6`` settings to the same
+ value.
+
+ Reading this value does not have any particular meaning. It does not say
+	whether IPv6 support is enabled or disabled. The returned value can be 1
+	even when some interface has ``disable_ipv6`` set to 0 and has IPv6
+	addresses configured.
+
conf/all/forwarding - BOOLEAN
Enable global IPv6 forwarding between all interfaces.
Disabling the latter implies clearing the former. Disabling TX checksum offload
should not affect old connections, and drivers should make sure checksum
calculation does not break for them.
+Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
+does not want to enable RX checksum offload, the TLS RX device feature is
+disabled as well.
memory slot. Ensure the entire structure is cleared to avoid padding
issues.
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field
+specify the address space for which you want to return the dirty bitmap. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
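+
+As an illustrative sketch (vm_fd and bitmap are assumed to be set up by the
+caller), selecting address space 1 for slot 0 encodes as::
+
+	struct kvm_dirty_log log = { 0 };
+	int as = 1, slot_id = 0;
+
+	log.slot = (as << 16) | slot_id;  /* bits 16-31: address space */
+	log.dirty_bitmap = bitmap;        /* must cover the slot's pages */
+	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
+		perror("KVM_GET_DIRTY_LOG");
+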
The bits in the dirty bitmap are cleared before the ioctl returns, unless
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
the entire memory slot size. Any object may back this memory, including
anonymous memory, ordinary files, and hugetlbfs.
+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
be identical. This allows large pages in the guest to be backed by large
pages in the host.
:Capability: KVM_CAP_ENABLE_CAP_VM
:Architectures: all
-:Type: vcpu ioctl
+:Type: vm ioctl
:Parameters: struct kvm_enable_cap (in)
:Returns: 0 on success; -1 on error
:Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
:Architectures: x86, arm, arm64, mips
:Type: vm ioctl
-:Parameters: struct kvm_dirty_log (in)
+:Parameters: struct kvm_clear_dirty_log (in)
:Returns: 0 on success, -1 on error
::
(for example via write-protection, or by clearing the dirty bit in
a page table entry).
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field
+specify the address space for which you want to clear the dirty status. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
is enabled; for more information, see the description of the capability.
Running nested VMX
------------------
-The nested VMX feature is disabled by default. It can be enabled by giving
-the "nested=1" option to the kvm-intel module.
+The nested VMX feature is enabled by default since Linux kernel v4.20. For
+older kernels, it can be enabled by giving the "nested=1" option to the
+kvm-intel module.
+
No modifications are required to user space (qemu). However, qemu's default
emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be
Enabling "nested" (x86)
-----------------------
-From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
+From Linux kernel v4.20 onwards, the ``nested`` KVM parameter is enabled
by default for Intel and AMD. (Though your Linux distribution might
override this default.)
F: drivers/power/reset/keystone-reset.c
ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
-M: Tero Kristo <t-kristo@ti.com>
M: Nishanth Menon <nm@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/arm/ti/k3.yaml
S: Supported
W: http://sourceforge.net/projects/bonding/
F: drivers/net/bonding/
+F: include/net/bonding.h
F: include/uapi/linux/if_bonding.h
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
X: arch/riscv/net/bpf_jit_comp64.c
BPF JIT for RISC-V (64-bit)
-M: Björn Töpel <bjorn.topel@gmail.com>
+M: Björn Töpel <bjorn@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
F: drivers/pci/controller/pcie-brcmstb.c
F: drivers/staging/vc04_services
N: bcm2711
-N: bcm2835
+N: bcm283*
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
F: drivers/mtd/nand/raw/cadence-nand-controller.c
CADENCE USB3 DRD IP DRIVER
-M: Peter Chen <peter.chen@nxp.com>
+M: Peter Chen <peter.chen@kernel.org>
M: Pawel Laszczak <pawell@cadence.com>
R: Roger Quadros <rogerq@kernel.org>
R: Aswath Govindraju <a-govindraju@ti.com>
F: Documentation/translations/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M: Peter Chen <Peter.Chen@nxp.com>
+M: Peter Chen <peter.chen@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
F: .clang-format
CLANG/LLVM BUILD SUPPORT
-M: Nathan Chancellor <natechancellor@gmail.com>
+M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
L: clang-built-linux@googlegroups.com
S: Supported
B: https://github.com/ClangBuiltLinux/linux/issues
C: irc://chat.freenode.net/clangbuiltlinux
F: Documentation/kbuild/llvm.rst
+F: include/linux/compiler-clang.h
F: scripts/clang-tools/
+F: scripts/clang-version.sh
F: scripts/lld-version.sh
K: \b(?i:clang|llvm)\b
F: drivers/edac/skx_*.[ch]
EDAC-TI
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-edac@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/edac/ti_edac.c
EDIROL UA-101/UA-1000 DRIVER
F: include/linux/i3c/
IA64 (Itanium) PLATFORM
-M: Tony Luck <tony.luck@intel.com>
-M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-ia64@vger.kernel.org
-S: Odd Fixes
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
+S: Orphan
F: Documentation/ia64/
F: arch/ia64/
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
F: drivers/crypto/qat/
QCOM AUDIO (ASoC) DRIVERS
-M: Patrick Lai <plai@codeaurora.org>
+M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Banajit Goswami <bgoswami@codeaurora.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
+F: sound/soc/codecs/lpass-va-macro.c
+F: sound/soc/codecs/lpass-wsa-macro.*
+F: sound/soc/codecs/msm8916-wcd-analog.c
+F: sound/soc/codecs/msm8916-wcd-digital.c
+F: sound/soc/codecs/wcd9335.*
+F: sound/soc/codecs/wcd934x.c
+F: sound/soc/codecs/wcd-clsh-v2.*
+F: sound/soc/codecs/wsa881x.c
F: sound/soc/qcom/
QCOM IPA DRIVER
M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
F: sound/soc/stm/
STM32 TIMER/LPTIMER DRIVERS
TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
M: Nishanth Menon <nm@ti.com>
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: drivers/clk/clk-cdce706.c
TI CLOCK DRIVER
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-omap@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/clk/ti/
F: include/linux/clk/ti.h
F: drivers/usb/host/ohci*
USB OTG FSM (Finite State Machine)
-M: Peter Chen <Peter.Chen@nxp.com>
+M: Peter Chen <peter.chen@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
K: (?:\b|_)xdp(?:\b|_)
XDP SOCKETS (AF_XDP)
-M: Björn Töpel <bjorn.topel@intel.com>
+M: Björn Töpel <bjorn@kernel.org>
M: Magnus Karlsson <magnus.karlsson@intel.com>
R: Jonathan Lemon <jonathan.lemon@gmail.com>
L: netdev@vger.kernel.org
VERSION = 5
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
{
int offset = fdt_path_offset(fdt, node_path);
if (offset == -FDT_ERR_NOTFOUND)
- offset = fdt_add_subnode(fdt, 0, node_path);
+ /* Add the node to root if not found, dropping the leading '/' */
+ offset = fdt_add_subnode(fdt, 0, node_path + 1);
return offset;
}
stdout-path = &uart1;
};
+ aliases {
+ mmc0 = &usdhc2;
+ mmc1 = &usdhc3;
+ mmc2 = &usdhc4;
+ /delete-property/ mmc3;
+ };
+
memory@10000000 {
device_type = "memory";
reg = <0x10000000 0x80000000>;
/* VDD_AUD_1P8: Audio codec */
reg_aud_1p8v: ldo3 {
- regulator-name = "vdd1p8";
+ regulator-name = "vdd1p8a";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
lcd_backlight: lcd-backlight {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
pwm-names = "LCD_BKLT_PWM";
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
- status = "disabld";
+ status = "disabled";
};
i2c_cam: i2c-gpio-cam {
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
- status = "disabld";
+ status = "disabled";
};
};
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
- phy-handle = <&phy>;
phy-mode = "rgmii-id";
phy-reset-duration = <2>;
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
#address-cells = <1>;
#size-cells = <0>;
- phy: ethernet-phy@0 {
+ /*
+ * The PHY can appear at either address 0 or 4 due to the
+ * configuration (LED) pin not being pulled sufficiently.
+ */
+ ethernet-phy@0 {
reg = <0>;
qca,clk-out-frequency = <125000000>;
};
+
+ ethernet-phy@4 {
+ reg = <4>;
+ qca,clk-out-frequency = <125000000>;
+ };
};
};
compatible = "nxp,pcf2127";
reg = <0>;
spi-max-frequency = <2000000>;
+ reset-source;
};
};
clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
-
- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
- assigned-clock-rates = <208000000>;
};
};
gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;
/* lcd panel */
spi-max-frequency = <100000>;
spi-cpol;
spi-cpha;
- spi-cs-high;
backlight= <&backlight>;
label = "lcd";
debounce-interval = <10>;
};
+ /*
+ * We use pad 0x4a100116 abe_dmic_din3.gpio_122 as the irq instead
+ * of the gpio interrupt to avoid lost events in deeper idle states.
+ */
slider {
label = "Keypad Slide";
+ interrupts-extended = <&omap4_pmx_core 0xd6>;
gpios = <&gpio4 26 GPIO_ACTIVE_HIGH>; /* gpio122 */
linux,input-type = <EV_SW>;
linux,code = <SW_KEYPAD_SLIDE>;
200000 0>;
};
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Modem trace memory */
+ ram@06000000 {
+ reg = <0x06000000 0x00f00000>;
+ no-map;
+ };
+
+ /* Modem shared memory */
+ ram@06f00000 {
+ reg = <0x06f00000 0x00100000>;
+ no-map;
+ };
+
+ /* Modem private memory */
+ ram@07000000 {
+ reg = <0x07000000 0x01000000>;
+ no-map;
+ };
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
};
200000 0>;
};
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Modem trace memory */
+ ram@06000000 {
+ reg = <0x06000000 0x00f00000>;
+ no-map;
+ };
+
+ /* Modem shared memory */
+ ram@06f00000 {
+ reg = <0x06f00000 0x00100000>;
+ no-map;
+ };
+
+ /* Modem private memory */
+ ram@07000000 {
+ reg = <0x07000000 0x01000000>;
+ no-map;
+ };
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+ cpus {
+ cpu@300 {
+ /* cpufreq controls */
+ operating-points = <1152000 0
+ 800000 0
+ 400000 0
+ 200000 0>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
+};
*/
/dts-v1/;
-#include "ste-db8500.dtsi"
+#include "ste-db9500.dtsi"
#include "ste-href-ab8500.dtsi"
#include "ste-href-family-pinctrl.dtsi"
* during TX anyway and that it only controls drive enable DE
* line. Hence, the RX is always enabled here.
*/
- rs485-rx-en {
+ rs485-rx-en-hog {
gpio-hog;
- gpios = <8 GPIO_ACTIVE_HIGH>;
+ gpios = <8 0>;
output-low;
line-name = "rs485-rx-en";
};
* order to reset the Hub when USB bus is powered down, but
* so far there is no such functionality.
*/
- usb-hub {
+ usb-hub-hog {
gpio-hog;
- gpios = <2 GPIO_ACTIVE_HIGH>;
+ gpios = <2 0>;
output-high;
line-name = "usb-hub-reset";
};
};
};
+&i2c4 {
+ touchscreen@49 {
+ status = "disabled";
+ };
+};
+
&i2c5 { /* TP7/TP8 */
pinctrl-names = "default";
pinctrl-0 = <&i2c5_pins_a>;
* are used for on-board microSD slot instead.
*/
/delete-property/broken-cd;
- cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
disable-wp;
};
* in order to turn on port power when USB bus is powered up, but so
* far there is no such functionality.
*/
- usb-port-power {
+ usb-port-power-hog {
gpio-hog;
- gpios = <13 GPIO_ACTIVE_LOW>;
+ gpios = <13 0>;
output-low;
line-name = "usb-port-power";
};
pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
- broken-cd;
+ cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
st,sig-dir;
st,neg-edge;
st,use-ckin;
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <®_gmac_3v3>;
status = "okay";
};
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
.align
99: .word .
+#if defined(ZIMAGE)
+ .word . + 4
+/*
+ * Storage for the state maintained by the macro.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the storage right here, since common.c
+ * isn't included in the decompressor build. This storage data gets put in
+ * .text even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+ /* Debug UART initialization required */
+ .word 1
+ /* Debug UART physical address */
+ .word 0
+ /* Debug UART virtual address */
+ .word 0
+#else
.word tegra_uart_config
+#endif
.ltorg
/* Load previously selected UART address */
.macro waituarttxrdy,rd,rx
.endm
-
-/*
- * Storage for the state maintained by the macros above.
- *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
- * That's because this header is included from multiple files, and we only
- * want a single copy of the data. In particular, the UART probing code above
- * assumes it's running using physical addresses. This is true when this file
- * is included from head.o, but not when included from debug.o. So we need
- * to share the probe results between the two copies, rather than having
- * to re-run the probing again later.
- *
- * In the decompressor, we put the symbol/storage right here, since common.c
- * isn't included in the decompressor build. This symbol gets put in .text
- * even though it's really data, since .data is discarded from the
- * decompressor. Luckily, .text is writeable in the decompressor, unless
- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
- */
-#if defined(ZIMAGE)
-tegra_uart_config:
- /* Debug UART initialization required */
- .word 1
- /* Debug UART physical address */
- .word 0
- /* Debug UART virtual address */
- .word 0
-#endif
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
+ DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+ DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+ DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+ DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}
#include <linux/of_fdt.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
static atomic_t waiting_for_crash_ipi;
/*
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
+ struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- set_kernel_text_rw();
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->arch.kernel_r2;
-
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
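+	/*
+	 * The relocation data lands right after the copied code; the
+	 * relocate_new_kernel stub locates it via relocate_new_kernel_end.
+	 */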
+ data = reboot_code_buffer + relocate_new_kernel_size;
+ data->kexec_start_address = image->start;
+ data->kexec_indirection_page = page_list;
+ data->kexec_mach_type = machine_arch_type;
+ data->kexec_r2 = image->arch.kernel_r2;
+
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
- ldr r0,kexec_indirection_page
- ldr r1,kexec_start_address
+ adr r7, relocate_new_kernel_end
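+	@ r7 points at the kexec_relocate_data copied right after this code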
+ ldr r0, [r7, #KEXEC_INDIR_PAGE]
+ ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
2:
/* Jump to relocated kernel */
- mov lr,r1
- mov r0,#0
- ldr r1,kexec_mach_type
- ldr r2,kexec_boot_atags
- ARM( ret lr )
- THUMB( bx lr )
-
- .align
-
- .globl kexec_start_address
-kexec_start_address:
- .long 0x0
-
- .globl kexec_indirection_page
-kexec_indirection_page:
- .long 0x0
-
- .globl kexec_mach_type
-kexec_mach_type:
- .long 0x0
-
- /* phy addr of the atags for the new kernel */
- .globl kexec_boot_atags
-kexec_boot_atags:
- .long 0x0
+ mov lr, r1
+ mov r0, #0
+ ldr r1, [r7, #KEXEC_MACH_TYPE]
+ ldr r2, [r7, #KEXEC_R2]
+ ARM( ret lr )
+ THUMB( bx lr )
ENDPROC(relocate_new_kernel)
+ .align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
if (addr)
switch (size) {
case 1:
- asm("ldrb %0, [%1, %2]"
+ asm volatile("ldrb %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 2:
- asm("ldrh %0, [%1, %2]"
+ asm volatile("ldrh %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 4:
- asm("ldr %0, [%1, %2]"
+ asm volatile("ldr %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
}
if (addr)
switch (size) {
case 1:
- asm("strb %0, [%1, %2]"
+ asm volatile("strb %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 2:
- asm("strh %0, [%1, %2]"
+ asm volatile("strh %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 4:
- asm("str %0, [%1, %2]"
+ asm volatile("str %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
#define MX6Q_CCM_CCR 0x0
.align 3
+ .arm
.macro sync_l2_cache
*/
gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
+ /* Free the GPIO again as the driver will request it */
+ gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
/* Set GPIO 2 high so LED D3 is off by default */
tps65010_set_gpio_out_value(GPIO2, HIGH);
bool "TI OMAP3"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select OMAP_HWMOD
select OMAP_INTERCONNECT
- select PM_OPP if PM
- select PM if CPU_IDLE
+ select PM_OPP
select SOC_HAS_OMAP2_SDRC
select ARM_ERRATA_430973
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_ERRATA_720789
select ARM_GIC
select HAVE_ARM_SCU if SMP
select OMAP_INTERCONNECT_BARRIER
select PL310_ERRATA_588369 if CACHE_L2X0
select PL310_ERRATA_727915 if CACHE_L2X0
- select PM_OPP if PM
+ select PM_OPP
select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
bool "TI OMAP5"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
- select PM_OPP if PM
+ select PM_OPP
select ZONE_DMA if ARM_LPAE
config SOC_AM33XX
bool "TI AM33XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
config SOC_AM43XX
bool "TI AM43x"
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select OMAP_INTERCONNECT
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
config SOC_DRA7XX
bool "TI DRA7XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
- select PM_OPP if PM
+ select PM_OPP
select ZONE_DMA if ARM_LPAE
select PINCTRL_TI_IODELAY if OF && PINCTRL
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
- select PM_GENERIC_DOMAINS if PM
- select PM_GENERIC_DOMAINS_OF if PM
+ select PM
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
select RESET_CONTROLLER
+ select SIMPLE_PM_BUS
select SOC_BUS
select TI_SYSC
select OMAP_IRQCHIP
select I2C_OMAP
select MENELAUS if ARCH_OMAP2
select NEON if CPU_V7
- select PM
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
(cx->mpu_logic_state == PWRDM_POWER_OFF);
/* Enter broadcast mode for periodic timers */
- tick_broadcast_enable();
+ RCU_NONIDLE(tick_broadcast_enable());
/* Enter broadcast mode for one-shot timers */
- tick_broadcast_enter();
+ RCU_NONIDLE(tick_broadcast_enter());
/*
* Call idle CPU PM enter notifier chain so that
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
/*
* Call idle CPU cluster PM enter notifier chain
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
mpuss_can_lose_context = 0;
}
}
mpuss_can_lose_context)
gic_dist_disable();
- clkdm_deny_idle(cpu_clkdm[1]);
- omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
- clkdm_allow_idle(cpu_clkdm[1]);
+ RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
+ RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
+ RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
mpuss_can_lose_context) {
cpu_pm_exit();
cpu_pm_out:
- tick_broadcast_exit();
+ RCU_NONIDLE(tick_broadcast_exit());
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
&dra7_ipu1_dsp_iommu_pdata),
#endif
/* Common auxdata */
+ OF_DEV_AUXDATA("simple-pm-bus", 0, NULL, omap_auxdata_lookup),
OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata),
OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
OF_DEV_AUXDATA("ti,omap-prm-inst", 0, NULL, &ti_prm_pdata),
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_AXG_ETHERNET_MEM_ID>;
status = "disabled";
};
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
status = "disabled";
mdio0: mdio {
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,dis_u2_susphy_quirk;
- snps,quirk-frame-length-adjustment;
+ snps,quirk-frame-length-adjustment = <0x20>;
snps,parkmode-disable-ss-quirk;
};
};
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/meson-gxbb-power.h>
-#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-names = "macirq";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
status = "disabled";
};
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>;
enable-active-high;
regulator-always-on;
};
*/
usb {
compatible = "simple-bus";
- dma-ranges;
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
+ /*
+	 * Internally, the USB bus to the interconnect can only address up
+	 * to 40 bits
+ */
+ dma-ranges = <0 0 0 0 0x100 0x0>;
+
usbphy0: usb-phy@0 {
compatible = "brcm,sr-usb-combo-phy";
reg = <0x0 0x00000000 0x0 0x100>;
reboot {
compatible ="syscon-reboot";
regmap = <&rst>;
- offset = <0xb0>;
+ offset = <0>;
mask = <0x02>;
};
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1046a-dcfg", "syscon";
- reg = <0x0 0x1ee0000 0x0 0x10000>;
+ reg = <0x0 0x1ee0000 0x0 0x1000>;
big-endian;
};
#size-cells = <1>;
ranges;
- spba: bus@30000000 {
+ spba: spba-bus@30000000 {
compatible = "fsl,spba-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
+ gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
};
gpio4: gpio@30230000 {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
&i2c3 {
status = "okay";
clock-frequency = <400000>;
+ /* Overwrite pinctrl-0 from sdm845.dtsi */
+ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
tsel: hid@15 {
compatible = "hid-over-i2c";
hid-descr-addr = <0x1>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_hid_active>;
};
tsc2: hid@2c {
hid-descr-addr = <0x20>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_hid_active>;
-
- status = "disabled";
};
};
vopl_mmu: iommu@ff470f00 {
compatible = "rockchip,iommu";
reg = <0x0 0xff470f00 0x0 0x100>;
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
clock-names = "aclk", "iface";
cpu-supply = <&vdd_arm>;
};
+&display_subsystem {
+ status = "disabled";
+};
+
&gmac2io {
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
&pcie0 {
bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
- max-link-speed = <2>;
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
- linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
compatible = "rockchip,rk3399-vdec";
reg = <0x0 0xff660000 0x0 0x400>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "vdpu";
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
clock-names = "axi", "ahb", "cabac", "core";
CONFIG_ARCH_TEGRA_186_SOC=y
CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_TEGRA_234_SOC=y
-CONFIG_ARCH_K3_AM6_SOC=y
-CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
CONFIG_INTERCONNECT_QCOM_OSM_L3=m
-CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SM8150=m
CONFIG_INTERCONNECT_QCOM_SM8250=m
CONFIG_EXT2_FS=y
/*
- * The linear kernel range starts at the bottom of the virtual address space.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
*/
-#define __is_lm_address(addr) (((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
+#define __is_lm_address(addr) (((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
-#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr) (((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
#define virt_addr_valid(addr) ({ \
- __typeof__(addr) __addr = addr; \
+ __typeof__(addr) __addr = __tag_reset(addr); \
__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
})
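The switch from masking to subtraction in __is_lm_address() matters whenever PAGE_OFFSET is not exactly the half-way point of the address space: "addr & ~PAGE_OFFSET" merely discards the top bits, so a low address (a user pointer, say) can slip through, whereas unsigned subtraction wraps such addresses to huge values that fail the range test. A stand-alone sketch with illustrative constants (the real PAGE_OFFSET/PAGE_END depend on the VA configuration):

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative values only; the real ones are configuration-dependent. */
	#define PAGE_OFFSET 0xffff000000000000ULL
	#define PAGE_END    0xffff800000000000ULL

	/* Old check: discards the top bits, so low addresses can pass. */
	static int is_lm_mask(uint64_t addr)
	{
		return (addr & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET);
	}

	/* New check: unsigned subtraction wraps for addr < PAGE_OFFSET,
	 * turning the comparison into a true range test. */
	static int is_lm_sub(uint64_t addr)
	{
		return (addr - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET);
	}

	int main(void)
	{
		uint64_t user = 0x1000;	/* clearly not a linear-map address */

		printf("mask=%d sub=%d\n", is_lm_mask(user), is_lm_sub(user));
		/* prints "mask=1 sub=0": only the new check rejects it */
		return 0;
	}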
unsigned long addr = instruction_pointer(regs);
struct kprobe *cur = kprobe_running();
- if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
- && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+ if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+ ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
kprobes_restore_local_irqflag(kcb, regs);
post_kprobe_handler(cur, kcb, regs);
* Calculate the raw per-cpu offset without a translation from the
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
+ * Also drop the KASAN tag which gets in the way...
*/
- params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+ params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
(unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
params->mair_el2 = read_sysreg(mair_el1);
b .
/*
+ * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
+ *
* x0: SMCCC function ID
* x1: struct kvm_nvhe_init_params PA
*/
eret
1: mov x0, x1
- mov x4, lr
- bl ___kvm_hyp_init
- mov lr, x4
+ mov x3, lr
+ bl ___kvm_hyp_init // Clobbers x0..x2
+ mov lr, x3
/* Hello, World! */
mov x0, #SMCCC_RET_SUCCESS
/*
* Initialize the hypervisor in EL2.
*
- * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers
- * and leave x4 for the caller.
+ * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
+ * and leave x3 for the caller.
*
* x0: struct kvm_nvhe_init_params PA
*/
/*
* Set the PS bits in TCR_EL2.
*/
- ldr x1, [x0, #NVHE_INIT_TCR_EL2]
- tcr_compute_pa_size x1, #TCR_EL2_PS_SHIFT, x2, x3
- msr tcr_el2, x1
+ ldr x0, [x0, #NVHE_INIT_TCR_EL2]
+ tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
+ msr tcr_el2, x0
isb
/* Enable MMU, set vectors and stack. */
mov x0, x28
- bl ___kvm_hyp_init // Clobbers x0..x3
+ bl ___kvm_hyp_init // Clobbers x0..x2
/* Leave idmap. */
mov x0, x29
cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
- psci_forward(host_ctxt);
- hyp_panic(); /* unreachable */
-}
-
static unsigned int find_cpu_id(u64 mpidr)
{
unsigned int i;
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
return psci_forward(host_ctxt);
+ /*
+ * SYSTEM_OFF/RESET should not return according to the spec.
+ * Allow it so as to stay robust to broken firmware.
+ */
case PSCI_0_2_FN_SYSTEM_OFF:
case PSCI_0_2_FN_SYSTEM_RESET:
- psci_forward_noreturn(host_ctxt);
- unreachable();
+ return psci_forward(host_ctxt);
case PSCI_0_2_FN64_CPU_SUSPEND:
return psci_cpu_suspend(func_id, host_ctxt);
case PSCI_0_2_FN64_CPU_ON:
{
unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
u64 val, mask = 0;
- int base, i;
+ int base, i, nr_events;
if (!pmceid1) {
val = read_sysreg(pmceid0_el0);
if (!bmap)
return val;
+ nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
for (i = 0; i < 32; i += 8) {
u64 byte;
byte = bitmap_get_value8(bmap, base + i);
mask |= byte << i;
- byte = bitmap_get_value8(bmap, 0x4000 + base + i);
- mask |= byte << (32 + i);
+ if (nr_events >= (0x4000 + base + 32)) {
+ byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+ mask |= byte << (32 + i);
+ }
}
return val & mask;
* 64bit interface.
*/
+#define reg_to_encoding(x) \
+ sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
const struct sys_reg_desc *r)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
- u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 sr = reg_to_encoding(r);
if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
kvm_inject_undefined(vcpu);
vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
- bool enabled = kvm_vcpu_has_pmu(vcpu);
+ bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
- enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
if (!enabled)
kvm_inject_undefined(vcpu);
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- if (!kvm_vcpu_has_pmu(vcpu)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
-
if (p->is_write) {
if (!vcpu_mode_priv(vcpu)) {
kvm_inject_undefined(vcpu);
return true;
}
-#define reg_to_encoding(x) \
- sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
+#define PMU_SYS_REG(r) \
+ SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
- { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
- access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
+ .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
- { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
- access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
+ .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
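The PMU_SYS_REG() conversion leans on a C property worth spelling out: when an initializer list names the same member more than once, the last designated initializer wins, so the macro can install .reset/.visibility defaults that individual entries (the PMCEID descriptors, for instance, with .reset = NULL) override. A minimal stand-alone illustration with a hypothetical struct standing in for sys_reg_desc:

	#include <stdio.h>

	/* Hypothetical stand-in for struct sys_reg_desc. */
	struct desc {
		const char *name;
		int reset;		/* stands in for the .reset callback */
		int visibility;
	};

	/* Like PMU_SYS_REG(): leading fields plus overridable defaults. */
	#define PMU_DESC(n)	.name = (n), .reset = 1, .visibility = 2

	static const struct desc table[] = {
		{ PMU_DESC("pmcr") },			/* keeps reset=1 */
		{ PMU_DESC("pmceid0"), .reset = 0 },	/* later init wins: reset=0 */
	};

	int main(void)
	{
		printf("%s reset=%d\n", table[1].name, table[1].reset);
		return 0;
	}

GCC flags the duplicate designator under -Woverride-init (part of -Wextra); builds relying on this idiom typically quiet it with -Wno-override-init.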
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
if (id == SYS_ID_AA64PFR0_EL1) {
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
switch (id) {
case SYS_ID_AA64ZFR0_EL1:
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
- { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
- { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
- { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
- { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
- { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
- { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
- { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
- { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+ { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+ .reset = reset_pmcr, .reg = PMCR_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMSWINC_EL0),
+ .access = access_pmswinc, .reg = PMSWINC_EL0 },
+ { PMU_SYS_REG(SYS_PMSELR_EL0),
+ .access = access_pmselr, .reg = PMSELR_EL0 },
+ { PMU_SYS_REG(SYS_PMCEID0_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCEID1_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+ .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+ { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+ .access = access_pmu_evtyper, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+ .access = access_pmu_evcntr, .reset = NULL },
/*
* PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
- { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+ .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+ { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+ { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+ .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
struct pt_regs *regs)
{
/*
- * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
- * check faults. Mask them out now so that userspace doesn't see them.
+ * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+ * for tag check faults. Set them to corresponding bits in the untagged
+ * address.
*/
- far &= (1UL << 60) - 1;
+ far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
do_bad_area(far, esr, regs);
return 0;
}
phys_addr_t __virt_to_phys(unsigned long x)
{
- WARN(!__is_lm_address(x),
+ WARN(!__is_lm_address(__tag_reset(x)),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x,
(void *)x);
#define _ASM_IA64_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
+#include <asm/page.h>
/*
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
})
#define xchg(ptr, x) \
-((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
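Wrapping the cast in a GNU statement expression changes how the macro behaves as a bare statement: the likely motivation is that a parenthesized cast whose value is discarded triggers gcc's -Wunused-value, while a ({ ... }) block does not. A hypothetical stand-alone demo (the real __xchg is atomic; this stub is not):

	/* Non-atomic stub standing in for the real __xchg(). */
	static unsigned long __xchg_stub(unsigned long x, volatile int *ptr)
	{
		unsigned long old = *ptr;

		*ptr = (int)x;
		return old;
	}

	/* Cast form: "warning: value computed is not used" when discarded. */
	#define xchg_cast(ptr, x) \
		((int)__xchg_stub((unsigned long)(x), (ptr)))

	/* Statement-expression form: silent when discarded, same value otherwise. */
	#define xchg_stmt(ptr, x) \
		({ (int)__xchg_stub((unsigned long)(x), (ptr)); })

	int main(void)
	{
		volatile int v = 1;

		xchg_stmt(&v, 2);		/* result deliberately ignored: no warning */
		return xchg_stmt(&v, 3) - 2;	/* still usable as an expression */
	}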
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
- unsigned long cur_itm, new_itm, ticks;
+ unsigned long new_itm;
if (cpu_is_offline(smp_processor_id())) {
return IRQ_HANDLED;
}
new_itm = local_cpu_data->itm_next;
- cur_itm = ia64_get_itc();
- if (!time_after(cur_itm, new_itm)) {
+ if (!time_after(ia64_get_itc(), new_itm))
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
- cur_itm, new_itm);
- ticks = 1;
- } else {
- ticks = DIV_ROUND_UP(cur_itm - new_itm,
- local_cpu_data->itm_delta);
- new_itm += ticks * local_cpu_data->itm_delta;
- }
+ ia64_get_itc(), new_itm);
+
+ while (1) {
+ new_itm += local_cpu_data->itm_delta;
+
+ legacy_timer_tick(smp_processor_id() == time_keeper_id);
- if (smp_processor_id() != time_keeper_id)
- ticks = 0;
+ local_cpu_data->itm_next = new_itm;
- legacy_timer_tick(ticks);
+ if (time_after(new_itm, ia64_get_itc()))
+ break;
+
+ /*
+ * Allow IPIs to interrupt the timer loop.
+ */
+ local_irq_enable();
+ local_irq_disable();
+ }
do {
/*
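The rewritten ia64 handler trades the up-front DIV_ROUND_UP tick count for a catch-up loop: advance itm_next one period at a time, call legacy_timer_tick() once per elapsed period, and briefly re-enable interrupts between iterations so IPIs are not starved while catching up. A user-space analog of the loop shape (all names here are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	/* Signed-difference comparison, as the kernel's time_after() does. */
	static int time_after(uint64_t a, uint64_t b)
	{
		return (int64_t)(a - b) > 0;
	}

	int main(void)
	{
		uint64_t next = 100, period = 50;
		uint64_t now = 260;	/* stand-in for ia64_get_itc(): 3+ periods late */

		while (1) {
			next += period;
			printf("tick, next=%llu\n", (unsigned long long)next);
			if (time_after(next, now))
				break;
			/* kernel version: local_irq_enable()/local_irq_disable() here */
		}
		return 0;	/* ticks at 150, 200, 250, 300; 300 > 260 ends the loop */
	}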
#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) set_pte(ptep, ptev)
#define arch_kmap_local_post_map(vaddr, pteval) local_flush_tlb_one(vaddr)
#define arch_kmap_local_post_unmap(vaddr) local_flush_tlb_one(vaddr)
void __iomem *ioremap(phys_addr_t offset, unsigned long size);
#define iounmap iounmap
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
#include <asm-generic/io.h>
}
EXPORT_SYMBOL(ioremap);
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
{
/* If the page is from the fixmap pool then we just clear out
* the fixmap mapping.
depends on PA8X00 || PA7200
config MLONGCALLS
- bool "Enable the -mlong-calls compiler option for big kernels"
- default y if !MODULES || UBSAN || FTRACE
- default n
+ def_bool y if !MODULES || UBSAN || FTRACE
+ bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
-/* soft power switch support (power.c) */
-extern struct tasklet_struct power_tasklet;
-
#endif /* _ASM_PARISC_IRQ_H */
bb,<,n %r20, 31 - PSW_SM_I, intr_restore
nop
+ /* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+ ldil L%intr_restore, %r2
+ load32 preempt_schedule_irq, %r1
+ bv %r0(%r1)
+ ldo R%intr_restore(%r2), %r2
+#else
+ ldil L%intr_restore, %r1
BL preempt_schedule_irq, %r2
- nop
-
- b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
+ ldo R%intr_restore(%r1), %r2
+#endif
#endif /* CONFIG_PREEMPTION */
/*
nop; \
nop;
+#define SCV_ENTRY_FLUSH_SLOT \
+ SCV_ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
/*
* r10 must be free to use, r13 must be paca
*/
STF_ENTRY_BARRIER_SLOT; \
ENTRY_FLUSH_SLOT
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL \
+ STF_ENTRY_BARRIER_SLOT; \
+ SCV_ENTRY_FLUSH_SLOT
+
/*
* Macros for annotating the expected destination of (h)rfid
*
FTR_ENTRY_OFFSET 957b-958b; \
.popsection;
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __scv_entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
#define RFI_FLUSH_FIXUP_SECTION \
951: \
.pushsection __rfi_flush_fixup,"a"; \
extern long stf_barrier_fallback;
extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
#define flush_cache_kmaps() flush_cache_all()
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+ __set_pte_at(mm, vaddr, ptep, ptev, 1)
#define arch_kmap_local_post_map(vaddr, pteval) \
local_flush_tlb_page(NULL, vaddr)
#define arch_kmap_local_post_unmap(vaddr) \
obj-$(CONFIG_PPC64) += setup_64.o \
paca.o nvram_64.o note.o syscall_64.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
-obj-$(CONFIG_VDSO32) += vdso32/
+obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_DAWR) += dawr.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
-obj-$(CONFIG_PPC64) += vdso64/
+obj-$(CONFIG_PPC64) += vdso64_wrapper.o
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o
procfs-y := proc_powerpc.o
bne .Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
- INTERRUPT_TO_KERNEL
+ SCV_INTERRUPT_TO_KERNEL
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
ld r11,PACA_EXRFI+EX_R11(r13)
blr
+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
+ * (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+ li r10,0
+ mtmsrd r10,1
+ lbz r10,PACAIRQHAPPENED(r13)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ L1D_DISPLACEMENT_FLUSH
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ li r10,MSR_RI
+ mtmsrd r10,1
+ blr
+
TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
void replay_soft_interrupts(void)
{
+ struct pt_regs regs;
+
/*
- * We use local_paca rather than get_paca() to avoid all
- * the debug_smp_processor_id() business in this low level
- * function
+	 * Be careful here, calling these interrupt handlers can cause
+	 * softirqs to be raised, which the handlers may then run when
+	 * calling irq_exit(). That causes local_irq_enable() to be run,
+	 * which can then recurse into this function. Don't keep any state
+	 * across interrupt handler calls which may change underneath us.
+ *
+ * We use local_paca rather than get_paca() to avoid all the
+ * debug_smp_processor_id() business in this low level function.
*/
- unsigned char happened = local_paca->irq_happened;
- struct pt_regs regs;
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
* This is a higher priority interrupt than the others, so
* replay it first.
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
local_paca->irq_happened &= ~PACA_IRQ_HMI;
regs.trap = 0xe60;
handle_hmi_exception(&regs);
hard_irq_disable();
}
- if (happened & PACA_IRQ_DEC) {
+ if (local_paca->irq_happened & PACA_IRQ_DEC) {
local_paca->irq_happened &= ~PACA_IRQ_DEC;
regs.trap = 0x900;
timer_interrupt(&regs);
hard_irq_disable();
}
- if (happened & PACA_IRQ_EE) {
+ if (local_paca->irq_happened & PACA_IRQ_EE) {
local_paca->irq_happened &= ~PACA_IRQ_EE;
regs.trap = 0x500;
do_IRQ(&regs);
hard_irq_disable();
}
- if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
+ if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
if (IS_ENABLED(CONFIG_PPC_BOOK3E))
regs.trap = 0x280;
}
/* Book3E does not support soft-masking PMI interrupts */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
local_paca->irq_happened &= ~PACA_IRQ_PMI;
regs.trap = 0xf00;
performance_monitor_exception(&regs);
hard_irq_disable();
}
- happened = local_paca->irq_happened;
- if (happened & ~PACA_IRQ_HARD_DIS) {
+ if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
/*
* We are responding to the next interrupt, so interrupt-off
* latencies should be reset here.
KBUILD_CFLAGS := $(filter-out -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc,$(KBUILD_CFLAGS))
endif
-targets := $(obj-vdso32) vdso32.so.dbg
+targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday.o
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
GCOV_PROFILE := n
targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -Upowerpc
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so.dbg
-
# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso32ld_and_check)
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .globl vdso32_start, vdso32_end
- .balign PAGE_SIZE
-vdso32_start:
- .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
- .balign PAGE_SIZE
-vdso32_end:
-
- .previous
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso32_start, vdso32_end
+ .balign PAGE_SIZE
+vdso32_start:
+ .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
+ .balign PAGE_SIZE
+vdso32_end:
+
+ .previous
# Build rules
-targets := $(obj-vdso64) vdso64.so.dbg
+targets := $(obj-vdso64) vdso64.so.dbg vgettimeofday.o
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
GCOV_PROFILE := n
-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
asflags-y := -D__VDSO64__ -s
-obj-y += vdso64_wrapper.o
targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-$(obj)/vgettimeofday.o: %.o: %.c FORCE
-
-# Force dependency (incbin is bad)
-$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so.dbg
-
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso64ld_and_check)
.text
+/*
+ * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together
+ * are one function split in two parts. The kernel jumps to the former
+ * and the signal handler indirectly (by blr) returns to the latter.
+ * __kernel_sigtramp_rt64 needs to point to the return address so
+ * glibc can correctly identify the trampoline stack frame.
+ */
.balign 8
.balign IFETCH_ALIGN_BYTES
-V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
.Lsigrt_start:
bctrl /* call the handler */
+V_FUNCTION_END(__kernel_start_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
addi r1, r1, __SIGNAL_FRAMESIZE
li r0,__NR_rt_sigreturn
sc
/*
* Make the sigreturn code visible to the kernel.
*/
-VDSO_sigtramp_rt64 = __kernel_sigtramp_rt64;
+VDSO_sigtramp_rt64 = __kernel_start_sigtramp_rt64;
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .globl vdso64_start, vdso64_end
- .balign PAGE_SIZE
-vdso64_start:
- .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
- .balign PAGE_SIZE
-vdso64_end:
-
- .previous
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso64_start, vdso64_end
+ .balign PAGE_SIZE
+vdso64_start:
+ .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
+ .balign PAGE_SIZE
+vdso64_end:
+
+ .previous
__stop___entry_flush_fixup = .;
}
+ . = ALIGN(8);
+ __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+ __start___scv_entry_flush_fixup = .;
+ *(__scv_entry_flush_fixup)
+ __stop___scv_entry_flush_fixup = .;
+ }
+
. = ALIGN(8);
__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
__start___stf_exit_barrier_fixup = .;
long *start, *end;
int i;
- start = PTRRELOC(&__start___entry_flush_fixup);
- end = PTRRELOC(&__stop___entry_flush_fixup);
-
instrs[0] = 0x60000000; /* nop */
instrs[1] = 0x60000000; /* nop */
instrs[2] = 0x60000000; /* nop */
if (types & L1D_FLUSH_MTTRIG)
instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ start = PTRRELOC(&__start___entry_flush_fixup);
+ end = PTRRELOC(&__stop___entry_flush_fixup);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
}
+ start = PTRRELOC(&__start___scv_entry_flush_fixup);
+ end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+ for (; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+ if (types == L1D_FLUSH_FALLBACK)
+ patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+ BRANCH_SET_LINK);
+ else
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ }
+
printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
(types == L1D_FLUSH_NONE) ? "no" :
(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
break;
if (rev) {
/* reverse 32 bytes */
- buf.d[0] = byterev_8(reg->d[3]);
- buf.d[1] = byterev_8(reg->d[2]);
- buf.d[2] = byterev_8(reg->d[1]);
- buf.d[3] = byterev_8(reg->d[0]);
- reg = &buf;
+ union vsx_reg buf32[2];
+ buf32[0].d[0] = byterev_8(reg[1].d[1]);
+ buf32[0].d[1] = byterev_8(reg[1].d[0]);
+ buf32[1].d[0] = byterev_8(reg[0].d[1]);
+ buf32[1].d[1] = byterev_8(reg[0].d[0]);
+ memcpy(mem, buf32, size);
+ } else {
+ memcpy(mem, reg, size);
}
- memcpy(mem, reg, size);
break;
case 16:
/* stxv, stxvx, stxvl, stxvll */
default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
config MAXPHYSMEM_1GB
+ depends on 32BIT
bool "1GiB"
config MAXPHYSMEM_2GB
+ depends on 64BIT && CMODEL_MEDLOW
bool "2GiB"
config MAXPHYSMEM_128GB
depends on 64BIT && CMODEL_MEDANY
#endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr)	({ \
+	unsigned long _addr = (unsigned long)(vaddr); \
+	_addr >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
#endif /* __ASSEMBLY__ */
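The added range check closes a real hole: virt_to_pfn() is pure arithmetic, so an address outside the linear map (the kernel image, say) can still land on a pfn that pfn_valid() accepts. Rejecting sub-PAGE_OFFSET addresses first makes the pfn derivation meaningful. A toy sketch of the pattern with made-up constants:

	#include <stdbool.h>
	#include <stdint.h>

	/* Made-up values; the real ones come from the kernel's memory layout. */
	#define PAGE_SHIFT	12
	#define PAGE_OFFSET	0xffffffe000000000ULL

	static bool pfn_valid_stub(uint64_t pfn)
	{
		return pfn < 0x100000;	/* pretend the system has 4 GiB of RAM */
	}

	static bool virt_addr_valid_sketch(uint64_t vaddr)
	{
		/* Reject anything below the linear map first; otherwise the
		 * subtraction below would wrap and could alias a valid pfn. */
		if (vaddr < PAGE_OFFSET)
			return false;

		return pfn_valid_stub((vaddr - PAGE_OFFSET) >> PAGE_SHIFT);
	}

	int main(void)
	{
		return virt_addr_valid_sketch(PAGE_OFFSET + 0x2000) ? 0 : 1;
	}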
-#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+#ifdef CONFIG_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
#define SECTION_ALIGN (1 << 22)
#endif
-#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#else /* !CONFIG_STRICT_KERNEL_RWX */
#define SECTION_ALIGN L1_CACHE_BYTES
-#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#endif /* CONFIG_STRICT_KERNEL_RWX */
#endif /* _ASM_RISCV_SET_MEMORY_H */
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
- set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+
free_initmem_default(POISON_FREE_INITMEM);
}
max_pfn = PFN_DOWN(dram_end);
max_low_pfn = max_pfn;
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
- set_max_mapnr(max_low_pfn);
+ set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
- uv_info.max_guest_cpus = uvcb.max_guest_cpus;
+ uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
u32 max_num_sec_conf;
u64 max_guest_stor_addr;
u8 reserved88[158 - 136];
- u16 max_guest_cpus;
+ u16 max_guest_cpu_id;
u8 reserveda0[200 - 160];
} __packed __aligned(8);
unsigned long guest_cpu_stor_len;
unsigned long max_sec_stor_addr;
unsigned int max_num_sec_conf;
- unsigned short max_guest_cpus;
+ unsigned short max_guest_cpu_id;
};
extern struct uv_info uv_info;
struct kobj_attribute *attr, char *page)
{
return scnprintf(page, PAGE_SIZE, "%d\n",
- uv_info.max_guest_cpus);
+ uv_info.max_guest_cpu_id + 1);
}
static struct kobj_attribute uv_query_max_guest_cpus_attr =
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/bcd.h>
-#include <linux/rtc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/rtc.h>
CONFIG_ATALK=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_ATP867X=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
CONFIG_PARPORT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_NETDEVICES=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ROM=y
-CONFIG_IDE=y
CONFIG_SCSI=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
config G2_DMA
tristate "G2 Bus DMA support"
- depends on SH_DREAMCAST
- select SH_DMA_API
+ depends on SH_DREAMCAST && SH_DMA_API
help
This enables support for the DMA controller for the Dreamcast's
G2 bus. Drivers that want this will generally enable this on
#include <cpu/gpio.h>
#endif
-#define ARCH_NR_GPIOS 512
#include <asm-generic/gpio.h>
#ifdef CONFIG_GPIOLIB
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>
-#include <asm/thread_info.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
(the default value) say Y.
config NUMA
- bool "Non Uniform Memory Access (NUMA) Support"
+ bool "Non-Uniform Memory Access (NUMA) Support"
depends on MMU && SYS_SUPPORTS_NUMA
select ARCH_WANT_NUMA_VARIABLE_LOCALITY
default n
#include <asm/processor.h>
#include <asm/mmu_context.h>
-static int asids_seq_show(struct seq_file *file, void *iter)
+static int asids_debugfs_show(struct seq_file *file, void *iter)
{
struct task_struct *p;
return 0;
}
-static int asids_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, asids_seq_show, inode->i_private);
-}
-
-static const struct file_operations asids_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = asids_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(asids_debugfs);
static int __init asids_debugfs_init(void)
{
CACHE_TYPE_UNIFIED,
};
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int cache_debugfs_show(struct seq_file *file, void *iter)
{
unsigned int cache_type = (unsigned int)file->private;
struct cache_info *cache;
return 0;
}
-static int cache_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cache_seq_show, inode->i_private);
-}
-
-static const struct file_operations cache_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = cache_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cache_debugfs);
static int __init cache_debugfs_init(void)
{
return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
-static int pmb_seq_show(struct seq_file *file, void *iter)
+static int pmb_debugfs_show(struct seq_file *file, void *iter)
{
int i;
return 0;
}
-static int pmb_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmb_seq_show, NULL);
-}
-
-static const struct file_operations pmb_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = pmb_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);
static int __init pmb_debugfs_init(void)
{
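All three sh conversions follow the same recipe: DEFINE_SHOW_ATTRIBUTE(name) expects a name_show(struct seq_file *, void *) function and generates the single_open() open/read/release boilerplate plus a name_fops, which is why each show function is renamed to the matching *_debugfs_show first. A minimal sketch of the pattern with a hypothetical "foo" node:

	#include <linux/debugfs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	static int foo_show(struct seq_file *m, void *unused)
	{
		seq_puts(m, "hello from foo\n");
		return 0;
	}
	/* Generates foo_open()/foo_fops wired up through single_open(). */
	DEFINE_SHOW_ATTRIBUTE(foo);

	static int __init foo_debugfs_init(void)
	{
		debugfs_create_file("foo", 0444, NULL, NULL, &foo_fops);
		return 0;
	}
	device_initcall(foo_debugfs_init);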
#define flush_cache_kmaps() flush_cache_all()
-/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
-#define arch_kmap_local_post_map(vaddr, pteval) flush_cache_all()
-#define arch_kmap_local_post_unmap(vaddr) flush_cache_all()
-
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval) flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr) flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval) flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr) flush_tlb_all()
#endif /* __KERNEL__ */
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
select NO_DMA
- select ARCH_HAS_SET_MEMORY
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select HAVE_GCC_PLUGINS
file = NULL;
backing_file = strsep(&str, ",:");
- if (*backing_file == '\0')
+ if (backing_file && *backing_file == '\0')
backing_file = NULL;
serial = strsep(&str, ",:");
- if (*serial == '\0')
+ if (serial && *serial == '\0')
serial = NULL;
if (backing_file && ubd_dev->no_cow) {
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use the io thread anyway. */
}
- stack = alloc_stack(0);
+ stack = alloc_stack(0, 0);
io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
&thread_fd);
if(io_pid < 0){
}
os_close_file(vu_dev->sock);
+ kfree(vu_dev);
}
/* Platform device */
if (!pdata)
return -EINVAL;
- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
+ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
if (!vu_dev)
return -ENOMEM;
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
- return (void __iomem *)(unsigned long)offset;
+ return NULL;
}
#define iounmap iounmap
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
/*
* The i386 can't do page protection for execute, and considers that the same
+++ /dev/null
-#include <asm-generic/set_memory.h>
#define UML_ROUND_UP(addr) \
((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
-extern unsigned long alloc_stack(int atomic);
+extern unsigned long alloc_stack(int order, int atomic);
extern void free_stack(unsigned long stack, int order);
struct pt_regs;
// SPDX-License-Identifier: GPL-2.0
#include <linux/kmsg_dump.h>
#include <linux/console.h>
+#include <linux/string.h>
#include <shared/init.h>
#include <shared/kern.h>
#include <os.h>
if (!console_trylock())
return;
- for_each_console(con)
- break;
+ for_each_console(con) {
+		if (strcmp(con->name, "tty") == 0 &&
+ (con->flags & (CON_ENABLED | CON_CONSDEV)) != 0) {
+ break;
+ }
+ }
console_unlock();
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>
-#include <asm/set_memory.h>
/*
* This is a per-cpu array. A processor only modifies its entry and it only
free_pages(stack, order);
}
-unsigned long alloc_stack(int atomic)
+unsigned long alloc_stack(int order, int atomic)
{
- unsigned long addr;
+ unsigned long page;
gfp_t flags = GFP_KERNEL;
if (atomic)
flags = GFP_ATOMIC;
- addr = __get_free_pages(flags, 1);
+ page = __get_free_pages(flags, order);
- set_memory_ro(addr, 1);
-
- return addr + PAGE_SIZE;
+ return page;
}
static inline void set_current(struct task_struct *task)
return 1;
}
+
+static void time_travel_set_start(void)
+{
+ if (time_travel_start_set)
+ return;
+
+ switch (time_travel_mode) {
+ case TT_MODE_EXTERNAL:
+ time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
+ /* controller gave us the *current* time, so adjust by that */
+ time_travel_ext_get_time();
+ time_travel_start -= time_travel_time;
+ break;
+ case TT_MODE_INFCPU:
+ case TT_MODE_BASIC:
+ if (!time_travel_start_set)
+ time_travel_start = os_persistent_clock_emulation();
+ break;
+ case TT_MODE_OFF:
+ /* we just read the host clock with os_persistent_clock_emulation() */
+ break;
+ }
+
+ time_travel_start_set = true;
+}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
#define time_travel_start_set 0
#define time_travel_start 0
{
}
+static inline void time_travel_set_start(void)
+{
+}
+
/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);
{
long long nsecs;
+ time_travel_set_start();
+
if (time_travel_mode != TT_MODE_OFF)
nsecs = time_travel_start + time_travel_time;
else
void __init time_init(void)
{
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- switch (time_travel_mode) {
- case TT_MODE_EXTERNAL:
- time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
- /* controller gave us the *current* time, so adjust by that */
- time_travel_ext_get_time();
- time_travel_start -= time_travel_time;
- break;
- case TT_MODE_INFCPU:
- case TT_MODE_BASIC:
- if (!time_travel_start_set)
- time_travel_start = os_persistent_clock_emulation();
- break;
- case TT_MODE_OFF:
- /* we just read the host clock with os_persistent_clock_emulation() */
- break;
- }
-#endif
-
timer_set_signal_handler();
late_time_init = um_timer_setup;
}
vma = vma->vm_next;
}
}
-
-struct page_change_data {
- unsigned int set_mask, clear_mask;
-};
-
-static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
-{
- struct page_change_data *cdata = data;
- pte_t pte = READ_ONCE(*ptep);
-
- pte_clear_bits(pte, cdata->clear_mask);
- pte_set_bits(pte, cdata->set_mask);
-
- set_pte(ptep, pte);
- return 0;
-}
-
-static int change_memory(unsigned long start, unsigned long pages,
- unsigned int set_mask, unsigned int clear_mask)
-{
- unsigned long size = pages * PAGE_SIZE;
- struct page_change_data data;
- int ret;
-
- data.set_mask = set_mask;
- data.clear_mask = clear_mask;
-
- ret = apply_to_page_range(&init_mm, start, size, change_page_range,
- &data);
-
- flush_tlb_kernel_range(start, start + size);
-
- return ret;
-}
-
-int set_memory_ro(unsigned long addr, int numpages)
-{
- return change_memory(addr, numpages, 0, _PAGE_RW);
-}
-
-int set_memory_rw(unsigned long addr, int numpages)
-{
- return change_memory(addr, numpages, _PAGE_RW, 0);
-}
-
-int set_memory_nx(unsigned long addr, int numpages)
-{
- return -EOPNOTSUPP;
-}
-
-int set_memory_x(unsigned long addr, int numpages)
-{
- return -EOPNOTSUPP;
-}
#include <mem_user.h>
#include <os.h>
-#define DEFAULT_COMMAND_LINE "root=98:0"
+#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
int ncpus = 1;
/* Set in early boot */
-static int have_root __initdata = 0;
+static int have_root __initdata;
+static int have_console __initdata;
/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 32 * 1024 * 1024;
" this flag is not needed to run gdb on UML in skas mode\n\n"
);
+static int __init uml_console_setup(char *line, int *add)
+{
+ have_console = 1;
+ return 0;
+}
+
+__uml_setup("console=", uml_console_setup,
+"console=<preferred console>\n"
+" Specify the preferred console output driver\n\n"
+);
+
static int __init Usage(char *line, int *add)
{
const char **p;
add_arg(argv[i]);
}
if (have_root == 0)
- add_arg(DEFAULT_COMMAND_LINE);
+ add_arg(DEFAULT_COMMAND_LINE_ROOT);
+
+ if (have_console == 0)
+ add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
host_task_size = os_get_top_address();
/*
unsigned long stack, sp;
int pid, fds[2], ret, n;
- stack = alloc_stack(__cant_sleep());
+ stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
unsigned long stack, sp;
int pid, status, err;
- stack = alloc_stack(__cant_sleep());
+ stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
*/
void os_idle_sleep(void)
{
- pause();
+ struct itimerspec its;
+ sigset_t set, old;
+
+ /* block SIGALRM while we analyze the timer state */
+ sigemptyset(&set);
+ sigaddset(&set, SIGALRM);
+ sigprocmask(SIG_BLOCK, &set, &old);
+
+ /* check the timer, and if it'll fire then wait for it */
+ timer_gettime(event_high_res_timer, &its);
+ if (its.it_value.tv_sec || its.it_value.tv_nsec)
+ sigsuspend(&old);
+ /* either way, restore the signal mask */
+ sigprocmask(SIG_UNBLOCK, &set, NULL);
}
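The old pause() had a classic lost-wakeup race: SIGALRM could fire after the timer state was checked but before pause() blocked, leaving the idle loop asleep with nothing to wake it. The replacement is the textbook block/check/sigsuspend idiom, in which sigsuspend() atomically swaps in the old mask and waits. A self-contained host-side demo of the idiom:

	#include <signal.h>
	#include <stdio.h>

	static volatile sig_atomic_t fired;

	static void on_alarm(int sig)
	{
		fired = 1;
	}

	int main(void)
	{
		sigset_t set, old;

		signal(SIGALRM, on_alarm);

		/* 1. Block the signal so it cannot slip in between steps 2 and 3. */
		sigemptyset(&set);
		sigaddset(&set, SIGALRM);
		sigprocmask(SIG_BLOCK, &set, &old);

		raise(SIGALRM);		/* now pending, but blocked */

		/* 2. Inspect state; 3. atomically unblock and wait if needed. */
		if (!fired)
			sigsuspend(&old);	/* returns once the handler has run */

		sigprocmask(SIG_SETMASK, &old, NULL);
		printf("fired=%d\n", (int)fired);
		return 0;
	}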
unsigned int nr)
{
if (likely(nr < IA32_NR_syscalls)) {
- instrumentation_begin();
nr = array_index_nospec(nr, IA32_NR_syscalls);
regs->ax = ia32_sys_call_table[nr](regs);
- instrumentation_end();
}
}
* or may not be necessary, but it matches the old asm behavior.
*/
nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+ instrumentation_begin();
do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
syscall_exit_to_user_mode(regs);
}
res = get_user(*(u32 *)®s->bp,
(u32 __user __force *)(unsigned long)(u32)regs->sp);
}
- instrumentation_end();
if (res) {
/* User code screwed up. */
regs->ax = -EFAULT;
+
+ instrumentation_end();
syscall_exit_to_user_mode(regs);
return false;
}
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
syscall_exit_to_user_mode(regs);
return true;
}
#include <asm/export.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
- .macro THUNK name, func, put_ret_addr_in_rdi=0
+ .macro THUNK name, func
SYM_FUNC_START_NOALIGN(\name)
pushq %rbp
movq %rsp, %rbp
pushq %r10
pushq %r11
- .if \put_ret_addr_in_rdi
- /* 8(%rbp) is return addr on stack */
- movq 8(%rbp), %rdi
- .endif
-
call \func
- jmp .L_restore
+ jmp __thunk_restore
SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
#endif
#ifdef CONFIG_PREEMPTION
-SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
+SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
popq %r11
popq %r10
popq %r9
popq %rdi
popq %rbp
ret
- _ASM_NOKPROBE(.L_restore)
-SYM_CODE_END(.L_restore)
+ _ASM_NOKPROBE(__thunk_restore)
+SYM_CODE_END(__thunk_restore)
#endif
.resume = hv_resume,
};
+static void (* __initdata old_setup_percpu_clockev)(void);
+
+static void __init hv_stimer_setup_percpu_clockev(void)
+{
+ /*
+ * Ignore any errors in setting up stimer clockevents
+ * as we can run with the LAPIC timer as a fallback.
+ */
+ (void)hv_stimer_alloc();
+
+ /*
+ * Still register the LAPIC timer, because the direct-mode STIMER is
+ * not supported by old versions of Hyper-V. This also allows users
+ * to switch to LAPIC timer via /sys, if they want to.
+ */
+ if (old_setup_percpu_clockev)
+ old_setup_percpu_clockev();
+}
+
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
/*
- * Ignore any errors in setting up stimer clockevents
- * as we can run with the LAPIC timer as a fallback.
+ * hyperv_init() is called before LAPIC is initialized: see
+ * apic_intr_mode_init() -> x86_platform.apic_post_init() and
+ * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
+ * depends on LAPIC, so hv_stimer_alloc() should be called from
+ * x86_init.timers.setup_percpu_clockev.
*/
- (void)hv_stimer_alloc();
+ old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
+ x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
hv_apic_init();
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
* disables preemption so be careful if you intend to use it for long periods
* of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
* irq_fpu_usable() if it is possible.
*/
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387 _BITUL(0) /* 387 state will be initialized */
+#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+ kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
/*
* Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
* A context switch will (and softirq might) save CPU's FPU registers to
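The split lets a caller initialize only the state it actually needs: legacy 387/MMX users need fninit but not MXCSR, SSE/AVX users the reverse, and the kernel_fpu_begin() wrapper keeps the old do-both behavior for unconverted code. A hedged sketch of a hypothetical SSE-only caller (kernel context; the helper name and elided body are illustrative, not from the patch):

	#include <asm/fpu/api.h>
	#include <linux/types.h>

	/* Hypothetical SSE-only helper: MXCSR must be sane, but the 387
	 * state is never touched, so the comparatively slow fninit can be
	 * skipped by requesting only KFPU_MXCSR. */
	static void sse_fill(void *dst, size_t len)
	{
		kernel_fpu_begin_mask(KFPU_MXCSR);
		/* ... SSE stores over dst would go here ... */
		kernel_fpu_end();
	}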
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER, exc_xen_unknown_trap);
#endif
/* Device interrupts common/spurious */
#define INTEL_FAM6_LAKEFIELD 0x8A
#define INTEL_FAM6_ALDERLAKE 0x97
+#define INTEL_FAM6_ALDERLAKE_L 0x9A
/* "Small Core" Processors (Atom) */
* think of extending them - you will be slapped with a stinking trout or a frozen
* shark will reach you, wherever you are! You've been warned.
*/
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);
return EAX_EDX_VAL(val, low, high);
}
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
asm volatile("1: wrmsr\n"
"2:\n"
#define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+extern unsigned int __max_die_per_package;
+
#ifdef CONFIG_SMP
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
extern unsigned int __max_logical_packages;
#define topology_max_packages() (__max_logical_packages)
-extern unsigned int __max_die_per_package;
-
static inline int topology_max_die_per_package(void)
{
return __max_die_per_package;
u32 ecx;
ecx = cpuid_ecx(0x8000001e);
- nodes_per_socket = ((ecx >> 8) & 7) + 1;
+ __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
- nodes_per_socket = ((value >> 3) & 7) + 1;
+ __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
}
if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
* that out because it's an indirect call. Annotate it.
*/
instrumentation_begin();
- trace_hardirqs_off_finish();
+
machine_check_vector(regs);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
+
instrumentation_end();
irqentry_nmi_exit(regs, irq_state);
}
{
irqentry_enter_from_user_mode(regs);
instrumentation_begin();
+
machine_check_vector(regs);
+
instrumentation_end();
irqentry_exit_to_user_mode(regs);
}
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
-#ifdef CONFIG_SMP
unsigned int __max_die_per_package __read_mostly = 1;
EXPORT_SYMBOL(__max_die_per_package);
+#ifdef CONFIG_SMP
/*
* Check if given CPUID extended toplogy "leaf" is implemented
*/
}
EXPORT_SYMBOL(copy_fpregs_to_fpstate);
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
preempt_disable();
}
__cpu_invalidate_fpregs_state();
- if (boot_cpu_has(X86_FEATURE_XMM))
+ /* Put sane initial values into the control registers. */
+ if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
ldmxcsr(MXCSR_DEFAULT);
- if (boot_cpu_has(X86_FEATURE_FPU))
+ if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
asm volatile ("fninit");
}
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
void kernel_fpu_end(void)
{
return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
u32 low, high;
u16 d2;
u8 d1;
+	/* If the instruction ran in kernel mode and the I/O buffer is in kernel space */
+ if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+ memcpy(dst, buf, size);
+ return ES_OK;
+ }
+
switch (size) {
case 1:
memcpy(&d1, buf, 1);
u16 d2;
u8 d1;
+	/* If the instruction ran in kernel mode and the I/O buffer is in kernel space */
+ if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+ memcpy(buf, src, size);
+ return ES_OK;
+ }
+
switch (size) {
case 1:
if (get_user(d1, s))
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>
+#include <linux/syscore_ops.h>
#include <asm/acpi.h>
#include <asm/desc.h>
this_cpu_write(arch_prev_mperf, mperf);
}
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+ .resume = init_counter_refs,
+};
+
+static void register_freq_invariance_syscore_ops(void)
+{
+ /* Bail out if registered already. */
+ if (freq_invariance_syscore_ops.node.prev)
+ return;
+
+ register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
+
static void init_freq_invariance(bool secondary, bool cppc_ready)
{
bool ret = false;
if (ret) {
init_counter_refs();
static_branch_enable(&arch_scale_freq_key);
+ register_freq_invariance_syscore_ops();
pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
} else {
pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
r = -EFAULT;
- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+ if (copy_to_user(entries, vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
return 0;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
+ if (efer & EFER_LMA)
+ ctxt->mode = X86EMUL_MODE_PROT64;
return X86EMUL_CONTINUE;
}
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
-static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
-static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{ \
unsigned long val) \
{ \
vcpu->arch.regs[VCPU_REGS_##uname] = val; \
- kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname); \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
-static inline u64 rsvd_bits(int s, int e)
+static __always_inline u64 rsvd_bits(int s, int e)
{
+ BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
+
+ if (__builtin_constant_p(e))
+ BUILD_BUG_ON(e > 63);
+ else
+ e &= 63;
+
if (e < s)
return 0;
}
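Assuming the usual mask construction, ((2ULL << (e - s)) - 1) << s, the helper yields a contiguous run of set bits from s through e, e.g.:

    rsvd_bits(2, 3);    /* 0xc: bits 2 and 3 */
    rsvd_bits(52, 63);  /* 0xfff0000000000000: physical-address bits above a
                           52-bit MAXPHYADDR, as used for cr3_lm_rsvd_bits */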
/*
- * Clear non-leaf entries (and free associated page tables) which could
- * be replaced by large mappings, for GFNs within the slot.
+ * Clear leaf entries which could be replaced by large mappings, for
+ * GFNs within the slot.
*/
static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
tdp_root_for_each_pte(iter, root, start, end) {
if (!is_shadow_present_pte(iter.old_spte) ||
- is_last_spte(iter.old_spte, iter.level))
+ !is_last_spte(iter.old_spte, iter.level))
continue;
pfn = spte_to_pfn(iter.old_spte);
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (WARN_ON(!is_guest_mode(vcpu)))
+ return true;
+
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
bool vmcb12_lma;
if ((vmcb12->save.efer & EFER_SVME) == 0)
vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
- if (!vmcb12_lma) {
- if (vmcb12->save.cr4 & X86_CR4_PAE) {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
- return false;
- } else {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
- return false;
- }
- } else {
+ if (vmcb12_lma) {
if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
!(vmcb12->save.cr0 & X86_CR0_PE) ||
- (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+ (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
return false;
}
if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
unsigned long first, last;
int ret;
+ lockdep_assert_held(&kvm->lock);
+
if (ulen == 0 || uaddr + ulen < uaddr)
return ERR_PTR(-EINVAL);
if (!region)
return -ENOMEM;
+ mutex_lock(&kvm->lock);
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
if (IS_ERR(region->pages)) {
ret = PTR_ERR(region->pages);
+ mutex_unlock(&kvm->lock);
goto e_free;
}
+ region->uaddr = range->addr;
+ region->size = range->size;
+
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
/*
* The guest may change the memory encryption attribute from C=0 -> C=1
* or vice versa for this memory range. Let's make sure caches are
*/
sev_clflush_pages(region->pages, region->npages);
- region->uaddr = range->addr;
- region->size = range->size;
-
- mutex_lock(&kvm->lock);
- list_add_tail(&region->list, &sev->regions_list);
- mutex_unlock(&kvm->lock);
-
return ret;
e_free:
* to be returned:
* GPRs RAX, RBX, RCX, RDX
*
- * Copy their values to the GHCB if they are dirty.
+ * Copy their values, even if they may not have been written during the
+ * VM-Exit. It's the guest's responsibility to not consume random data.
*/
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
- ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
- ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
- ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
- ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+ ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+ ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+ ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+ ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}
static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
return 0;
}
+ if (sev_active()) {
+ pr_info("KVM is unsupported when running as an SEV guest\n");
+ return 0;
+ }
+
return 1;
}
{
struct vcpu_svm *svm = to_svm(vcpu);
+ trace_kvm_entry(vcpu);
+
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
}
/* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
-#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U
#define MSR_INVALID 0xffffffffU
extern int sev;
return 0;
}
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_host_map *map;
- struct page *page;
- u64 hpa;
/*
* hv_evmcs may end up being not mapped after migration (when
}
}
+ return true;
+}
+
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_host_map *map;
+ struct page *page;
+ u64 hpa;
+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
else
exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+
+ return true;
+}
+
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+ if (!nested_get_evmcs_page(vcpu))
+ return false;
+
+ if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ return false;
+
return true;
}
if (is_guest_mode(vcpu)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
- } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
- if (vmx->nested.hv_evmcs)
- copy_enlightened_to_vmcs12(vmx);
- else if (enable_shadow_vmcs)
- copy_shadow_to_vmcs12(vmx);
+ } else {
+ copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
+ if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+ if (vmx->nested.hv_evmcs)
+ copy_enlightened_to_vmcs12(vmx);
+ else if (enable_shadow_vmcs)
+ copy_shadow_to_vmcs12(vmx);
+ }
}
BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
.hv_timer_pending = nested_vmx_preemption_timer_pending,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
- .get_nested_state_pages = nested_get_vmcs12_pages,
+ .get_nested_state_pages = vmx_get_nested_state_pages,
.write_log_dirty = nested_vmx_write_pml_buffer,
.enable_evmcs = nested_enable_evmcs,
.get_evmcs_version = nested_get_evmcs_version,
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
x86_pmu.num_counters_gp);
+ eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+ eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
x86_pmu.num_counters_fixed);
+ edx.split.bit_width_fixed = min_t(int,
+ edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}
if (vmx->emulation_required)
return EXIT_FASTPATH_NONE;
+ trace_kvm_entry(vcpu);
+
if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
vmcs_write32(PLE_WINDOW, vmx->ple_window);
switch (index) {
case MSR_IA32_TSX_CTRL:
/*
- * No need to pass TSX_CTRL_CPUID_CLEAR through, so
- * let's avoid changing CPUID bits under the host
- * kernel's feet.
+ * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
+ * interception. Keep the host value unchanged to avoid
+ * changing CPUID bits under the host kernel's feet.
+ *
+ * hle=0, rtm=0, tsx_ctrl=1 can be found with some
+ * combinations of new kernel and old userspace. If
+ * those guests run on a tsx=off host, do allow guests
+ * to use TSX_CTRL, but do not change the value on the
+ * host so that TSX remains always disabled.
*/
- vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ if (boot_cpu_has(X86_FEATURE_RTM))
+ vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ else
+ vmx->guest_uret_msrs[j].mask = 0;
break;
default:
vmx->guest_uret_msrs[j].mask = -1ull;
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
if (!boot_cpu_has_bug(X86_BUG_MDS))
data |= ARCH_CAP_MDS_NO;
- /*
- * On TAA affected systems:
- * - nothing to do if TSX is disabled on the host.
- * - we emulate TSX_CTRL if present on the host.
- * This lets the guest use VERW to clear CPU buffers.
- */
- if (!boot_cpu_has(X86_FEATURE_RTM))
- data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
- else if (!boot_cpu_has_bug(X86_BUG_TAA))
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ /*
+ * If RTM=0 because the kernel has disabled TSX, the host might
+ * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
+ * and therefore knows that there cannot be TAA) but keep
+ * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
+ * and we want to allow migrating those guests to tsx=off hosts.
+ */
+ data &= ~ARCH_CAP_TAA_NO;
+ } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
data |= ARCH_CAP_TAA_NO;
+ } else {
+ /*
+ * Nothing to do here; we emulate TSX_CTRL if present on the
+ * host so the guest can choose between disabling TSX or
+ * using VERW to clear CPU buffers.
+ */
+ }
return data;
}
{
process_nmi(vcpu);
+ if (kvm_check_request(KVM_REQ_SMI, vcpu))
+ process_smi(vcpu);
+
/*
* In guest mode, payload delivery should be deferred,
* so that the L1 hypervisor can intercept #PF before
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
- ;
- else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+ if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
r = 0;
goto out;
}
kvm_x86_ops.request_immediate_exit(vcpu);
}
- trace_kvm_entry(vcpu);
-
fpregs_assert_state_consistent();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
switch_fpu_return();
*/
if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
return false;
+ if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+ return false;
} else {
/*
* Not in 64-bit mode: EFER.LMA is clear and the code
fx_init(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+ vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
return 0;
old_npages = slot->npages;
- hva = 0;
+ hva = slot->userspace_addr;
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
__reserved_bits |= X86_CR4_UMIP; \
if (!__cpu_has(__c, X86_FEATURE_VMX)) \
__reserved_bits |= X86_CR4_VMXE; \
+ if (!__cpu_has(__c, X86_FEATURE_PCID)) \
+ __reserved_bits |= X86_CR4_PCIDE; \
__reserved_bits; \
})
#include <asm/fpu/api.h>
#include <asm/asm.h>
+/*
+ * Use KFPU_387. MMX instructions are not affected by MXCSR,
+ * but both AMD and Intel documentation states that even integer MMX
+ * operations will result in #MF if an exception is pending in FCW.
+ *
+ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
+ * any subsequent user of the 387 stack will reinitialize it using
+ * KFPU_387.
+ */
+
void *_mmx_memcpy(void *to, const void *from, size_t len)
{
void *p;
p = to;
i = len >> 6; /* len/64 */
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
"1: prefetch (%0)\n" /* This set is 28 bytes */
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
/*
* maybe the prefetch stuff can go before the expensive fnsave...
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
"1: prefetch (%0)\n"
{
return sev_status & MSR_AMD64_SEV_ENABLED;
}
+EXPORT_SYMBOL_GPL(sev_active);
/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)
exc_debug(regs);
}
+DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
+{
+ /* This should never happen and there is no way to handle it. */
+ pr_err("Unknown trap in Xen PV mode.");
+ BUG();
+}
+
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
{
unsigned int nr;
bool ist_okay = false;
+ bool found = false;
/*
* Replace trap handler addresses with Xen-specific ones.
if (*addr == entry->orig) {
*addr = entry->xen;
ist_okay = entry->ist_okay;
+ found = true;
break;
}
}
nr = (*addr - (void *)early_idt_handler_array[0]) /
EARLY_IDT_HANDLER_SIZE;
*addr = (void *)xen_early_idt_handler_array[nr];
+ found = true;
}
- if (WARN_ON(ist != 0 && !ist_okay))
+ if (!found)
+ *addr = (void *)xen_asm_exc_xen_unknown_trap;
+
+ if (WARN_ON(found && ist != 0 && !ist_okay))
return false;
return true;
smp_ops.cpu_die = xen_hvm_cpu_die;
if (!xen_have_vector_callback) {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
nopvspin = true;
+#endif
return;
}
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
+xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback
__INIT
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
- bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+ bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
- bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+ bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
* shortage.
*/
/* no more than ~18% of tags for async I/O */
- bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+ bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
- bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+ bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
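Worked example of the new per-word arithmetic: with bt->sb.shift = 6 (64 bits per sbitmap word), the four depths come out as:

    word_depths[0][0] = max(64U >> 1, 1U)        /* = 32, ~50% async       */
    word_depths[0][1] = max((64U * 3) >> 2, 1U)  /* = 48, ~75% sync writes */
    word_depths[1][0] = max((64U * 3) >> 4, 1U)  /* = 12, ~18% async       */
    word_depths[1][1] = max((64U * 6) >> 4, 1U)  /* = 24, ~37% sync writes */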
*/
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
+ might_sleep();
+
spin_lock_irq(&blkcg->lock);
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
- if (spin_trylock(&q->queue_lock)) {
- blkg_destroy(blkg);
- spin_unlock(&q->queue_lock);
- } else {
+ if (need_resched() || !spin_trylock(&q->queue_lock)) {
+ /*
+ * Given that the system can accumulate a huge number
+ * of blkgs in pathological cases, check to see if we
+ * need to reschedule to avoid a softlockup.
+ */
spin_unlock_irq(&blkcg->lock);
- cpu_relax();
+ cond_resched();
spin_lock_irq(&blkcg->lock);
+ continue;
}
+
+ blkg_destroy(blkg);
+ spin_unlock(&q->queue_lock);
}
spin_unlock_irq(&blkcg->lock);
struct request_queue *q = hctx->queue;
struct blk_mq_tag_set *set = q->tag_set;
- if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return true;
users = atomic_read(&set->active_queues_shared_sbitmap);
} else {
void set_capacity(struct gendisk *disk, sector_t sectors)
{
struct block_device *bdev = disk->part0;
+ unsigned long flags;
- spin_lock(&bdev->bd_size_lock);
+ spin_lock_irqsave(&bdev->bd_size_lock, flags);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
- spin_unlock(&bdev->bd_size_lock);
+ spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
}
EXPORT_SYMBOL(set_capacity);
static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
- spin_lock(&bdev->bd_size_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdev->bd_size_lock, flags);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
- spin_unlock(&bdev->bd_size_lock);
+ spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
}
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
err = blk_alloc_devt(bdev, &devt);
if (err)
- goto out_bdput;
+ goto out_put;
pdev->devt = devt;
/* delay uevent until 'holders' subdir is created */
if (ret)
goto error_free_key;
- if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
+ if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
+ sig->data_size) {
ret = cert_sig_digest_update(sig, tfm);
if (ret)
goto error_free_key;
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
+ if (!min)
+ min = 1;
speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
ncomp = (struct acpi_iort_named_component *)node->node_data;
+ if (!ncomp->memory_address_limit) {
+ pr_warn(FW_BUG "Named component missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
rc = (struct acpi_iort_root_complex *)node->node_data;
+ if (!rc->memory_address_limit) {
+ pr_warn(FW_BUG "Root complex missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
- dev->coherent_dma_mask = mask;
- *dev->dma_mask = mask;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
}
*dma_addr = dmaaddr;
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
- if (!adev->data.of_compatible)
- return 0;
-
- if (len > 0 && add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
+ if (adev->data.of_compatible)
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ else
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
if (!device)
return -EINVAL;
+ *device = NULL;
+
status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
if (dep->supplier == handle) {
acpi_bus_get_device(dep->consumer, &adev);
- if (!adev)
- continue;
- adev->dep_unmet--;
- if (!adev->dep_unmet)
- acpi_bus_attach(adev, true);
+ if (adev) {
+ adev->dep_unmet--;
+ if (!adev->dep_unmet)
+ acpi_bus_attach(adev, true);
+ }
list_del(&dep->node);
kfree(dep);
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
+ struct mutex thermal_check_lock;
+ refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
return 0;
}
-static void acpi_thermal_check(void *data)
-{
- struct acpi_thermal *tz = data;
-
- thermal_zone_device_update(tz->thermal_zone,
- THERMAL_EVENT_UNSPECIFIED);
-}
-
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
Driver Interface
-------------------------------------------------------------------------- */
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
- acpi_thermal_check(tz);
+
+ /*
+ * In general, it is not sufficient to check the pending bit, because
+ * subsequent instances of this function may be queued after one of them
+ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
+ * one of them is running, though, because it may have done the actual
+ * check some time ago, so allow at least one of them to block on the
+ * mutex while another one is running the update.
+ */
+ if (!refcount_dec_not_one(&tz->thermal_check_count))
+ return;
+
+ mutex_lock(&tz->thermal_check_lock);
+
+ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+ refcount_inc(&tz->thermal_check_count);
+
+ mutex_unlock(&tz->thermal_check_lock);
}
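The initial count of 3 (set in acpi_thermal_add() below) is what bounds the concurrency here: refcount_dec_not_one() decrements unless the counter is already 1, so at most two instances get past the check, one running the update and one blocked on the mutex, as in this trace:

    refcount_set(&tz->thermal_check_count, 3);
    refcount_dec_not_one(...);  /* 3 -> 2: first worker runs the update     */
    refcount_dec_not_one(...);  /* 2 -> 1: second worker waits on the mutex */
    refcount_dec_not_one(...);  /* stays 1, returns false: third one bails  */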
static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
+ refcount_set(&tz->thermal_check_count, 3);
+ mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+ acpi_queue_thermal_check(tz);
return AE_OK;
}
#endif
#endif /* !CONFIG_SRCU */
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
struct device_link *link;
int ret;
- if (dev == target)
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
struct device *con = link->consumer;
char *buf;
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_sup_dev;
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
return;
}
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
}
link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
- dev_set_name(&link->link_dev, "%s--%s",
- dev_name(supplier), dev_name(consumer));
+ dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+ dev_bus_name(supplier), dev_name(supplier),
+ dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(consumer);
put_device(supplier);
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
- return drv ? drv->name :
- (dev->bus ? dev->bus->name :
- (dev->class ? dev->class->name : ""));
+ return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
device_pm_check_callbacks(dev);
- /*
- * Reorder successfully probed devices to the end of the device list.
- * This ensures that suspend/resume order matches probe order, which
- * is usually what drivers rely on.
- */
- device_pm_move_to_tail(dev);
-
/*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
return -ERANGE;
nvec = platform_irq_count(dev);
+ if (nvec < 0)
+ return nvec;
if (nvec < minvec)
return -ENOSPC;
if (!sock)
return err;
+ /*
+ * We need to make sure we don't get any errant requests while we're
+ * reallocating the ->socks array.
+ */
+ blk_mq_freeze_queue(nbd->disk->queue);
+
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
+ blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}
#define CREATE_TRACE_POINTS
#include "trace.h"
-#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
+static inline sector_t mb_to_sects(unsigned long mb)
+{
+ return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
+}
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
return -EINVAL;
}
- zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
- dev_capacity_sects = MB_TO_SECTS(dev->size);
- dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
- dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
- if (dev_capacity_sects & (dev->zone_size_sects - 1))
- dev->nr_zones++;
+ zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ dev_capacity_sects = mb_to_sects(dev->size);
+ dev->zone_size_sects = mb_to_sects(dev->zone_size);
+ dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
+ >> ilog2(dev->zone_size_sects);
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
GFP_KERNEL | __GFP_ZERO);
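Worked example of the rounding change, for a hypothetical 600 MB device with 256 MB zones:

    dev_capacity_sects   = mb_to_sects(600);  /* 1228800 sectors */
    dev->zone_size_sects = mb_to_sects(256);  /*  524288 sectors */
    dev->nr_zones = round_up(1228800, 524288) >> ilog2(524288);
                    /* = 1572864 >> 19 = 3 zones, the last one partial */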
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_granularity = info->discard_granularity ?:
+ info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
static void blkfront_setup_discard(struct blkfront_info *info)
{
- int err;
- unsigned int discard_granularity;
- unsigned int discard_alignment;
-
info->feature_discard = 1;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
+ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-granularity",
+ 0);
+ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
ret = of_platform_default_populate(child, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
return ret;
}
}
static int simple_pm_bus_probe(struct platform_device *pdev)
{
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_enable(&pdev->dev);
if (np)
- of_platform_populate(np, NULL, NULL, &pdev->dev);
+ of_platform_populate(np, NULL, lookup, &pdev->dev);
return 0;
}
config MXC_CLK_SCU
tristate
- depends on ARCH_MXC
- depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
def_bool SOC_IMX1
return 0;
}
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
return 0;
}
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
return 0;
}
+#endif
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
},
};
-static struct clk_branch gcc_camera_ahb_clk = {
- .halt_reg = 0xb008,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0xb008,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0xb008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_camera_hf_axi_clk = {
.halt_reg = 0xb020,
.halt_check = BRANCH_HALT,
[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
- [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
/*
* Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
- * GCC_GPU_CFG_AHB_CLK
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
*/
regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
return len;
}
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- u32 qposinit;
-
- regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
- return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
-
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
-
- regmap_write(priv->regmap32, QPOSINIT, res);
-
- return len;
-}
-
static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)
.read = ti_eqep_position_ceiling_read,
.write = ti_eqep_position_ceiling_write,
},
- {
- .name = "floor",
- .read = ti_eqep_position_floor_read,
- .write = ti_eqep_position_floor_write,
- },
{
.name = "enable",
.read = ti_eqep_position_enable_read,
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
+ select CRYPTO_ENGINE
select CRYPTO_SHA1
select CRYPTO_MD5
select CRYPTO_SHA256
__le32 byte_cnt;
union {
__le32 src;
- dma_addr_t src_dma;
+ u32 src_dma;
};
union {
__le32 dst;
- dma_addr_t dst_dma;
+ u32 dst_dma;
};
__le32 next_dma;
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
- * Note, all properties are considered as u8 arrays.
- * To get a value of any of them the caller must use device_property_read_u8_array().
+ * Properties are stored either as u8 arrays, which can be retrieved with
+ * device_property_read_u8_array(), or as booleans, which can be queried
+ * with device_property_present().
*/
#define pr_fmt(fmt) "apple-properties: " fmt
entry_data = ptr + key_len + sizeof(val_len);
entry_len = val_len - sizeof(val_len);
- entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
- entry_len);
+ if (entry_len)
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
+ else
+ entry[i] = PROPERTY_ENTRY_BOOL(key);
+
if (dump_properties) {
dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
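On the consumer side this means a zero-length EFI property surfaces as a boolean; a sketch, with hypothetical property names:

    /* value-carrying property */
    u8 blob[16];
    int err = device_property_read_u8_array(dev, "some-blob", blob, sizeof(blob));

    /* zero-length property, now exposed as a boolean */
    bool flag = device_property_present(dev, "some-flag");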
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
+ select SOC_BUS
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and
config GPIO_SIFIVE
bool "SiFive GPIO support"
- depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ depends on OF_GPIO
+ select IRQ_DOMAIN_HIERARCHY
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select REGMAP_MMIO
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
else
state->duty_cycle = 1;
+ val = (unsigned long long) u; /* on duration */
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
- val = (unsigned long long) u * NSEC_PER_SEC;
+ val += (unsigned long long) u; /* period = on + off duration */
+ val *= NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
- if (val < state->duty_cycle) {
+ if (val > UINT_MAX)
+ state->period = UINT_MAX;
+ else if (val)
+ state->period = val;
+ else
state->period = 1;
- } else {
- val -= state->duty_cycle;
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
- else
- state->period = 1;
- }
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
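Worked example of the fixed period math, assuming a 25 MHz PWM clock and equal on/off blink registers of 12500000 counts each:

    duty_cycle = 12500000ULL * NSEC_PER_SEC / 25000000;              /* 0.5 s */
    period     = (12500000ULL + 12500000) * NSEC_PER_SEC / 25000000; /* 1.0 s */

The old code derived the period from the off-duration alone; summing the on- and off-durations is what makes period = on + off.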
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->eflags, 0);
+ if (line->desc)
+ WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
#endif
};
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ struct gpio_device *gdev = cdev->gdev;
+ struct gpiochip_info chipinfo;
+
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
+ strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+ strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+ chipinfo.lines = gdev->ngpio;
+ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+ return -EFAULT;
+ return 0;
+}
+
#ifdef CONFIG_GPIO_CDEV_V1
/*
* returns 0 if the versions match, else the previously selected ABI version
return abiv;
}
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+ bool watch)
+{
+ struct gpio_desc *desc;
+ struct gpioline_info lineinfo;
+ struct gpio_v2_line_info lineinfo_v2;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ /* this doubles as a range check on line_offset */
+ desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (watch) {
+ if (lineinfo_ensure_abi_version(cdev, 1))
+ return -EPERM;
+
+ if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+ return -EBUSY;
+ }
+
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ if (watch)
+ clear_bit(lineinfo.line_offset, cdev->watched_lines);
+ return -EFAULT;
+ }
+
+ return 0;
+}
#endif
static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
return 0;
}
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ __u32 offset;
+
+ if (copy_from_user(&offset, ip, sizeof(offset)))
+ return -EFAULT;
+
+ if (offset >= cdev->gdev->ngpio)
+ return -EINVAL;
+
+ if (!test_and_clear_bit(offset, cdev->watched_lines))
+ return -EBUSY;
+
+ return 0;
+}
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
- struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
- __u32 offset;
/* We fail any subsequent ioctl():s when the chip is gone */
- if (!gc)
+ if (!gdev->chip)
return -ENODEV;
/* Fill in the struct and pass to userspace */
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
- struct gpiochip_info chipinfo;
-
- memset(&chipinfo, 0, sizeof(chipinfo));
-
- strscpy(chipinfo.name, dev_name(&gdev->dev),
- sizeof(chipinfo.name));
- strscpy(chipinfo.label, gdev->label,
- sizeof(chipinfo.label));
- chipinfo.lines = gdev->ngpio;
- if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
- return -EFAULT;
- return 0;
+ return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
- return -EFAULT;
- return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
- } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- if (lineinfo_ensure_abi_version(cdev, 1))
- return -EPERM;
-
- if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
- return -EBUSY;
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
- clear_bit(lineinfo.line_offset, cdev->watched_lines);
- return -EFAULT;
- }
-
- return 0;
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ return lineinfo_get_v1(cdev, ip,
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
return linereq_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
- if (copy_from_user(&offset, ip, sizeof(offset)))
- return -EFAULT;
-
- if (offset >= cdev->gdev->ngpio)
- return -EINVAL;
-
- if (!test_and_clear_bit(offset, cdev->watched_lines))
- return -EBUSY;
-
- return 0;
+ return lineinfo_unwatch(cdev, ip);
}
return -EINVAL;
}
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+
+ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+ if (ret)
+ goto err_free_ida;
+
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (gc->parent && gc->parent->driver)
gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
ret = -ENOMEM;
- goto err_free_ida;
+ goto err_free_dev_name;
}
if (gc->ngpio == 0) {
kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
+err_free_dev_name:
+ kfree(dev_name(&gdev->dev));
err_free_ida:
ida_free(&gpio_ida, gdev->id);
err_free_gdev:
type = IRQ_TYPE_NONE;
}
+ if (gc->to_irq)
+ chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
gc->to_irq = gpiochip_to_irq;
gc->irq.default_type = type;
gc->irq.lock_key = lock_key;
struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
- int first, j, ret;
+ int first, j;
if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
#include <linux/sched/task.h>
#include "amdgpu_object.h"
+#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = 1;
- bp.domain = alloc_domain;
- bp.flags = alloc_flags;
- bp.type = bo_type;
- bp.resv = NULL;
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
- domain_string(alloc_domain), ret);
+ domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
+ bo = gem_to_amdgpu_bo(obj);
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
resv = vm->root.base.bo->tbo.base.resv;
}
-retry:
initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count) {
+ if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmCGTS_TCC_DISABLE_Vangogh 0x5006
+#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007
+#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_Vangogh 0x2440
#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
+#define mmGCR_GENERAL_CNTL_Vangogh 0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0
#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
/* TCCs are global (not instanced). */
- uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
- RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ uint32_t tcc_disable;
+
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
+ break;
+ default:
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ break;
+ }
adev->gfx.config.tcc_disabled_mask =
REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
{
uint32_t def, data, def1, data1;
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
- data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
} else {
- data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
}
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
- data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
- else
- data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ uint32_t def, data, def1, data1, def2, data2;
+
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ } else {
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ }
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+ if (def1 != data1)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+ if (def2 != data2)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}
static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
- int data, data1;
+ int data, data1, data2, data3;
if (amdgpu_sriov_vf(adev))
*flags = 0;
- data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
- data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
/* AMD_CG_SUPPORT_MC_MGCG */
- if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
- !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
- *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+ && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ }
/* AMD_CG_SUPPORT_MC_LS */
- if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+ && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+ && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
*flags |= AMD_CG_SUPPORT_MC_LS;
}
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
- dc_state->streams[k], &bundle->stream_update, dc_state);
+ dc_state->streams[k], &bundle->stream_update);
}
cleanup:
stream_update.stream = stream_state;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
- stream_state, &stream_update,
- stream_state->ctx->dc->current_state);
+ stream_state, &stream_update);
mutex_unlock(&adev->dm.dc_lock);
}
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
drm_connector_update_edid_property(connector,
aconnector->edid);
- drm_add_edid_modes(connector, aconnector->edid);
-
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i;
+ int i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
amdgpu_dm_commit_cursors(state);
/* update planes when needed */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
bundle->surface_updates,
planes_count,
acrtc_state->stream,
- &bundle->stream_update,
- dc_state);
+ &bundle->stream_update);
/**
* Enable or disable the interrupts on the backend.
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_surface_update dummy_updates[MAX_SURFACES];
+ struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
- memset(&dummy_updates, 0, sizeof(dummy_updates));
+ memset(&surface_updates, 0, sizeof(surface_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
* To fix this, DC should permit updating only stream properties.
*/
for (j = 0; j < status->plane_count; j++)
- dummy_updates[j].surface = status->plane_states[0];
+ surface_updates[j].surface = status->plane_states[j];
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
- dummy_updates,
+ surface_updates,
status->plane_count,
dm_new_crtc_state->stream,
- &stream_update,
- dc_state);
+ &stream_update);
mutex_unlock(&dm->dc_lock);
}
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
if (computed_streams[i])
continue;
+ if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
+
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
}
return true;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
+ bool p_state_change_support;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
+ struct dc_stream_update *stream_update)
{
const struct dc_stream_status *stream_status;
enum surface_update_type update_type;
if (update_type >= UPDATE_TYPE_FULL) {
+ struct dc_plane_state *new_planes[MAX_SURFACES];
+
+ memset(new_planes, 0, sizeof(new_planes));
+
+ for (i = 0; i < surface_count; i++)
+ new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
context = dc_create_state(dc);
return;
}
- dc_resource_state_copy_construct(state, context);
+ dc_resource_state_copy_construct(
+ dc->current_state, context);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ /* remove old surfaces from context */
+ if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+ DC_ERROR("Failed to remove streams for new validate context!\n");
+ return;
+ }
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
+ /* add surface to context */
+ if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ DC_ERROR("Failed to add streams for new validate context!\n");
+ return;
}
+
}
switch (dpcd_aux_read_interval) {
case 0x01:
- aux_rd_interval_us = 400;
+ aux_rd_interval_us = 4000;
break;
case 0x02:
- aux_rd_interval_us = 4000;
+ aux_rd_interval_us = 8000;
break;
case 0x03:
- aux_rd_interval_us = 8000;
+ aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
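The corrected mapping follows the DisplayPort DPCD TRAINING_AUX_RD_INTERVAL encoding, where nonzero values mean N * 4 ms and zero selects the 400 us default. A one-line restatement of the switch above (an illustrative helper for the values shown, not a DC function):

	static inline uint32_t aux_rd_interval_us_for(uint8_t val)
	{
		return val ? val * 4000 : 400;	/* 01h..04h -> 4..16 ms */
	}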
initial_link_setting;
uint32_t link_bw;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state);
+ struct dc_stream_update *stream_update);
/*
* Log the current stream state.
*/
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, plane_id, true);
- hws->funcs.hubp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, plane_id, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, dpp->inst, false);
- hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
* if this primary pipe has a bottom pipe in prev. state
* and if the bottom pipe is still available (which it should be),
* pick that pipe as secondary
- * Same logic applies for ODM pipes. Since mpo is not allowed with odm
- * check in else case.
+ * Same logic applies for ODM pipes
*/
if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
- } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ }
+ if (secondary_pipe == NULL &&
+ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
+ .dram_clock_change_latency_us = 11.72,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse
endif
ifdef CONFIG_PPC64
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float
endif
ifdef CONFIG_X86
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
endif
ifdef CONFIG_PPC64
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
endif
ifdef CONFIG_X86
DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse
endif
ifdef CONFIG_PPC64
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
endif
ifdef CONFIG_X86
*clock_req);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
int (*gfx_off_control)(struct smu_context *smu, bool enable);
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed);
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
- uint32_t rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_rpm) {
- if (speed > 100)
- speed = 100;
- rpm = speed * smu->fan_max_rpm / 100;
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
- }
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
mutex_unlock(&smu->mutex);
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
return 0;
}
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_auto_fan_control(smu, 0))
+ return -EINVAL;
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
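The conversion above scales the requested percentage against the fan controller's FMAX_DUTY100 calibration value; with illustrative numbers:

	/* hypothetical FMAX_DUTY100 == 255, 50% requested:
	 * duty = 50 * 255 / 100 = 127 -> written to FDO_STATIC_DUTY
	 */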
int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
switch (mode) {
case AMD_FAN_CTRL_NONE:
- ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_cpu_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+ return 0;
}
static const struct pptable_funcs renoir_ppt_funcs = {
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <sound/hdmi-codec.h>
struct mutex ocm_lock;
struct wait_queue_head wq;
+ struct work_struct work;
struct device_node *dsi0_node;
struct device_node *dsi1_node;
bool hpd_supported;
bool edid_read;
+ /* can be accessed from different threads, so protect this with ocm_lock */
+ bool hdmi_connected;
uint8_t fw_version;
};
if (irq_status)
regmap_write(lt9611uxc->regmap, 0xb022, 0);
- lt9611uxc_unlock(lt9611uxc);
-
- if (irq_status & BIT(0))
+ if (irq_status & BIT(0)) {
lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+ wake_up_all(&lt9611uxc->wq);
+ }
if (irq_status & BIT(1)) {
- if (lt9611uxc->connector.dev)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- else
- drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
+ lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+ schedule_work(&lt9611uxc->work);
}
+ lt9611uxc_unlock(lt9611uxc);
+
return IRQ_HANDLED;
}
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+ struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+ bool connected;
+
+ if (lt9611uxc->connector.dev)
+ drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+ else {
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
+
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
+ }
+}
+
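The interrupt handler now only snapshots state and schedules work, deferring the DRM notification; presumably this avoids re-entering DRM (and bridge callbacks that may take ocm_lock) from the locked section of the IRQ thread. A minimal sketch of the pattern; struct demo and demo_read_hpd are hypothetical names, not driver API:

	#include <linux/interrupt.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct demo {
		struct mutex lock;		/* cf. ocm_lock */
		struct work_struct work;	/* cf. lt9611uxc->work */
		bool connected;
	};

	static bool demo_read_hpd(struct demo *d);	/* hypothetical HW read */

	static irqreturn_t demo_irq_thread(int irq, void *arg)
	{
		struct demo *d = arg;

		mutex_lock(&d->lock);
		d->connected = demo_read_hpd(d);	/* cheap snapshot */
		mutex_unlock(&d->lock);

		schedule_work(&d->work);	/* sleepable notify runs later */
		return IRQ_HANDLED;
	}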
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
{
gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
int ret;
- int connected = 1;
+ bool connected = true;
+
+ lt9611uxc_lock(lt9611uxc);
if (lt9611uxc->hpd_supported) {
- lt9611uxc_lock(lt9611uxc);
ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
- lt9611uxc_unlock(lt9611uxc);
if (ret)
dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
else
connected = reg_val & BIT(1);
}
+ lt9611uxc->hdmi_connected = connected;
+
+ lt9611uxc_unlock(lt9611uxc);
return connected ? connector_status_connected :
connector_status_disconnected;
static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
{
return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
}
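For the error handling in the next hunk, recall the kernel's wait_event_interruptible_timeout() return convention:

	/*
	 * wait_event_interruptible_timeout() returns:
	 *   < 0  interrupted by a signal (-ERESTARTSYS)
	 *  == 0  timed out with the condition still false
	 *   > 0  condition became true (remaining jiffies, at least 1)
	 * so a 0 return must be treated as a timeout, not success.
	 */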
static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
ret = lt9611uxc_wait_for_edid(lt9611uxc);
if (ret < 0) {
dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
- return ERR_PTR(ret);
+ return NULL;
+ } else if (ret == 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+ return NULL;
}
return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
lt9611uxc->fw_version = ret;
init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
+ flush_scheduled_work();
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
return 0;
}
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
- if (dp_link_bw == 0 || dp_link_count == 0)
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
- dp_link_bw, dp_link_count);
+ if (link_rate == 0 || link_lane_count == 0)
+ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+ link_rate, link_lane_count);
- return dp_link_bw * dp_link_count / 2;
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ return link_rate * link_lane_count / 54000;
}
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
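As a worked example of the new units (not part of the patch), consider an HBR2 x4 link:

	/*
	 * drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4) == 540000 (10 kbit/s
	 * units), so drm_dp_get_vc_payload_bw(540000, 4) returns
	 * 540000 * 4 / 54000 == 40 PBN per timeslot -- the same value the
	 * old bw-code form computed as 0x14 * 4 / 2 == 40.
	 */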
/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
if (gbo->vmap_use_count > 0)
goto out;
- ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
- if (ret)
- return ret;
+ /*
+ * VRAM helpers unmap the BO only on demand. So the previous
+ * page mapping might still be around. Only vmap if there's
+ * no mapping present.
+ */
+ if (dma_buf_map_is_null(&gbo->map)) {
+ ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+ if (ret)
+ return ret;
+ }
out:
++gbo->vmap_use_count;
return;
ttm_bo_vunmap(bo, &gbo->map);
+ dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}
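Together the two hunks above give vmap a cached-mapping fast path (summarized; dma_buf_map_is_null()/dma_buf_map_clear() are the dma-buf-map helpers the code already uses):

	/*
	 * vmap:   only calls ttm_bo_vmap() when gbo->map is still unset;
	 *         otherwise it just bumps vmap_use_count.
	 * vunmap: tears down the mapping and clears gbo->map so that the
	 *         next vmap call re-creates it.
	 */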
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- drm_syncobj_put(syncobj);
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
if (!ret)
- return 0;
+ goto out;
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}
if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
- return ret;
+ goto out;
memset(&wait, 0, sizeof(wait));
wait.task = current;
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);
+out:
+ drm_syncobj_put(syncobj);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
int n_entries, ln;
u32 val;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- /* The table does not have values for level 3 and level 9. */
- if (level >= n_entries || level == 3 || level == 9) {
+ if (level >= n_entries) {
drm_dbg_kms(&dev_priv->drm,
"DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
- level = n_entries - 2;
+ level, n_entries - 1);
+ level = n_entries - 1;
}
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
if (level >= n_entries)
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_combo(i915, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(i915, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+}
+
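intel_ddi_power_up_lanes() isolates the combo-PHY-only step shared by the DP and, in a later hunk, HDMI paths: the set of lanes to power up follows crtc_state->lane_count, and DDI_BUF_PORT_REVERSAL swaps which physical lanes those are, which is why lane_reversal is passed down to intel_combo_phy_power_up_lanes(); for non-combo PHYs the helper is a no-op.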
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
* the used lanes of the DDI.
*/
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
/*
* 7.g Configure and enable DDI_BUF_CTL
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_de_write(dev_priv, reg, val);
}
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
vma = ERR_PTR(ret);
goto err;
}
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
- i915_gem_object_unpin_from_display_plane(vma);
- i915_gem_object_unlock(vma->obj);
-
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
return plane_color_ctl;
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 tmp;
drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
enableddisabled(intel_dp->has_hdmi_sink));
- tmp = intel_dp->dfp.ycbcr_444_to_420 ?
- DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+ tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
-
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
- drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "");
-
- intel_dp->set_signal_levels(intel_dp, crtc_state);
-}
-
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ char phy_name[10];
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "",
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ intel_dp->set_signal_levels(intel_dp, crtc_state);
+}
+
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
- crtc_state->pbn, 0);
+ crtc_state->pbn,
+ drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
if (content_protection_type_changed) {
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_connector_get(&connector->base);
schedule_work(&hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
desired_and_not_enabled =
hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
mutex_unlock(&hdcp->mutex);
+ /*
+ * If HDCP is already ENABLED and the CP property is DESIRED, schedule
+ * prop_work to update the correct CP property to user space.
+ */
+ if (!desired_and_not_enabled && !content_protection_type_changed) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
}
if (desired_and_not_enabled || content_protection_type_changed)
intel_frontbuffer_flip_complete(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
return 0;
out_unpin:
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
out_pin_section:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_ME 0x0000
#define PREOFF_YUV_TO_RGB_LO 0x1800
#define ROFF(x) (((x) & 0xffff) << 16)
#define GOFF(x) (((x) & 0xffff) << 0)
#define BOFF(x) (((x) & 0xffff) << 16)
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
static void
icl_program_input_csc(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
0x0, 0x7800, 0x7F10,
},
};
-
- /* Matrix for Limited Range to Full Range Conversion */
- static const u16 input_csc_matrix_lr[][9] = {
- /*
- * BT.601 Limted range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.596027,
- * 1.164384, -0.39175, -0.812813,
- * 1.164384, 2.017232, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7CC8, 0x7950, 0x0,
- 0x8D00, 0x7950, 0x9C88,
- 0x0, 0x7950, 0x6810,
- },
- /*
- * BT.709 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.792741,
- * 1.164384, -0.213249, -0.532909,
- * 1.164384, 2.112402, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7E58, 0x7950, 0x0,
- 0x8888, 0x7950, 0xADA8,
- 0x0, 0x7950, 0x6870,
- },
- /*
- * BT.2020 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164, 0.000, 1.678,
- * 1.164, -0.1873, -0.6504,
- * 1.164, 2.1417, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7D70, 0x7950, 0x0,
- 0x8A68, 0x7950, 0xAC00,
- 0x0, 0x7950, 0x6890,
- },
- };
- const u16 *csc;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->hw.color_encoding];
- else
- csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
ROFF(csc[0]) | GOFF(csc[1]));
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- 0);
- else
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
intel_de_write_fw(dev_priv,
return vma;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (list_empty(&obj->vma.list))
- return;
-
- mutex_lock(&i915->ggtt.vm.mutex);
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- spin_unlock(&obj->vma.lock);
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- if (obj->mm.madv == I915_MADV_WILLNEED &&
- !atomic_read(&obj->mm.shrink_pin))
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
i915_gem_object_unlock(obj);
if (write_domain)
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
&cb_kernel_ivb,
desc_count);
+ /* Reset inherited context registers */
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
gen7_emit_state_base_address(&cmds, descriptors);
gen7_emit_pipeline_invalidate(&cmds);
+ /* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
+ /* Execute the kernel on all HW threads */
for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
return true;
}
-static inline bool __request_completed(const struct i915_request *rq)
-{
- return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq)
-{
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!__dma_fence_signal(&rq->fence)) {
- i915_request_put(rq);
- return false;
- }
-
- return true;
-}
-
static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
bool release;
- if (!__request_completed(rq))
+ if (!__i915_request_is_complete(rq))
break;
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
- if (__signal_request(rq))
+ if (__dma_fence_signal(&rq->fence))
/* We own signal_node now, xfer to local list */
signal = slist_add(&rq->signal_node, signal);
+ else
+ i915_request_put(rq);
if (release) {
add_retire(b, ce->timeline);
kfree(b);
}
+static void irq_signal_request(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
+{
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ if (llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+}
+
static void insert_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- i915_request_get(rq);
-
/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
- if (__request_completed(rq)) {
- if (__signal_request(rq) &&
- llist_add(&rq->signal_node, &b->signaled_requests))
- irq_work_queue(&b->irq_work);
+ if (__i915_request_is_complete(rq)) {
+ irq_signal_request(rq, b);
return;
}
break;
}
}
+
+ i915_request_get(rq);
list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
bool release;
- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ spin_unlock(&ce->signal_lock);
return;
+ }
- spin_lock(&ce->signal_lock);
list_del_rcu(&rq->signal_link);
- release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
if (release)
intel_context_put(ce);
+ if (__i915_request_is_complete(rq))
+ irq_signal_request(rq, b);
+
i915_request_put(rq);
}
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
- &ggtt->error_capture,
- PAGE_SIZE, 0,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ /*
+ * Reserve a mappable slot for our lockless error capture.
+ *
+ * We strongly prefer taking address 0x0 in order to protect
+ * other critical buffers against accidental overwrites,
+ * as writing to address 0 is a very common mistake.
+ *
+ * Since 0 may already be in use by the system (e.g. the BIOS
+ * framebuffer), we let the reservation fail quietly and hope
+ * 0 remains reserved always.
+ *
+ * If we fail to reserve 0, and then fail to find any space
+ * for an error-capture, remain silent. We can afford not
+ * to reserve an error_capture node as we have fallback
+ * paths, and we trust that 0 will remain reserved. However,
+ * the only likely reason for failure to insert is a driver
+ * bug, which we expect to cause other failures...
+ */
+ ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+ if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+ drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ ggtt->error_capture.size, 0,
+ ggtt->error_capture.color,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+ ggtt->error_capture.start,
+ ggtt->error_capture.start + ggtt->error_capture.size);
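The fallback works because of the drm_mm split between exact placement and search: drm_mm_reserve_node() claims exactly the [start, start + size) range pre-set in the node, while drm_mm_insert_node_in_range() searches for any free slot. Condensed, with mm, node and end standing in for the ggtt fields used above:

	node->start = 0;			/* exact address to claim */
	node->size = I915_GTT_PAGE_SIZE;
	node->color = I915_COLOR_UNEVICTABLE;
	if (drm_mm_reserve_node(mm, node))	/* address 0 already taken */
		drm_mm_insert_node_in_range(mm, node, node->size, 0,
					    node->color, 0, end,
					    DRM_MM_INSERT_LOW);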
/*
* The upper portion of the GuC address space has a sizeable hole
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- drm_dbg_kms(&ggtt->vm.i915->drm,
- "clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+ /* Called on error unwind, clear all flags to prevent further use */
+ memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
struct intel_timeline_cacheline *cl =
container_of(rcu, typeof(*cl), rcu);
+ /* Must wait until after all *rq->hwsp are complete before removing */
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
i915_active_fini(&cl->active);
kfree(cl);
}
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- i915_vma_put(cl->hwsp->vma);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
call_rcu(&cl->rcu, __rcu_cacheline_free);
}
return ERR_CAST(vaddr);
}
- i915_vma_get(hwsp->vma);
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
int __i915_active_wait(struct i915_active *ref, int state)
{
- int err;
-
might_sleep();
- if (!i915_active_acquire_if_busy(ref))
- return 0;
-
/* Any fence added after the wait begins will not be auto-signaled */
- err = flush_lazy_signals(ref);
- i915_active_release(ref);
- if (err)
- return err;
+ if (i915_active_acquire_if_busy(ref)) {
+ int err;
- if (!i915_active_is_idle(ref) &&
- ___wait_var_event(ref, i915_active_is_idle(ref),
- state, 0, 0, schedule()))
- return -EINTR;
+ err = flush_lazy_signals(ref);
+ i915_active_release(ref);
+ if (err)
+ return err;
+ if (___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
+ return -EINTR;
+ }
+
+ /*
+ * After the wait is complete, the caller may free the active.
+ * We have to flush any concurrent retirement before returning.
+ */
flush_work(&ref->work);
return 0;
}
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
static __always_inline bool
return val;
}
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ intel_wakeref_t wakeref;
- if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+ pmu->sample[__I915_SAMPLE_RC6].cur;
+ pmu->sleep_last = ktime_get();
+ }
+}
+static void park_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
return __get_rc6(gt);
}
+static void init_rc6(struct i915_pmu *pmu) { }
static void park_rc6(struct drm_i915_private *i915) {}
#endif
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
unsigned long flags;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
- if (pmu->enable_count[bit] == 0 &&
- config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
- pmu->sleep_last = ktime_get();
- }
-
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
pmu->cpuhp.cpu = -1;
+ init_rc6(pmu);
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
static inline bool __i915_request_has_started(const struct i915_request *rq)
{
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}
/**
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- /* Remember: started but may have since been preempted! */
- return __i915_request_has_started(rq);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ /* Remember: started but may have since been preempted! */
+ result = __i915_request_has_started(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
*/
static inline bool i915_request_is_running(const struct i915_request *rq)
{
+ bool result;
+
if (!i915_request_is_active(rq))
return false;
- return __i915_request_has_started(rq);
+ rcu_read_lock();
+ result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
return !list_empty(&rq->sched.link);
}
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ result = __i915_request_is_complete(rq);
+ rcu_read_unlock();
+
+ return result;
}
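In both predicates the rcu_read_lock() section is what makes the unlocked HWSP access safe: once a request is signaled and retired, its status-page cacheline may be recycled, so the helpers re-check i915_request_signaled() inside the read-side critical section and only compare seqnos for a request whose HWSP is still guaranteed live.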
static inline void i915_request_mark_complete(struct i915_request *rq)
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto out_put_batch;
+ goto out_put_out;
}
err = i915_vma_pin(vma, 0, 0,
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
- NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
- NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+ if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
+ } else {
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
+ }
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
#include "head.h"
#include "core.h"
+#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
return 0;
}
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
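head917d_curs_set() drives the per-head 917d cursor methods (0x300 register stride per head; the NV917D_HEAD_SET_OFFSET_CURSOR and NV917D_HEAD_SET_CONTEXT_DMA_CURSOR offsets are added later in this series). The PUSH_WAIT(push, 5) budget appears to match the two pushes: one header plus the adjacent CONTROL_CURSOR and OFFSET_CURSOR data words (three words), then one header plus CONTEXT_DMA_CURSOR (two words).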
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
- .curs_set = head907d_curs_set,
+ .curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,
nvif_notify_get(&wndw->notify);
}
+static const u64 nv50_cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
+ const u64 *format_modifiers;
int nformat;
int ret;
for (nformat = 0; format[nformat]; nformat++);
- ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat,
- nouveau_display(dev)->format_modifiers,
- type, "%s-%d", name, index);
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ format_modifiers = nv50_cursor_format_modifiers;
+ else
+ format_modifiers = nouveau_display(dev)->format_modifiers;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+ format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
} while(0)
#endif
-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
- PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
- PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
+#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \
+ PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \
+ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
} while(0)
-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
- PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
+ PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
- PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
+ PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
- PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
+ PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
- PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
+ PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
- PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
+ PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
- PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
+ PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
- PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
+ PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
- PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
+ PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
- PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
+ PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_1D(X,o,p,s,mA,dA) \
- PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
- PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
- PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
- PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
- PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1D(X,o,p,s,mA,dA) \
+ PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
+ PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
+ PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
+ PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+ PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
- PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
- PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
- PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
- PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
- PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
- X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \
+ X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
-#define PUSH_1P(X,o,p,s,mA,dp,ds) \
- PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
- PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
- X##mA, (dA))
-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
- PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1P(X,o,p,s,mA,dp,ds) \
+ PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
+ PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
+ X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
+ PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
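The PUSH_() line above is a classic argument-counting dispatcher: the caller's arguments shift which name lands in the IMPL slot, so a single variadic PUSH() resolves to PUSH_1D, PUSH_2D, ... PUSH_10P purely by arity, at preprocessing time. A minimal standalone sketch of the same trick, with illustrative names (PICK, F1, F2) that are not from the driver:

#include <stdio.h>

#define F1(a)			printf("one: %d\n", (a))
#define F2(a, b)		printf("two: %d %d\n", (a), (b))
/* the trailing names shift into the IMPL slot depending on arity */
#define PICK_(a, b, IMPL, ...)	IMPL
#define PICK(...)		PICK_(__VA_ARGS__, F2, F1, 0)(__VA_ARGS__)

int main(void)
{
	PICK(1);	/* expands to F1(1) */
	PICK(1, 2);	/* expands to F2(1, 2) */
	return 0;
}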
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_TO_DEVICE);
+ num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+ i += num_pages - 1; /* the loop's ++i then steps to the next run */
+ }
}
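The rewritten loop batches the DMA syncs over maximal runs of physically contiguous pages instead of issuing one PAGE_SIZE sync per page, reducing the number of dma_sync_single_for_device() calls. A standalone sketch of the same run detection, using plain integers to stand in for struct page pointers (illustrative only):

#include <stddef.h>
#include <stdio.h>

static void coalesce(const unsigned long *pages, size_t n)
{
	size_t i = 0;

	while (i < n) {
		size_t num_pages = 1;

		/* extend the run while the next "page" is adjacent */
		while (i + num_pages < n &&
		       pages[i] + num_pages == pages[i + num_pages])
			num_pages++;

		printf("sync run: start=%zu len=%zu\n", i, num_pages);
		i += num_pages;
	}
}

int main(void)
{
	const unsigned long pages[] = { 100, 101, 102, 200, 300, 301 };

	coalesce(pages, sizeof(pages) / sizeof(pages[0]));
	return 0;	/* prints runs (0,3), (3,1), (4,2) */
}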
void
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
+
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
+ num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+ i += num_pages - 1; /* the loop's ++i then steps to the next run */
+ }
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
struct drm_nouveau_svm_init *args = data;
int ret;
+ /* We need to fail if svm is disabled */
+ if (!cli->drm->svm)
+ return -ENOSYS;
+
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
struct page *p;
void *vaddr;
- if (order) {
- gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ /* Don't set the __GFP_COMP flag for higher order allocations.
+ * Mapping pages directly into a userspace process and calling
+ * put_page() on a TTM allocated page is illegal.
+ */
+ if (order)
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_KSWAPD_RECLAIM;
- gfp_flags &= ~__GFP_MOVABLE;
- gfp_flags &= ~__GFP_COMP;
- }
if (!pool->use_dma_alloc) {
p = alloc_pages(gfp_flags, order);
card->dai_link = dai_link;
card->num_links = 1;
card->name = vc4_hdmi->variant->card_name;
+ card->driver_name = "vc4-hdmi";
card->dev = dev;
card->owner = THIS_MODULE;
* for now we just allocate globally.
*/
if (!hvs->hvs5)
- /* 96kB */
- drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
- /* 70k words */
- drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
lbm = pix_per_line * 16;
}
- lbm = roundup(lbm, 32);
+ /* Align it to 64 or 128 (hvs5) bytes */
+ lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+
+ /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+ lbm /= vc4->hvs->hvs5 ? 4 : 2;
return lbm;
}
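As a worked example of the new sizing, assuming a hypothetical pix_per_line of 1024 in the 16-bytes branch above: lbm = 1024 * 16 = 16384 bytes; on HVS5 that is roundup(16384, 128) = 16384, then 16384 / 4 pixels-per-word = 4096 LBM words, while the older HVS gives roundup(16384, 64) / 2 = 8192 words.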
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
- SCALER_POS1_SCL_WIDTH) |
+ SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
- SCALER_POS1_SCL_HEIGHT));
+ SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if (cls->name == MT_CLS_WIN_8 &&
+ if ((cls->name == MT_CLS_WIN_8 ||
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
}
if (flush)
- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
else if (insert)
- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
raw_data, report_size);
return insert && !flush;
static int wacom_devm_kfifo_alloc(struct wacom *wacom)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int error;
pen_fifo = devres_alloc(wacom_devm_kfifo_release,
}
devres_add(&wacom->hdev->dev, pen_fifo);
+ wacom_wac->pen_fifo = pen_fifo;
return 0;
}
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- struct kfifo_rec_ptr_2 pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7aa6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Alder Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
static int stm_heartbeat_init(void)
{
- int i, ret = -ENOMEM;
+ int i, ret;
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
- if (!stm_heartbeat[i].data.name)
+ if (!stm_heartbeat[i].data.name) {
+ ret = -ENOMEM;
goto fail_unregister;
+ }
stm_heartbeat[i].data.nr_chans = 1;
stm_heartbeat[i].data.link = stm_heartbeat_link;
config I2C_SPRD
tristate "Spreadtrum I2C interface"
depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+ depends on COMMON_CLK
help
If you say yes to this option, support will be included for the
Spreadtrum I2C interface.
};
+static const struct platform_device_id imx_i2c_devtype[] = {
+ {
+ .name = "imx1-i2c",
+ .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+ }, {
+ .name = "imx21-i2c",
+ .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
return -ENOMEM;
match = device_get_match_data(&pdev->dev);
- i2c_imx->hwdata = match;
+ if (match)
+ i2c_imx->hwdata = match;
+ else
+ i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+ platform_get_device_id(pdev)->driver_data;
/* Setup i2c_imx driver structure */
strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
.of_match_table = i2c_imx_dt_ids,
.acpi_match_table = i2c_imx_acpi_ids,
},
+ .id_table = imx_i2c_devtype,
};
static int __init i2c_adap_imx_init(void)
mtk_i2c_clock_disable(i2c);
ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c);
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ I2C_DRV_NAME, i2c);
if (ret < 0) {
dev_err(&pdev->dev,
"Request I2C IRQ %d fail\n", irq);
}
#ifdef CONFIG_PM_SLEEP
-static int mtk_i2c_resume(struct device *dev)
+static int mtk_i2c_suspend_noirq(struct device *dev)
+{
+ struct mtk_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_suspended(&i2c->adap);
+
+ return 0;
+}
+
+static int mtk_i2c_resume_noirq(struct device *dev)
{
int ret;
struct mtk_i2c *i2c = dev_get_drvdata(dev);
mtk_i2c_clock_disable(i2c);
+ i2c_mark_adapter_resumed(&i2c->adap);
+
return 0;
}
#endif
static const struct dev_pm_ops mtk_i2c_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+ mtk_i2c_resume_noirq)
};
static struct platform_driver mtk_i2c_driver = {
if (result)
return result;
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ if (data[i] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
length += data[i];
}
flags &= ~I2C_M_RECV_LEN;
}
- return (flags != 0) ? -EINVAL : 0;
+ return 0;
}
/**
/* read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ else if (i2c_dev->is_vi)
+ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
}
static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
}
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned int reg, unsigned int len)
+{
+ u32 *data32 = data;
+
+ /*
+ * The VI I2C controller has a known hardware bug where writes get
+ * stuck when multiple writes to the TX_FIFO register happen back to
+ * back. The recommended software workaround is to read an I2C
+ * register after each TX_FIFO write to flush out the data.
+ */
+ while (len--)
+ i2c_writel(i2c_dev, *data32++, reg);
+}
+
static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
unsigned int reg, unsigned int len)
{
void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
u32 val;
- if (!i2c_dev->atomic_mode)
+ if (!i2c_dev->atomic_mode && !in_irq())
return readl_relaxed_poll_timeout(addr, val, !(val & mask),
delay_us, timeout_us);
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ if (i2c_dev->is_vi)
+ i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ else
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
buf += words_to_transfer * BYTES_PER_FIFO_WORD;
}
ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
flags, indio_dev->name, indio_dev);
if (ret)
- goto error_kfifo_free;
+ return ret;
indio_dev->setup_ops = setup_ops;
indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
-
-error_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
}
static const char * const chan_name_ain[] = {
* @sdata: Sensor data.
*
* returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
*/
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
- struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
{
int ret, status;
/* How would I know if I can't check it? */
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
- return -EINVAL;
+ return true;
/* No scan mask, no interrupt */
if (!indio_dev->active_scan_mask)
- return 0;
+ return false;
ret = regmap_read(sdata->regmap,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev, "error checking samples available\n");
- return ret;
+ return false;
}
- if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
- return 1;
-
- return 0;
+ return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
}
/**
/* Tell the interrupt handler that we're dealing with edges */
if (irq_trig == IRQF_TRIGGER_FALLING ||
- irq_trig == IRQF_TRIGGER_RISING)
+ irq_trig == IRQF_TRIGGER_RISING) {
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+ dev_err(&indio_dev->dev,
+ "edge IRQ not supported w/o stat register.\n");
+ err = -EOPNOTSUPP;
+ goto iio_trigger_free;
+ }
sdata->edge_irq = true;
- else
+ } else {
/*
* If we're not using edges (i.e. level interrupts) we
* just mask off the IRQ, handle one interrupt, then
* interrupt handler top half again and start over.
*/
irq_trig |= IRQF_ONESHOT;
+ }
/*
* If the interrupt pin is Open Drain, by definition this
return ret;
if (pwr_down)
- st->pwr_down_mask |= (1 << chan->channel);
- else
st->pwr_down_mask &= ~(1 << chan->channel);
+ else
+ st->pwr_down_mask |= (1 << chan->channel);
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
return ret;
regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+ if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
return -EINVAL;
*val = sx9310_pthresh_codes[regval];
if (ret)
break;
- pos = min(max(ilog2(pos), 3), 10) - 3;
+ /* Powers of 2, except for a gap between 16 and 64 */
+ pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
pos);
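The expression above maps the supported averaging filter strengths (8, 16, 64, 128, ..., 2048 samples; note there is no 32 step) onto register codes 0-7 by clamping ilog2() and subtracting one extra for values at or above 32. A quick userspace check of the same expression, with avg_pos_code() mirroring the driver's arithmetic (GCC builtin used for ilog2; illustrative only):

#include <stdio.h>

static int avg_pos_code(int pos)
{
	int l = 31 - __builtin_clz(pos);	/* ilog2(pos) */

	if (l < 3)
		l = 3;
	if (l > 11)
		l = 11;
	return l - (pos >= 32 ? 4 : 3);
}

int main(void)
{
	static const int strengths[] = { 8, 16, 64, 128, 256, 512, 1024, 2048 };
	unsigned int i;

	for (i = 0; i < sizeof(strengths) / sizeof(*strengths); i++)
		printf("%4d -> code %d\n", strengths[i], avg_pos_code(strengths[i]));
	return 0;	/* prints codes 0 through 7 in order */
}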
if (ret < 0)
return ret;
+ /*
+ * Give the mlx90632 some time to reset properly before sending a new
+ * I2C command; if this is not done, the following I2C command(s) will
+ * not be accepted.
+ */
+ usleep_range(150, 200);
+
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
(MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
struct hns_roce_hem_table sccc_table;
struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
- spinlock_t bank_lock;
+ struct mutex bank_mutex;
};
struct hns_roce_cq_table {
hr_qp->doorbell_qpn = 1;
} else {
- spin_lock(&qp_table->bank_lock);
+ mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
if (ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to alloc QPN, ret = %d\n", ret);
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
return ret;
}
qp_table->bank[bankid].inuse++;
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
hr_qp->doorbell_qpn = (u32)num;
}
ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
- spin_lock(&hr_dev->qp_table.bank_lock);
+ mutex_lock(&hr_dev->qp_table.bank_mutex);
hr_dev->qp_table.bank[bankid].inuse--;
- spin_unlock(&hr_dev->qp_table.bank_lock);
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
}
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
unsigned int i;
mutex_init(&qp_table->scc_mutex);
+ mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
reserved_from_bot = hr_dev->caps.reserved_qps;
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
struct usnic_vnic_res *vnic_res;
int len;
- len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+ len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
qp_grp->ibqp.qp_num,
usnic_ib_qp_grp_state_to_string(qp_grp->state),
qp_grp->owner_pid,
res_chunk = qp_grp->res_chunk_list[i];
for (j = 0; j < res_chunk->cnt; j++) {
vnic_res = res_chunk->res[j];
- len += sysfs_emit_at(
- buf, len, "%s[%d] ",
+ len += sysfs_emit_at(buf, len, " %s[%d]",
usnic_vnic_res_type_to_str(vnic_res->type),
vnic_res->vnic_idx);
}
}
- len = sysfs_emit_at(buf, len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+ switch (type) {
+ case PVRDMA_NETWORK_ROCE_V1:
+ return RDMA_NETWORK_ROCE_V1;
+ case PVRDMA_NETWORK_IPV4:
+ return RDMA_NETWORK_IPV4;
+ case PVRDMA_NETWORK_IPV6:
+ return RDMA_NETWORK_IPV6;
+ default:
+ return RDMA_NETWORK_IPV6;
+ }
+}
+
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
wc->dlid_path_bits = cqe->dlid_path_bits;
wc->port_num = cqe->port_num;
wc->vendor_err = cqe->vendor_err;
- wc->network_hdr_type = cqe->network_hdr_type;
+ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
/* Update shared ring state */
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
+#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
{
struct udphdr *udph;
struct net_device *ndev = skb->dev;
+ struct net_device *rdev = ndev;
struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ if (!rxe && is_vlan_dev(rdev)) {
+ rdev = vlan_dev_real_dev(ndev);
+ rxe = rxe_get_dev_from_net(rdev);
+ }
if (!rxe)
goto drop;
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
{ }
};
};
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
-static const struct spi_device_id ariel_pwrbutton_id_table[] = {
- { "wyse-ariel-ec-input", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table);
-
static struct spi_driver ariel_pwrbutton_driver = {
.driver = {
.name = "dell-wyse-ariel-ec-input",
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
{ .id = "5663", .data = >1x_chip_data },
{ .id = "5688", .data = >1x_chip_data },
{ .id = "917S", .data = >1x_chip_data },
+ { .id = "9286", .data = >1x_chip_data },
{ .id = "911", .data = >911_chip_data },
{ .id = "9271", .data = >911_chip_data },
{ .compatible = "goodix,gt927" },
{ .compatible = "goodix,gt9271" },
{ .compatible = "goodix,gt928" },
+ { .compatible = "goodix,gt9286" },
{ .compatible = "goodix,gt967" },
{ }
};
void *buf, size_t len);
int (*get_touch_data)(struct i2c_client *client, u8 *data);
bool (*parse_touch_data)(const u8 *data, unsigned int finger,
- unsigned int *x, unsigned int *y);
+ unsigned int *x, unsigned int *y,
+ unsigned int *z);
bool (*continue_polling)(const u8 *data, bool touch);
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+ bool has_pressure_reg;
};
struct ili210x {
static bool ili210x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
if (touchdata[0] & BIT(finger))
return false;
static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u32 data;
static bool ili212x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
*x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+ *z = touchdata[1 + (finger * 5) + 4];
return true;
}
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+ .has_pressure_reg = true,
};
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
struct input_dev *input = priv->input;
int i;
bool contact = false, touch;
- unsigned int x = 0, y = 0;
+ unsigned int x = 0, y = 0, z = 0;
for (i = 0; i < priv->chip->max_touches; i++) {
- touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
+ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
touchscreen_report_pos(input, &priv->prop, x, y, true);
+ if (priv->chip->has_pressure_reg)
+ input_report_abs(input, ABS_MT_PRESSURE, z);
contact = true;
}
}
max_xy = (chip->resolution ?: SZ_64K) - 1;
input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+ if (priv->chip->has_pressure_reg)
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
+#define REG_STATUS 0x01 /* Device Status | Error Code */
+
+#define STATUS_NORMAL 0x00
+#define STATUS_INIT 0x01
+#define STATUS_ERROR 0x02
+#define STATUS_AUTO_TUNING 0x03
+#define STATUS_IDLE 0x04
+#define STATUS_POWER_DOWN 0x05
+
+#define ERROR_NONE 0x00
+#define ERROR_INVALID_ADDRESS 0x10
+#define ERROR_INVALID_VALUE 0x20
+#define ERROR_INVALID_PLATFORM 0x30
+
#define REG_XY_RESOLUTION 0x04
#define REG_XY_COORDINATES 0x12
#define ST_TS_MAX_FINGERS 10
u8 *read_buf;
};
-static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
+static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg,
+ unsigned int n)
{
struct i2c_client *client = ts->client;
struct i2c_msg msg[] = {
{
.addr = client->addr,
.flags = I2C_M_RD | I2C_M_DMA_SAFE,
- .len = ts->read_buf_len,
+ .len = n,
.buf = ts->read_buf,
}
};
return 0;
}
+static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+{
+ unsigned int retries;
+ int error;
+
+ for (retries = 10; retries; retries--) {
+ error = st1232_ts_read_data(ts, REG_STATUS, 1);
+ if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ENXIO;
+}
+
static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
u16 *max_y)
{
int error;
/* select resolution register */
- error = st1232_ts_read_data(ts, REG_XY_RESOLUTION);
+ error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3);
if (error)
return error;
buf = ts->read_buf;
- *max_x = ((buf[0] & 0x0070) << 4) | buf[1];
- *max_y = ((buf[0] & 0x0007) << 8) | buf[2];
+ *max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1;
+ *max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1;
return 0;
}
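Worked example of the unpacking above, assuming the chip returns buf = {0x13, 0x20, 0x58}: max_x = (((0x13 & 0x70) << 4) | 0x20) - 1 = (0x100 | 0x20) - 1 = 287 and max_y = (((0x13 & 0x07) << 8) | 0x58) - 1 = (0x300 | 0x58) - 1 = 855; the - 1 converts the reported resolution into the maximum reportable coordinate.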
int count;
int error;
- error = st1232_ts_read_data(ts, REG_XY_COORDINATES);
+ error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len);
if (error)
goto out;
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
+ /* Wait until device is ready */
+ error = st1232_ts_wait_ready(ts);
+ if (error)
+ return error;
+
/* Read resolution from the chip */
error = st1232_ts_read_resolution(ts, &max_x, &max_y);
if (error) {
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
+ return !!(iommu->features & mask);
}
static inline u64 iommu_virt_to_phys(void *vaddr)
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET 36
+#define IOMMU_IVINFO_EFRSUP BIT(0)
+
/* IOMMU Feature Reporting Field (for IVHD type 10h */
#define IOMMU_FEAT_GASUP_SHIFT 6
static bool amd_iommu_pre_enabled = true;
+static u32 amd_iommu_ivinfo __initdata;
+
bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
return amd_iommus_present;
}
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ iommu->features = h->efr_reg;
+}
+
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+ early_iommu_features_init(iommu, h);
+
break;
default:
return -EINVAL;
NULL,
};
+/*
+ * Note: IVHD 0x11 and 0x40 also contain an exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+ u64 features;
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return;
+
+ /* read extended feature bits */
+ features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+ if (!iommu->features) {
+ iommu->features = features;
+ return;
+ }
+
+ /*
+ * Sanity check and warn if EFR values from
+ * IVHD and MMIO conflict.
+ */
+ if (features != iommu->features)
+ pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+ features, iommu->features);
+}
+
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
- /* read extended feature bits */
- iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ late_iommu_features_init(iommu);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
free_unity_maps();
}
+static void __init ivinfo_init(void *ivrs)
+{
+ amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
if (ret)
goto out;
+ ivinfo_init(ivrs_base);
+
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
- if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+ if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
return ret;
}
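The old expression tested one bit too many: for an invalidation granule of VTD_PAGE_SIZE << size_order (i.e. 2^(size_order + 12) bytes), only bits [size_order + 11 : 0] of the address must be clear, while GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0) also covers bit size_order + 12, so a perfectly aligned address with that bit set was flagged. A standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12, size_order = 1;
	const uint64_t addr = 1ULL << (page_shift + size_order);	/* 8 KiB, aligned */
	const uint64_t old_mask = (1ULL << (page_shift + size_order + 1)) - 1;
	const uint64_t align = (uint64_t)4096 << size_order;

	printf("old check flags aligned addr: %d\n", (addr & old_mask) != 0);	/* 1 */
	printf("new check flags aligned addr: %d\n", (addr % align) != 0);	/* 0 */
	return 0;
}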
+static bool domain_use_flush_queue(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool r = true;
+
+ if (intel_iommu_strict)
+ return false;
+
+ /*
+ * The flush queue implementation does not perform page-selective
+ * invalidations that are required for efficient TLB flushes in virtual
+ * environments. The benefit of batching is likely to be much lower than
+ * the overhead of synchronizing the virtual and physical IOMMU
+ * page-tables.
+ */
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_caching_mode(iommu->cap))
+ continue;
+
+ pr_warn_once("IOMMU batching is disabled due to virtualization\n");
+ r = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return r;
+}
+
static int
intel_iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = !intel_iommu_strict;
+ *(int *)data = domain_use_flush_queue();
return 0;
default:
return -ENODEV;
TI System Controller, say Y here. Otherwise, say N.
config TI_PRUSS_INTC
- tristate "TI PRU-ICSS Interrupt Controller"
- depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ tristate
+ depends on TI_PRUSS
+ default TI_PRUSS
select IRQ_DOMAIN
help
This enables support for the PRU-ICSS Local Interrupt Controller
chained_irq_exit(chip, desc);
}
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
{
int cpu = smp_processor_id();
.name = "IPI",
.irq_mask = bcm2836_arm_irqchip_dummy_op,
.irq_unmask = bcm2836_arm_irqchip_dummy_op,
- .irq_eoi = bcm2836_arm_irqchip_ipi_eoi,
+ .irq_ack = bcm2836_arm_irqchip_ipi_ack,
.ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask,
};
static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
-int __init liointc_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
{
struct irq_chip_generic *gc;
struct irq_domain *domain;
if (ret)
return ret;
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+
+ if (ret)
+ return ret;
+
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
if (ret)
return ret;
irqchip->chip.num_regs = 1;
irqchip->chip.status_base = base + INTC_IP;
irqchip->chip.mask_base = base + INTC_IE;
- irqchip->chip.mask_invert = true,
+ irqchip->chip.mask_invert = true;
irqchip->chip.ack_base = base + INTC_IP;
return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
This option enables support for the Power Button LED of
Acer Iconia Tab A500.
+comment "Flash and Torch LED drivers"
+source "drivers/leds/flash/Kconfig"
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
+# Flash and Torch LED Drivers
+obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/
+
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+
+if LEDS_CLASS_FLASH
+
+config LEDS_RT8515
+ tristate "LED support for Richtek RT8515 flash/torch LED"
+ depends on GPIOLIB
+ help
+ This option enables support for the Richtek RT8515 flash
+ and torch LEDs found on some mobile phones.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-rt8515.
+
+endif # LEDS_CLASS_FLASH
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LED driver for Richtek RT8515 flash/torch white LEDs
+ * found on some Samsung mobile phones.
+ *
+ * This is a 1.5A Boost dual channel driver produced around 2011.
+ *
+ * The component lacks a datasheet, but in the schematic picture
+ * from the LG P970 service manual you can see the connections
+ * from the RT8515 to the LED, with two resistors connected
+ * from the pins "RFS" and "RTS" to ground.
+ *
+ * On the LG P970:
+ * RFS (resistance flash setting?) is 20 kOhm
+ * RTS (resistance torch setting?) is 39 kOhm
+ *
+ * Some sleuthing finds us the RT9387A which we have a datasheet for:
+ * https://static5.arrow.com/pdfs/2014/7/27/8/21/12/794/rtt_/manual/94download_ds.jspprt9387a.jspprt9387a.pdf
+ * This apparently works the same way so in theory this driver
+ * should cover RT9387A as well. This has not been tested, please
+ * update the compatibles if you add RT9387A support.
+ *
+ * Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+/* We can provide 15-700 mA out to the LED */
+#define RT8515_MIN_IOUT_MA 15
+#define RT8515_MAX_IOUT_MA 700
+/* The maximum intensity is 1-16 for flash and 1-100 for torch */
+#define RT8515_FLASH_MAX 16
+#define RT8515_TORCH_MAX 100
+
+#define RT8515_TIMEOUT_US 250000U
+#define RT8515_MAX_TIMEOUT_US 300000U
+
+struct rt8515 {
+ struct led_classdev_flash fled;
+ struct device *dev;
+ struct v4l2_flash *v4l2_flash;
+ struct mutex lock;
+ struct regulator *reg;
+ struct gpio_desc *enable_torch;
+ struct gpio_desc *enable_flash;
+ struct timer_list powerdown_timer;
+ u32 max_timeout; /* Flash max timeout */
+ int flash_max_intensity;
+ int torch_max_intensity;
+};
+
+static struct rt8515 *to_rt8515(struct led_classdev_flash *fled)
+{
+ return container_of(fled, struct rt8515, fled);
+}
+
+static void rt8515_gpio_led_off(struct rt8515 *rt)
+{
+ gpiod_set_value(rt->enable_flash, 0);
+ gpiod_set_value(rt->enable_torch, 0);
+}
+
+static void rt8515_gpio_brightness_commit(struct gpio_desc *gpiod,
+ int brightness)
+{
+ int i;
+
+ /*
+ * Toggling a GPIO line with a small delay increases the
+ * brightness one step at a time.
+ */
+ for (i = 0; i < brightness; i++) {
+ gpiod_set_value(gpiod, 0);
+ udelay(1);
+ gpiod_set_value(gpiod, 1);
+ udelay(1);
+ }
+}
+
+/* This is setting the torch light level */
+static int rt8515_led_brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled = lcdev_to_flcdev(led);
+ struct rt8515 *rt = to_rt8515(fled);
+
+ mutex_lock(&rt->lock);
+
+ if (brightness == LED_OFF) {
+ /* Off */
+ rt8515_gpio_led_off(rt);
+ } else if (brightness < RT8515_TORCH_MAX) {
+ /* Step it up to movie mode brightness using the torch pin */
+ rt8515_gpio_brightness_commit(rt->enable_torch, brightness);
+ } else {
+ /* Max torch brightness requested */
+ gpiod_set_value(rt->enable_torch, 1);
+ }
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_set(struct led_classdev_flash *fled,
+ bool state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+ struct led_flash_setting *timeout = &fled->timeout;
+ int brightness = rt->flash_max_intensity;
+
+ mutex_lock(&rt->lock);
+
+ if (state) {
+ /* Enable LED flash mode and set brightness */
+ rt8515_gpio_brightness_commit(rt->enable_flash, brightness);
+ /* Set timeout */
+ mod_timer(&rt->powerdown_timer,
+ jiffies + usecs_to_jiffies(timeout->val));
+ } else {
+ del_timer_sync(&rt->powerdown_timer);
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+ }
+
+ fled->led_cdev.brightness = LED_OFF;
+ /* After this the torch LED will be disabled */
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_get(struct led_classdev_flash *fled,
+ bool *state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+
+ *state = timer_pending(&rt->powerdown_timer);
+
+ return 0;
+}
+
+static int rt8515_led_flash_timeout_set(struct led_classdev_flash *fled,
+ u32 timeout)
+{
+ /* The timeout is stored in the led-class-flash core */
+ return 0;
+}
+
+static const struct led_flash_ops rt8515_flash_ops = {
+ .strobe_set = rt8515_led_flash_strobe_set,
+ .strobe_get = rt8515_led_flash_strobe_get,
+ .timeout_set = rt8515_led_flash_timeout_set,
+};
+
+static void rt8515_powerdown_timer(struct timer_list *t)
+{
+ struct rt8515 *rt = from_timer(rt, t, powerdown_timer);
+
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+}
+
+static void rt8515_init_flash_timeout(struct rt8515 *rt)
+{
+ struct led_classdev_flash *fled = &rt->fled;
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fled->timeout;
+ s->min = 1;
+ s->max = rt->max_timeout;
+ s->step = 1;
+ /*
+ * Set default timeout to RT8515_TIMEOUT_US except if
+ * max_timeout from DT is lower.
+ */
+ s->val = min(rt->max_timeout, RT8515_TIMEOUT_US);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+/* Configure the V4L2 flash subdevice */
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+ struct led_classdev *led = &rt->fled.led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(v4l2_sd_cfg->dev_name, led->dev->kobj.name,
+ sizeof(v4l2_sd_cfg->dev_name));
+
+ /*
+ * Init flash intensity setting: this is a linear scale
+ * capped from the device tree max intensity setting
+ * 1..flash_max_intensity
+ */
+ s = &v4l2_sd_cfg->intensity;
+ s->min = 1;
+ s->max = rt->flash_max_intensity;
+ s->step = 1;
+ s->val = s->max;
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+ v4l2_flash_release(rt->v4l2_flash);
+}
+
+#else
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+}
+#endif
+
+static void rt8515_determine_max_intensity(struct rt8515 *rt,
+ struct fwnode_handle *led,
+ const char *resistance,
+ const char *max_ua_prop, int hw_max,
+ int *max_intensity_setting)
+{
+ u32 res = 0; /* Can't be 0 so 0 is undefined */
+ u32 ua;
+ u32 max_ma;
+ int max_intensity;
+ int ret;
+
+ fwnode_property_read_u32(rt->dev->fwnode, resistance, &res);
+ ret = fwnode_property_read_u32(led, max_ua_prop, &ua);
+
+ /* Missing info in DT; fall back to the hardware maxima */
+ if (ret || res == 0) {
+ dev_err(rt->dev,
+ "either %s or %s missing from DT, using HW max\n",
+ resistance, max_ua_prop);
+ max_ma = RT8515_MAX_IOUT_MA;
+ max_intensity = hw_max;
+ goto out_assign_max;
+ }
+
+ /*
+ * Formula from the datasheet, this is the maximum current
+ * defined by the hardware.
+ */
+ max_ma = (5500 * 1000) / res;
+ /*
+ * Calculate max intensity (linear scaling)
+ * Formula is ((ua / 1000) / max_ma) * 100, then simplified
+ */
+ max_intensity = (ua / 10) / max_ma;
+
+ dev_info(rt->dev,
+ "current restricted from %u to %u mA, max intensity %d/100\n",
+ max_ma, (ua / 1000), max_intensity);
+
+out_assign_max:
+ dev_info(rt->dev, "max intensity %d/%d = %d mA\n",
+ max_intensity, hw_max, max_ma);
+ *max_intensity_setting = max_intensity;
+}
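As a worked example, using the LG P970 torch value quoted in the header comment and a hypothetical DT property: richtek,rts-ohms = 39000 gives max_ma = 5500000 / 39000 = 141 mA, and led-max-microamp = 100000 then yields max_intensity = (100000 / 10) / 141 = 70 on the 1..100 torch scale.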
+
+static int rt8515_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *child;
+ struct rt8515 *rt;
+ struct led_classdev *led;
+ struct led_classdev_flash *fled;
+ struct led_init_data init_data = {};
+ struct v4l2_flash_config v4l2_sd_cfg = {};
+ int ret;
+
+ rt = devm_kzalloc(dev, sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+
+ rt->dev = dev;
+ fled = &rt->fled;
+ led = &fled->led_cdev;
+
+ /* ENF - Enable Flash line */
+ rt->enable_flash = devm_gpiod_get(dev, "enf", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_flash))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_flash),
+ "cannot get ENF (enable flash) GPIO\n");
+
+ /* ENT - Enable Torch line */
+ rt->enable_torch = devm_gpiod_get(dev, "ent", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_torch))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_torch),
+ "cannot get ENT (enable torch) GPIO\n");
+
+ child = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ if (!child) {
+ dev_err(dev,
+ "No fwnode child node found for connected LED.\n");
+ return -EINVAL;
+ }
+ init_data.fwnode = child;
+
+ rt8515_determine_max_intensity(rt, child, "richtek,rfs-ohms",
+ "flash-max-microamp",
+ RT8515_FLASH_MAX,
+ &rt->flash_max_intensity);
+ rt8515_determine_max_intensity(rt, child, "richtek,rts-ohms",
+ "led-max-microamp",
+ RT8515_TORCH_MAX,
+ &rt->torch_max_intensity);
+
+ ret = fwnode_property_read_u32(child, "flash-max-timeout-us",
+ &rt->max_timeout);
+ if (ret) {
+ rt->max_timeout = RT8515_MAX_TIMEOUT_US;
+ dev_warn(dev,
+ "flash-max-timeout-us property missing\n");
+ }
+ timer_setup(&rt->powerdown_timer, rt8515_powerdown_timer, 0);
+ rt8515_init_flash_timeout(rt);
+
+ fled->ops = &rt8515_flash_ops;
+
+ led->max_brightness = rt->torch_max_intensity;
+ led->brightness_set_blocking = rt8515_led_brightness_set;
+ led->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH;
+
+ mutex_init(&rt->lock);
+
+ platform_set_drvdata(pdev, rt);
+
+ ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
+ if (ret) {
+ dev_err(dev, "can't register LED %s\n", led->name);
+ mutex_destroy(&rt->lock);
+ return ret;
+ }
+
+ rt8515_init_v4l2_flash_config(rt, &v4l2_sd_cfg);
+
+ /* Create a V4L2 Flash device if V4L2 flash is enabled */
+ rt->v4l2_flash = v4l2_flash_init(dev, child, fled, NULL, &v4l2_sd_cfg);
+ if (IS_ERR(rt->v4l2_flash)) {
+ ret = PTR_ERR(rt->v4l2_flash);
+ dev_err(dev, "failed to register V4L2 flash device (%d)\n",
+ ret);
+ /*
+ * Continue without the V4L2 flash
+ * (we still have the classdev)
+ */
+ }
+
+ return 0;
+}
+
+static int rt8515_remove(struct platform_device *pdev)
+{
+ struct rt8515 *rt = platform_get_drvdata(pdev);
+
+ rt8515_v4l2_flash_release(rt);
+ del_timer_sync(&rt->powerdown_timer);
+ mutex_destroy(&rt->lock);
+
+ return 0;
+}
+
+static const struct of_device_id rt8515_match[] = {
+ { .compatible = "richtek,rt8515", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt8515_match);
+
+static struct platform_driver rt8515_driver = {
+ .driver = {
+ .name = "rt8515",
+ .of_match_table = rt8515_match,
+ },
+ .probe = rt8515_probe,
+ .remove = rt8515_remove,
+};
+module_platform_driver(rt8515_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Richtek RT8515 LED driver");
+MODULE_LICENSE("GPL");
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
int invert)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
void led_trigger_blink(struct led_trigger *trig,
return -ENOMEM;
leds[0].ec_index = EC_BLUE_LED;
- leds[0].led_cdev.name = "blue:power",
+ leds[0].led_cdev.name = "blue:power";
leds[0].led_cdev.default_trigger = "default-on";
leds[1].ec_index = EC_AMBER_LED;
- leds[1].led_cdev.name = "amber:status",
+ leds[1].led_cdev.name = "amber:status";
leds[2].ec_index = EC_GREEN_LED;
- leds[2].led_cdev.name = "green:status",
+ leds[2].led_cdev.name = "green:status";
leds[2].led_cdev.default_trigger = "default-on";
for (i = 0; i < NLEDS; i++) {
led->cdev.brightness_get = lm3533_led_get;
led->cdev.blink_set = lm3533_led_blink_set;
led->cdev.brightness = LED_OFF;
- led->cdev.groups = lm3533_led_attribute_groups,
+ led->cdev.groups = lm3533_led_attribute_groups;
led->id = pdev->id;
mutex_init(&led->mutex);
rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
ret = nvm_submit_io_sync_raw(dev, &rqd);
+ __free_page(page);
if (ret)
return ret;
- __free_page(page);
-
return rqd.error;
}
#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_compat & \
BCH##_FEATURE_COMPAT_##flagname) != 0); \
} \
#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_ro_compat & \
BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
} \
#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_incompat & \
BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
} \
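Each macro family stamps out one inline predicate per feature bit, and the new guard makes all of them report 0 on superblocks older than BCACHE_SB_VERSION_CDEV_WITH_FEATURES, whose feature fields are not valid. For instance, the incompat variant applied to the large_bucket feature expands to roughly:

static inline int bch_has_feature_large_bucket(struct cache_sb *sb)
{
	if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES)
		return 0;
	return ((sb->feature_incompat &
		 BCH_FEATURE_INCOMPAT_LARGE_BUCKET) != 0);
}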
static int crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
- if (!ctx->r.req) {
- ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
- if (!ctx->r.req)
+ if (!ctx->r.req_aead) {
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req_aead)
return -ENOMEM;
}
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
- bool fix_padding;
bool discard;
+ bool fix_padding;
+ bool legacy_recalculate;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
return READ_ONCE(ic->failed);
}
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+ if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+ !ic->legacy_recalculate)
+ return true;
+ return false;
+}
+
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
unsigned j, unsigned char seq)
{
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+ arg_count += ic->legacy_recalculate;
DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
}
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
DMEMIT(" fix_padding");
+ if (ic->legacy_recalculate)
+ DMEMIT(" legacy_recalculate");
#define EMIT_ALG(a, n) \
do { \
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 15, "Invalid number of feature args"},
+ {0, 16, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
ic->fix_padding = true;
+ } else if (!strcmp(opt_string, "legacy_recalculate")) {
+ ic->legacy_recalculate = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
r = -ENOMEM;
goto bad;
}
+ } else {
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ ti->error = "Recalculate can only be specified with internal_hash";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+ dm_integrity_disable_recalculate(ic)) {
+ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+ r = -EOPNOTSUPP;
+ goto bad;
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
{
int r;
dev_t dev;
+ unsigned int major, minor;
+ char dummy;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
BUG_ON(!t);
- dev = dm_get_dev_t(path);
- if (!dev)
- return -ENODEV;
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EOVERFLOW;
+ } else {
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
+ }
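The "%u:%u%c" pattern accepts exactly a bare major:minor pair: the trailing %c can only match if extra characters follow the pair, so a return value of 2 means the string was clean and anything else falls through to the path lookup. A standalone sketch of the same parse (parse_devt is a hypothetical helper name):

#include <stdio.h>

static int parse_devt(const char *path, unsigned int *major, unsigned int *minor)
{
	char dummy;

	/* 2 = clean "maj:min"; 3 would mean trailing junk */
	return sscanf(path, "%u:%u%c", major, minor, &dummy) == 2;
}

int main(void)
{
	unsigned int maj, min;

	printf("%d\n", parse_devt("254:3", &maj, &min));	/* 1 */
	printf("%d\n", parse_devt("/dev/sda", &maj, &min));	/* 0 */
	printf("%d\n", parse_devt("8:0 junk", &maj, &min));	/* 0 */
	return 0;
}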
dd = find_device(&t->devices, dev);
if (!dd) {
* could wait for this and below md_handle_request could wait for those
* bios because of suspend check
*/
+ spin_lock_irq(&mddev->lock);
mddev->prev_flush_start = mddev->start_flush;
mddev->flush_bio = NULL;
+ spin_unlock_irq(&mddev->lock);
wake_up(&mddev->sb_wait);
if (bio->bi_iter.bi_size == 0) {
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
obj-$(CONFIG_CEC_SECO) += seco/
obj-$(CONFIG_CEC_STI) += sti/
+obj-$(CONFIG_CEC_STM32) += stm32/
obj-$(CONFIG_CEC_TEGRA) += tegra/
return -EINVAL;
}
} else {
- length = (b->memory == VB2_MEMORY_USERPTR ||
- b->memory == VB2_MEMORY_DMABUF)
+ length = (b->memory == VB2_MEMORY_USERPTR)
? b->length : vb->planes[0].length;
if (b->bytesused > length)
switch (pll->bus_type) {
case CCS_PLL_BUS_TYPE_CSI2_DPHY:
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- op_sys_clk_freq_hz_sdr = pll->link_freq * 2
- * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
- 1 : pll->csi2.lanes);
- break;
case CCS_PLL_BUS_TYPE_CSI2_CPHY:
- op_sys_clk_freq_hz_sdr =
- pll->link_freq
+ op_sys_clk_freq_hz_sdr = pll->link_freq * 2
* (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
1 : pll->csi2.lanes);
break;
vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
v->static_data_version_major[1];
vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
- v->static_data_version_major[1];
+ v->static_data_version_minor[1];
vv->date_year = ((u16)v->year[0] << 8) + v->year[1];
vv->date_month = v->month;
vv->date_day = v->day;
if (!q->sensor)
return -ENODEV;
- freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
+ freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
{
struct venus_core *core = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(core->dev);
venus_shutdown(core);
venus_firmware_deinit(core);
+ pm_runtime_put_sync(core->dev);
}
static __maybe_unused int venus_runtime_suspend(struct device *dev)
out:
fwnode_handle_put(fwnode);
- return 0;
+ return ret;
}
static int rvin_parallel_init(struct rvin_dev *vin)
struct rkisp1_match_data {
const char * const *clks;
unsigned int size;
+ enum rkisp1_cif_isp_version isp_ver;
};
/* ----------------------------------------------------------------------------
"hclk",
};
-static const struct rkisp1_match_data rk3399_isp_clk_data = {
+static const struct rkisp1_match_data rk3399_isp_match_data = {
.clks = rk3399_isp_clks,
.size = ARRAY_SIZE(rk3399_isp_clks),
+ .isp_ver = RKISP1_V10,
};
static const struct of_device_id rkisp1_of_match[] = {
{
.compatible = "rockchip,rk3399-cif-isp",
- .data = &rk3399_isp_clk_data,
+ .data = &rk3399_isp_match_data,
},
{},
};
static int rkisp1_probe(struct platform_device *pdev)
{
- const struct rkisp1_match_data *clk_data;
+ const struct rkisp1_match_data *match_data;
struct device *dev = &pdev->dev;
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
int ret, irq;
- clk_data = of_device_get_match_data(&pdev->dev);
- if (!clk_data)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
rkisp1->irq = irq;
- for (i = 0; i < clk_data->size; i++)
- rkisp1->clks[i].id = clk_data->clks[i];
- ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks);
+ for (i = 0; i < match_data->size; i++)
+ rkisp1->clks[i].id = match_data->clks[i];
+ ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = clk_data->size;
+ rkisp1->clk_size = match_data->size;
pm_runtime_enable(&pdev->dev);
+ rkisp1->media_dev.hw_revision = match_data->isp_ver;
strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
sizeof(rkisp1->media_dev.model));
rkisp1->media_dev.dev = &pdev->dev;
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
- for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1, arg->gamma_y[i],
RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
}
RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
- RKISP1_CIF_ISP_HIST_WEIGHT_44,
};
const u8 *weight;
unsigned int i;
weight[2],
weight[3]),
hist_weight_regs[i]);
+
+ rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
}
static void
#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
- for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
- for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++)
- pbuf->params.hist.hist_bins[i] =
- (u8)rkisp1_read(rkisp1,
- RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
+ u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+
+ pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+ }
}
static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
data->body);
spin_lock(&data->keylock);
if (scancode) {
- delay = nsecs_to_jiffies(dev->timeout) +
+ delay = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(100);
mod_timer(&data->rx_timeout, jiffies + delay);
} else {
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
- itdev->params.sample_period;
+ itdev->params.sample_period / 1000;
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
- unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
+ unsigned int timeout = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
if (dev->keypressed) {
- dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
+ dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
goto out_raw;
}
+ dev->registered = true;
+
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- dev->registered = true;
-
/*
* once the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
mod_timer(&serial_ir.timeout_timer,
- jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+ jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
ir_raw_event_handle(serial_ir.rcdev);
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div)
{
struct v4l2_ctrl *ctrl;
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
+ u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
if (hard_reset) {
/* Release kernel context */
- if (hl_ctx_put(hdev->kernel_ctx) == 1)
+ if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
}
}
+ /* Disable PCI access from device F/W so it won't send us additional
+ * interrupts. We disable MSI/MSI-X at the halt_engines function and we
+ * can't have the F/W sending us interrupts after that. We need to
+ * disable the access here because if the device is marked disabled, the
+ * message won't be sent. Also, in case of heartbeat, the device CPU is
+ * marked as disabled so this message won't be sent
+ */
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
/* Mark device as disabled */
hdev->disabled = true;
}
counters->rx_throughput = result;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
/* Fetch PCI tx counter */
pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
counters->tx_throughput = result;
/* Fetch PCI replay counter */
+ memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
&hw_idle.busy_engines_mask_ext, NULL);
+ hw_idle.busy_engines_mask =
+ lower_32_bits(hw_idle.busy_engines_mask_ext);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr, i;
+ bool is_host_addr;
u32 page_size;
+ is_host_addr = !hl_is_dram_va(hdev, vaddr);
page_size = phys_pg_pack->page_size;
next_vaddr = vaddr;
/*
* unmapping on Palladium can take a really long time, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
+ *
+ * In addition, when unmapping host memory we pass through
+ * the Linux kernel to unpin the pages and that takes a long
+ * time. Therefore, sleep every 32K pages to avoid a soft lockup
*/
- if (hdev->pldm)
- usleep_range(500, 1000);
+ if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+ usleep_range(50, 200);
}
}
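
The condition added above is a general pattern for very long unmap loops: yield periodically at a fixed stride so the soft-lockup watchdog never sees the core stuck in kernel space. Reduced to a standalone sketch with illustrative names:

#include <linux/delay.h>

/* Unmap @npages pages, sleeping briefly on every iteration for
 * slow emulation platforms, and every 32K pages when unpinning
 * host memory, so the soft-lockup watchdog is never starved. */
static void example_unmap_range(bool slow_platform, bool host_mem,
				unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		/* ... unmap page i here ... */
		if (slow_platform || (host_mem && (i & 0x7FFF) == 0))
			usleep_range(50, 200);
	}
}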
#include "habanalabs.h"
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
{
/* MMU H/W fini was already done in device hw_fini() */
- kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ }
+
+ /* Make sure that if we arrive here again without init having been
+ * called, we won't cause a kernel panic. This can happen, for example,
+ * if we fail at certain points during the hard-reset code
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
/**
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
"merging was advertised but not possible");
blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
- if (mmc_card_mmc(card))
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
+ WARN_ON(block_size != 512 && block_size != 4096);
+ }
blk_queue_logical_block_size(mq->queue, block_size);
/*
#include "sdio_cis.h"
#include "sdio_ops.h"
+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
+
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
do {
unsigned char tpl_code, tpl_link;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
prev = &this->next;
if (ret == -ENOENT) {
+ if (time_after(jiffies, timeout))
+ break;
/* warn about unknown tuples */
pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
{
- int ret;
-
- ret = sdhci_pltfm_unregister(pdev);
- if (ret)
- dev_err(&pdev->dev, "failed to shutdown\n");
+ sdhci_pltfm_suspend(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
#include "sdhci-pltfm.h"
+#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
+
/* DWCMSHC specific Mode Select value */
#define DWCMSHC_CTRL_HS400 0x7
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /*
+ * Whether or not V4 mode is enabled, the ARGUMENT2 register is a
+ * 32-bit block count register that doesn't support the stuff bits
+ * of the CMD23 argument on the dwcmshc host controller.
+ */
+ if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+ host->flags &= ~SDHCI_AUTO_CMD23;
+ else
+ host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ dwcmshc_check_auto_cmd23(mmc, mrq);
+
+ sdhci_request(mmc, mrq);
+}
+
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
sdhci_get_of_property(pdev);
+ host->mmc_host_ops.request = dwcmshc_request;
+
err = sdhci_add_host(host);
if (err)
goto err_clk;
return host->private;
}
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev);
int sdhci_pltfm_resume(struct device *dev);
-extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#else
+static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
+static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
+#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
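
The header rework above follows the usual kernel convention for conditionally built helpers: real prototypes under CONFIG_PM_SLEEP, static inline no-op stubs otherwise, so callers never need their own #ifdef. The generic shape, as a sketch (illustrative names, not a real kernel header):

/* example.h - illustrative only */
struct device;

#ifdef CONFIG_PM_SLEEP
int example_suspend(struct device *dev);
int example_resume(struct device *dev);
#else
/* Stubs keep callers #ifdef-free when PM sleep is disabled. */
static inline int example_suspend(struct device *dev) { return 0; }
static inline int example_resume(struct device *dev) { return 0; }
#endif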
/* Disable tuning request and auto-retuning again */
xenon_retune_setup(host);
- xenon_set_acg(host, true);
+ /*
+ * The ACG should be turned off at early init time, in order
+ * to solve possible issues with 1.8V regulator stabilization.
+ * The feature is re-enabled at a later stage.
+ */
+ xenon_set_acg(host, false);
xenon_set_sdclk_off_idle(host, sdhc_id, false);
/* Extract interleaved payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
- nand_extract_bits(buf, step * eccsize, tmp_buf,
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
src_bit_off, eccsize * 8);
src_bit_off += eccsize * 8;
struct device *dev = &pdev->dev;
struct ebu_nand_controller *ebu_host;
struct nand_chip *nand;
- struct mtd_info *mtd = NULL;
+ struct mtd_info *mtd;
struct resource *res;
char *resname;
int ret;
ebu_host->ebu + EBU_ADDR_SEL(cs));
nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+ mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
dev_err(ebu_host->dev, "NAND label property is mandatory\n");
return -EINVAL;
}
- mtd = nand_to_mtd(&ebu_host->chip);
mtd->dev.parent = dev;
ebu_host->dev = dev;
{
unsigned int eccsteps, eccbytes;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
if (!bch)
return 0;
return -EINVAL;
}
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
nsmtd = nand_to_mtd(chip);
nand_set_controller_data(chip, (void *)ns);
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
- if (section >= chip->ecc.steps)
+ if (section >= engine_conf->nsteps)
return -ERANGE;
/*
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- oobregion->offset = off + (section * (chip->ecc.bytes + 1));
- oobregion->length = chip->ecc.bytes;
+ oobregion->offset = off + (section * (engine_conf->code_size + 1));
+ oobregion->length = engine_conf->code_size;
return 0;
}
static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
if (section)
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+ off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
if (off >= mtd->oobsize)
return -ERANGE;
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
- if (req->ooblen)
- memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
- req->ooblen);
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
return 0;
}
dev->irq = 9;
if (arcrimi_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
iounmap(lp->mem_start);
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
#ifndef MODULE
int excnak_pending; /* We just got an excessive nak interrupt */
+ /* RESET flag handling */
+ int reset_in_progress;
+ struct work_struct reset_work;
+
struct {
uint16_t sequence; /* sequence number (incs with each packet) */
__be16 aborted_seq;
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+
struct net_device *alloc_arcdev(const char *name);
+void free_arcdev(struct net_device *dev);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
struct arcnet_local *lp = from_timer(lp, t, timer);
struct net_device *dev = lp->dev;
- if (!netif_carrier_ok(dev)) {
+ spin_lock_irq(&lp->lock);
+
+ if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
netif_carrier_on(dev);
netdev_info(dev, "link up\n");
}
+
+ spin_unlock_irq(&lp->lock);
+}
+
+static void reset_device_work(struct work_struct *work)
+{
+ struct arcnet_local *lp;
+ struct net_device *dev;
+
+ lp = container_of(work, struct arcnet_local, reset_work);
+ dev = lp->dev;
+
+ /* Do not bring the network interface back up if an ifdown
+ * was already done.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ return;
+
+ rtnl_lock();
+
+ /* Do another check, in case of an ifdown that was triggered in
+ * the small race window between the exit condition above and
+ * acquiring RTNL.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ goto out;
+
+ dev_close(dev);
+ dev_open(dev, NULL);
+
+out:
+ rtnl_unlock();
}
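
reset_device_work() above is a check / lock / re-check sequence: a cheap early exit runs before rtnl_lock(), then the test repeats once the lock is held, because the condition can change while sleeping on the lock. The skeleton, with illustrative names:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Deferred-reset skeleton: re-validate under RTNL, since an
 * ifdown may have raced in while we waited for the lock. */
static void example_reset_work(struct net_device *dev, const bool *in_progress)
{
	if (!netif_running(dev) || !*in_progress)
		return;

	rtnl_lock();
	if (netif_running(dev) && *in_progress) {
		dev_close(dev);
		dev_open(dev, NULL);
	}
	rtnl_unlock();
}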
static void arcnet_reply_tasklet(unsigned long data)
lp->dev = dev;
spin_lock_init(&lp->lock);
timer_setup(&lp->timer, arcnet_timer, 0);
+ INIT_WORK(&lp->reset_work, reset_device_work);
}
return dev;
}
EXPORT_SYMBOL(alloc_arcdev);
+void free_arcdev(struct net_device *dev)
+{
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ /* Do not cancel this at ->ndo_close(), as the workqueue itself
+ * indirectly calls the ifdown path through dev_close().
+ */
+ cancel_work_sync(&lp->reset_work);
+ free_netdev(dev);
+}
+EXPORT_SYMBOL(free_arcdev);
+
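free_arcdev() exists because the reset worker can itself run the ifdown path, so it cannot be cancelled from ->ndo_close(); it must instead be flushed after the device is unregistered and before the private area (which embeds the work item) is freed. The teardown ordering, sketched with illustrative names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_priv {
	struct work_struct reset_work;
};

/* cancel_work_sync() must precede free_netdev(): afterwards the
 * priv area, and the work item embedded in it, no longer exist. */
static void example_free_dev(struct net_device *dev)
{
	struct example_priv *p = netdev_priv(dev);

	cancel_work_sync(&p->reset_work);
	free_netdev(dev);
}
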
/* Open/initialize the board. This is called sometime after booting when
* the 'ifconfig' program is run.
*
/* shut down the card */
lp->hw.close(dev);
+
+ /* reset counters */
+ lp->reset_in_progress = 0;
+
module_put(lp->hw.owner);
return 0;
}
spin_lock_irqsave(&lp->lock, flags);
+ if (lp->reset_in_progress)
+ goto out;
+
/* RESET flag was enabled - if device is not running, we must
* clear it right away (but nothing else).
*/
if (status & RESETflag) {
arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
status);
- arcnet_close(dev);
- arcnet_open(dev);
+
+ lp->reset_in_progress = 1;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ schedule_work(&lp->reset_work);
/* get out of the interrupt handler! */
- break;
+ goto out;
}
/* RX is inhibited - we must have received something.
* Prepare to receive into the next buffer.
udelay(1);
lp->hw.intmask(dev, lp->intmask);
+out:
spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
dev->irq = 9;
if (com20020isa_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
unregister_netdev(my_dev);
free_irq(my_dev->irq, my_dev);
release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(my_dev);
+ free_arcdev(my_dev);
}
#ifndef MODULE
unregister_netdev(dev);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
dev = info->dev;
if (dev) {
dev_dbg(&link->dev, "kfree...\n");
- free_netdev(dev);
+ free_arcdev(dev);
}
dev_dbg(&link->dev, "kfree2...\n");
kfree(info);
err = com90io_probe(dev);
if (err) {
- free_netdev(dev);
+ free_arcdev(dev);
return err;
}
free_irq(dev->irq, dev);
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(dev);
+ free_arcdev(dev);
}
module_init(com90io_init)
err_release_mem:
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
err_free_dev:
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
release_mem_region(dev->mem_start,
dev->mem_end - dev->mem_start + 1);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx_ni(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->len;
+ netif_rx_ni(skb);
+
restart:
netdev_dbg(dev, "restarted\n");
priv->can_stats.restarts++;
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec;
+ struct can_berr_counter bec = { };
enum can_state state = priv->state;
if (priv->do_get_state)
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
return 0;
}
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
return 0;
}
struct net_device *peer;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ u8 len;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ len = cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
- srcstats->tx_bytes += cfd->len;
+ srcstats->tx_bytes += len;
peerstats = &peer->stats;
peerstats->rx_packets++;
- peerstats->rx_bytes += cfd->len;
+ peerstats->rx_bytes += len;
}
out_unlock:
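
The CAN hunks above all repair the same use-after-free shape: netif_rx_ni() hands the skb to the network stack, which may free it immediately, so any field needed for statistics has to be read before the call. Minimal sketch (illustrative names):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Snapshot the length first; after netif_rx_ni() the skb belongs
 * to the stack and must not be dereferenced again. */
static void example_deliver(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len = skb->len;

	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}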
!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return -EINVAL;
- if (vlan->vid_end > dev->num_vlans)
+ if (vlan->vid_end >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, true, ds->vlan_filtering);
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
- if (!priv->master_mii_bus)
+ if (!priv->master_mii_bus) {
+ of_node_put(dn);
return -EPROBE_DEFER;
+ }
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus)
+ if (!priv->slave_mii_bus) {
+ of_node_put(dn);
return -ENOMEM;
+ }
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
.port_cnt = 5, /* total cpu and user ports */
},
{
+ /*
+ * WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except that the port map
+ * contains a gap between the external and CPU ports; the
+ * port map is NOT contiguous. The per-port register
+ * map is shifted accordingly too, i.e. registers at
+ * offset 0x40 are NOT used on KSZ8794 and they ARE
+ * used on KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ */
.chip_id = 0x8794,
.dev_name = "KSZ8794",
.num_vlans = 4096,
dev->num_vlans = chip->num_vlans;
dev->num_alus = chip->num_alus;
dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
+ dev->port_cnt = fls(chip->cpu_ports);
+ dev->cpu_port = fls(chip->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->port_cnt - 1;
dev->cpu_ports = chip->cpu_ports;
-
+ dev->host_mask = chip->cpu_ports;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
+ chip->cpu_ports;
break;
}
}
if (!dev->cpu_ports)
return -ENODEV;
- dev->port_mask = BIT(dev->port_cnt) - 1;
- dev->port_mask |= dev->host_mask;
-
dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
dev->mib_cnt = ARRAY_SIZE(mib_names);
- dev->phy_port_cnt = dev->port_cnt - 1;
-
- dev->cpu_port = dev->port_cnt - 1;
- dev->host_mask = BIT(dev->cpu_port);
-
dev->ports = devm_kzalloc(dev->dev,
dev->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
- usleep_range(100, 1000);
+ msleep(100);
}
mutex_init(&dev->dev_mutex);
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (port_num >= dev->port_cnt)
+ if (!(dev->port_mask & BIT(port_num)))
return -EINVAL;
of_get_phy_mode(port,
&dev->ports[port_num].interface);
if (!entry.portvec)
entry.state = 0;
} else {
- entry.portvec |= BIT(port);
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+ entry.portvec = BIT(port);
+ else
+ entry.portvec |= BIT(port);
+
entry.state = state;
}
if (err)
return err;
+ err = mv88e6185_g1_stu_data_read(chip, entry);
+ if (err)
+ return err;
+
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[5:4] are located in VTU Operation 9:8
*/
priv = netdev_priv(dev);
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_free_netdev;
+ }
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
#endif
}
if (!n || !n->dev)
- goto free_sk;
+ goto free_dst;
ndev = n->dev;
- if (!ndev)
- goto free_dst;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
- neigh_release(n);
+ if (n)
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
*/
#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_NO_HARD_RESET,
};
static const struct fec_devinfo fec_imx6q_info = {
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
fep->mii_bus->parent = &pdev->dev;
err = of_mdiobus_register(fep->mii_bus, node);
- of_node_put(node);
if (err)
goto err_out_free_mdiobus;
+ of_node_put(node);
mii_cnt++;
err_out_free_mdiobus:
mdiobus_free(fep->mii_bus);
err_out:
+ of_node_put(node);
return err;
}
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
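
The dma_rmb() added above enforces load ordering against a DMA'd ring: the "entry valid" marker must be observed before any payload field, or the CPU may consume stale payload data left over from an earlier pass. A generic consumer-side sketch (illustrative descriptor layout):

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct example_desc {
	u8 valid;	/* written last by the device */
	u8 cmd;
	__le32 data;
};

/* Read the payload only after the valid flag is seen; dma_rmb()
 * orders the payload loads after the flag load. */
static bool example_consume(struct example_desc *d, u32 *out)
{
	if (!READ_ONCE(d->valid))
		return false;
	dma_rmb();
	*out = le32_to_cpu(d->data);
	return true;
}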
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
- if (test_bit(0, &adapter->resetting)) {
- spin_unlock_irqrestore(&adapter->state_lock, flags);
- return -EBUSY;
- }
-
adapter->state = VNIC_REMOVING;
spin_unlock_irqrestore(&adapter->state_lock, flags);
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
-
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->queues_enabled) {
- pfe.event_data.link_event.link_status = false;
- pfe.event_data.link_event.link_speed = 0;
- } else if (vf->link_forced) {
+ if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
-
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
}
}
- vf->queues_enabled = true;
-
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- /* Immediately mark queues as disabled */
- vf->queues_enabled = false;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
goto error_param;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting, wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
- * If the VF is indeed in reset, the vsi pointer has
- * to show on the newly loaded vsi under pf->vsi[id].
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- if (i > 0)
- vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
break;
- }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
ret = -EAGAIN;
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
- bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_vlan;
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MIN_MSIX 2
+#define ICE_MIN_LAN_TXRX_MSIX 1
+#define ICE_MIN_LAN_OICR_MSIX 1
+#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_txq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_txq);
}
/**
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_rxq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_rxq);
}
/**
sizeof(struct in6_addr));
input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
- input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
+ /* if no protocol requested, use IPPROTO_NONE */
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ input->ip.v6.proto = IPPROTO_NONE;
+ else
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
- num_online_cpus());
+ vsi->alloc_txq = min3(pf->num_lan_msix,
+ ice_get_avail_txq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
- vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
- num_online_cpus());
+ vsi->alloc_rxq = min3(pf->num_lan_msix,
+ ice_get_avail_rxq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
+ vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
+ max_t(int, vsi->alloc_rxq,
+ vsi->alloc_txq));
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
-#define ICE_MIN_LAN_VECS 2
-#define ICE_MIN_RDMA_VECS 2
-#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
- if (v_actual < ICE_MIN_LAN_VECS) {
+ if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
} else {
- pf->num_lan_msix = ICE_MIN_LAN_VECS;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
}
}
goto err_update_filters;
}
- /* Add filter for new MAC. If filter exists, just return success */
+ /* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
+ /* Although this MAC filter is already present in hardware, it's
+ * possible in some cases (e.g. bonding) that dev_addr was
+ * modified outside of the driver and needs to be restored
+ * to this value.
+ */
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= ICE_TX_CTX_EIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
cmd->base.phy_address = hw->phy.addr;
/* advertising link modes */
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
if (hw->mac.autoneg == 1) {
Asym_Pause);
}
- status = rd32(IGC_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(IGC_STATUS);
if (status & IGC_STATUS_LU) {
if (status & IGC_STATUS_SPEED_1000) {
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
+ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+ * We have to check this and convert it to ADVERTISE_2500_FULL
+ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+ */
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+ advertising |= ADVERTISE_2500_FULL;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, k, eewr = 0;
- s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -IGC_ERR_NVM;
goto out;
}
}
out:
- return 0;
+ return ret_val;
}
/**
/* Clear entry invalidation bit */
pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
- /* Write tcam index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
-
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
return 0;
}
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
int rc = 0, i;
u64 cfg;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->hdr.rc = rc;
dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize);
+ buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
+ buf = PTR_ALIGN(buf, OTX2_ALIGN);
iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
if (err)
- return err;
+ goto free_page;
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {
.min_size = 16 * 1024,
};
+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
+ return !!(entry->tuple_nat_node.next);
+}
+
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
err_insert:
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_has_nat(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node, tuples_nat_ht_params);
err_tuple_nat:
- if (entry->tuple_node.next)
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
err_tuple:
err_set:
kfree(entry);
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
mutex_lock(&ct_priv->shared_counter_lock);
- if (entry->tuple_node.next)
+ if (mlx5_tc_ct_entry_has_nat(entry))
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- return NUM_IPSEC_SW_COUNTERS;
+ return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+ return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
{
struct mlx5e_channels new_channels = {};
bool reset_channels = true;
+ bool opened;
int err = 0;
mutex_lock(&priv->state_lock);
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
trust_state);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (!opened)
reset_channels = false;
- }
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
reset_channels = false;
- if (reset_channels)
+ if (reset_channels) {
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_update_trust_state_hw,
&trust_state);
- else
+ } else {
err = mlx5e_update_trust_state_hw(priv, &trust_state);
+ if (!err && !opened)
+ priv->channels.params = new_channels.params;
+ }
mutex_unlock(&priv->state_lock);
goto out;
}
- new_channels.params = priv->channels.params;
+ new_channels.params = *cur_params;
new_channels.params.num_channels = count;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
+ if (err)
+ *cur_params = old_params;
+
goto out;
}
new_channels.params.num_tc = tc ? tc : 1;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = priv->channels.params;
priv->channels.params = new_channels.params;
+ err = mlx5e_num_channels_changed(priv);
+ if (err)
+ priv->channels.params = old_params;
+
goto out;
}
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_num_channels_changed_ctx, NULL);
- if (err)
- goto out;
- priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
- new_channels.params.num_tc);
out:
+ priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+ priv->channels.params.num_tc);
mutex_unlock(&priv->state_lock);
return err;
}
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- struct mlx5e_params *old_params;
+ struct mlx5e_params *cur_params;
int err = 0;
bool reset;
goto out;
}
- old_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ cur_params = &priv->channels.params;
+ if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = *old_params;
+ new_channels.params = *cur_params;
new_channels.params.lro_en = enable;
- if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+ if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}
if (!reset) {
- *old_params = new_channels.params;
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
+ *cur_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ *cur_params = old_params;
goto out;
}
}
if (!reset) {
+ unsigned int old_mtu = params->sw_mtu;
+
params->sw_mtu = new_mtu;
- if (preactivate)
- preactivate(priv, NULL);
+ if (preactivate) {
+ err = preactivate(priv, NULL);
+ if (err) {
+ params->sw_mtu = old_mtu;
+ goto out;
+ }
+ }
netdev->mtu = params->sw_mtu;
goto out;
}
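
This MTU hunk, like the trust-state, channel-count, TC-count and LRO fixes in the mlx5 hunks above, turns a blind parameter update into apply-then-commit: stash the old value, run the hardware hook, and roll back on failure so the cached state never disagrees with the device. The skeleton, with illustrative types:

/* Apply @new_val through @commit(); roll the cached parameter
 * back if the hardware rejects it, keeping both sides in sync. */
static int example_set_param(unsigned int *param, unsigned int new_val,
			     int (*commit)(void *ctx), void *ctx)
{
	unsigned int old_val = *param;
	int err;

	*param = new_val;
	err = commit(ctx);
	if (err)
		*param = old_val;
	return err;
}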
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->features |= NETIF_F_NETNS_LOCAL;
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
+#endif
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
{
flow_flag_clear(flow, OFFLOADED);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ goto offload_rule_0;
+
if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
- netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
- dissector->used_keys);
+ netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
return err;
}
-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
+ u32 rate_mbps = 0;
u16 vport_num;
- u32 rate_mbps;
int err;
vport_num = rpriv->rep->vport;
* Moreover, if rate is non zero we choose to configure to a minimum of
* 1 mbit/sec.
*/
- rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+ if (rate) {
+ rate = (rate * BITS_PER_BYTE) + 500000;
+ rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
+ }
+
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
destroy_ft:
root->cmds->destroy_flow_table(root, ft);
free_ft:
+ rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
if (!fte_tmp)
continue;
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ /* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
return rule;
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
struct rb_node rb_node;
u64 addr;
struct page *page;
- u16 func_id;
+ u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
+ return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
if (!root)
return ERR_PTR(-ENOMEM);
- err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+ err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
return root;
}
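
get_function() above widens the page-tracking key so pages owned by an embedded-CPU (EC) function no longer collide with the PF that shares the same 16-bit function id: the id occupies the low half of the key and the ec_function flag sits in bit 16. Pack/unpack sketch (helper names are illustrative):

#include <linux/bits.h>
#include <linux/types.h>

/* Key layout: bit 16 = ec_function flag, bits 15:0 = function id. */
static inline u32 example_make_key(u16 func_id, bool ec_function)
{
	return (u32)func_id | ((u32)ec_function << 16);
}

static inline u16 example_key_func_id(u32 key)
{
	return key & 0xffff;
}

static inline bool example_key_is_ec(u32 key)
{
	return !!(key & BIT(16));
}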
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
struct fw_page *tfp;
int i;
- root = page_root_per_func_id(dev, func_id);
+ root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);
nfp->addr = addr;
nfp->page = page;
- nfp->func_id = func_id;
+ nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
- u32 func_id)
+ u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;
return err;
}
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;
list_for_each_entry(iter, &dev->priv.free_list, list) {
- if (iter->func_id != func_id)
+ if (iter->function != function)
continue;
fp = iter;
}
{
struct rb_root *root;
- root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+ root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;
kfree(fwp);
}
-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
list_add(&fwp->list, &dev->priv.free_list);
}
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
goto map;
}
- err = insert_page(dev, addr, page, func_id);
+ err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
for (i = 0; i < npages; i++) {
retry:
- err = alloc_4k(dev, &addr, func_id);
+ err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
- err = alloc_system_page(dev, func_id);
+ err = alloc_system_page(dev, function);
if (err)
goto out_4k;
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
return err;
}
-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
+ bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
+ ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;
return 0;
}
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
}
for (i = 0; i < num_claimed; i++)
- free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
if (nclaimed)
*nclaimed = num_claimed;
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp1_span_cpu_can_handle,
.parms_set = mlxsw_sp1_span_entry_cpu_parms,
.configure = mlxsw_sp1_span_entry_cpu_configure,
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .is_static = true,
.can_handle = mlxsw_sp_port_dev_check,
.parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp2_span_cpu_can_handle,
.parms_set = mlxsw_sp2_span_entry_cpu_parms,
.configure = mlxsw_sp2_span_entry_cpu_configure,
if (!refcount_read(&curr->ref_count))
continue;
+ if (curr->ops->is_static)
+ continue;
+
err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
if (err)
continue;
};
struct mlxsw_sp_span_entry_ops {
+ bool is_static;
bool (*can_handle)(const struct net_device *to_dev);
int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type)
{
+ u32 cmd = ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+ unsigned int mc_ports;
+
+ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+ if (type == ENTRYTYPE_MACv4)
+ mc_ports = (mac[1] << 8) | mac[2];
+ else if (type == ENTRYTYPE_MACv6)
+ mc_ports = (mac[0] << 8) | mac[1];
+ else
+ mc_ports = 0;
+
+ if (mc_ports & BIT(ocelot->num_phys_ports))
+ cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
- ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
- ANA_TABLES_MACACCESS_DEST_IDX(port) |
- ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
- ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
- ANA_TABLES_MACACCESS);
+ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
return ocelot_mact_wait_for_completion(ocelot);
}
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
- if (!ocelot_netdevice_dev_check(dev))
- return 0;
-
if (event == NETDEV_PRECHANGEUPPER &&
+ ocelot_netdevice_dev_check(dev) &&
netif_is_lag_master(info->upper_dev)) {
struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
struct netlink_ext_ack *extack;
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
+static bool rtl_skb_is_udp(struct sk_buff *skb)
+{
+ int no = skb_network_offset(skb);
+ struct ipv6hdr *i6h, _i6h;
+ struct iphdr *ih, _ih;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
+ return ih && ih->protocol == IPPROTO_UDP;
+ case htons(ETH_P_IPV6):
+ i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
+ return i6h && i6h->nexthdr == IPPROTO_UDP;
+ default:
+ return false;
+ }
+}
+
+#define RTL_MIN_PATCH_LEN 47
+
+/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
+static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
{
+ unsigned int padto = 0, len = skb->len;
+
+ if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
+ rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+ unsigned int trans_data_len = skb_tail_pointer(skb) -
+ skb_transport_header(skb);
+
+ if (trans_data_len >= offsetof(struct udphdr, len) &&
+ trans_data_len < RTL_MIN_PATCH_LEN) {
+ u16 dest = ntohs(udp_hdr(skb)->dest);
+
+ /* dest is a standard PTP port */
+ if (dest == 319 || dest == 320)
+ padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
+ }
+
+ if (trans_data_len < sizeof(struct udphdr))
+ padto = max_t(unsigned int, padto,
+ len + sizeof(struct udphdr) - trans_data_len);
+ }
+
+ return padto;
+}
+
+static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ unsigned int padto;
+
+ padto = rtl8125_quirk_udp_padto(tp, skb);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
- return true;
+ padto = max_t(unsigned int, padto, ETH_ZLEN);
default:
- return false;
+ break;
}
+
+ return padto;
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
- /* eth_skb_pad would free the skb on error */
- return !__skb_put_padto(skb, ETH_ZLEN, false);
+ unsigned int padto = rtl_quirk_packet_padto(tp, skb);
+
+ /* skb_padto would free the skb on error */
+ return !__skb_put_padto(skb, padto, false);
}
return true;
if (skb->len < ETH_ZLEN)
features &= ~NETIF_F_CSUM_MASK;
+ if (rtl_quirk_packet_padto(tp, skb))
+ features &= ~NETIF_F_CSUM_MASK;
+
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
cancel_work_sync(&tp->wk.work);
- phy_disconnect(tp->phydev);
-
free_irq(pci_irq_vector(pdev, 0), tp);
+ phy_disconnect(tp->phydev);
+
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- pm_runtime_put_sync(&mdp->pdev->dev);
-
mdp->is_opened = 0;
+ pm_runtime_put(&mdp->pdev->dev);
+
return 0;
}
return 0;
}
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read(bus, phy, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
if (!mdp->mii_bus)
return -ENOMEM;
+ /* Wrap accessors with Runtime PM-aware ops */
+ mdp->mii_bus->read = sh_mdiobb_read;
+ mdp->mii_bus->write = sh_mdiobb_write;
+
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = dev;
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
u32 channel_id = gsi_channel_id(channel);
- void *virt = channel->gsi->virt;
+ void __iomem *virt = channel->gsi->virt;
u32 val;
val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
/* Hardware requires a 2^n ring size, with alignment equal to size */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, ring->addr);
+ dma_free_coherent(dev, size, ring->virt, addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
size);
return -EINVAL; /* Not a good error value, but distinct */
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->data->qmap)
- val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
+ val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
return true;
if (!status->pkt_len)
return true;
- endpoint_id = u32_get_bits(status->endp_dst_idx,
- IPA_STATUS_DST_IDX_FMASK);
+ endpoint_id = u8_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
if (endpoint_id != endpoint->endpoint_id)
return true;
size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
if (size != ipa->imem_size)
- dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
size, ipa->imem_size);
} else {
dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
if (size != ipa->smem_size)
- dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
size, ipa->smem_size);
} else {
return dev_addr;
}
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
mdiobb_get_bit(ctrl);
return ret;
}
+EXPORT_SYMBOL(mdiobb_read);
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
mdiobb_get_bit(ctrl);
return 0;
}
+EXPORT_SYMBOL(mdiobb_write);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
+ rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
static void team_compute_features(struct team *team)
{
- mutex_lock(&team->lock);
__team_compute_features(team);
- mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
+}, {
+ /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+	/* If the speed hasn't changed, don't report it;
+	 * RTL8156 devices shipped before 2021 send a notification about
+	 * every 32 ms.
+	 */
+ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+ return;
+
+ dev->rx_speed = rx_speed;
+ dev->tx_speed = tx_speed;
+
/*
* Currently the USB-NET API does not support reporting the actual
	 * device speed, so print it instead.
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- usbnet_link_change(dev, !!event->wValue, 0);
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_b0_hr_b0 = {
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_c0_hr_b0 = {
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
}
/*
- * Evaluate a DSM with no arguments and a single u8 return value (inside a
- * buffer object), verify and return that value.
+ * Generic function to evaluate a DSM with no arguments and an integer
+ * return value (either as an integer object or inside a buffer object);
+ * verify the result and assign it to the "value" parameter.
+ * Returns 0 on success and the appropriate errno otherwise.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ u64 *value, size_t expected_size)
{
union acpi_object *obj;
- int ret;
+ int ret = 0;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+				    "Failed to get DSM object, func=%d\n",
+ func);
return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *value = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ __le64 le_value = 0;
- if (obj->type != ACPI_TYPE_BUFFER) {
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+ return -EINVAL;
+
+		/* warn if the buffer size doesn't match the expected size */
+ if (obj->buffer.length != expected_size)
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+ obj->buffer.length);
+
+ /* assuming LE from Intel BIOS spec */
+ memcpy(&le_value, obj->buffer.pointer,
+ min_t(size_t, expected_size, (size_t)obj->buffer.length));
+ *value = le64_to_cpu(le_value);
+ } else {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method did not return a valid object, type=%d\n",
obj->type);
goto out;
}
- if (obj->buffer.length != sizeof(u8)) {
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method returned invalid buffer, length=%d\n",
- obj->buffer.length);
- ret = -EINVAL;
- goto out;
- }
-
- ret = obj->buffer.pointer[0];
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func=%d, ret=%d\n",
func, ret);
ACPI_FREE(obj);
return ret;
}
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));
+
+ if (ret < 0)
+ return ret;
+
+	/* cast val (u64) down to u8 */
+ *value = (u8)val;
+ return 0;
+}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static inline
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
{
return -ENOENT;
}
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait)
{
- const struct firmware *pnvm;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
- char pnvm_name[64];
- int ret;
/* if the SKU_ID is empty, there's nothing to do */
if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
return 0;
- /* if we already have it, nothing to do either */
- if (trans->pnvm_loaded)
- return 0;
+ /* load from disk only if we haven't done it (or tried) before */
+ if (!trans->pnvm_loaded) {
+ const struct firmware *pnvm;
+ char pnvm_name[64];
+ int ret;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+ * don't add the dot here...
+ */
+ snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+ trans->cfg->fw_name_pre);
+
+ /* ...but replace the hyphen with the dot here. */
+ if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+ pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
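+		/* e.g. a prefix of "iwlwifi-ty-a0-gf-a0-" yields "iwlwifi-ty-a0-gf-a0.pnvm" */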
+
+ ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ if (ret) {
+ IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+ pnvm_name, ret);
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->pnvm_loaded = true;
+ } else {
+ iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
- /*
- * The prefix unfortunately includes a hyphen at the end, so
- * don't add the dot here...
- */
- snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
- trans->cfg->fw_name_pre);
-
- /* ...but replace the hyphen with the dot here. */
- if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
- pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
-
- ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
- if (ret) {
- IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
- pnvm_name, ret);
- } else {
- iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
-
- release_firmware(pnvm);
+ release_firmware(pnvm);
+ }
}
iwl_init_notification_wait(notif_wait, &pnvm_wait,
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __IWL_CONFIG_H__
#define IWL_CFG_CORES_BT_GNSS 0x5
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
+extern const char iwl_ax203_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_b0_hr_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
- /* For safe using a string from FW make sure we have a
- * null terminator
- */
- reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
- IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
+ mdelay(delay_ms);
iwl_write_prph_no_grab(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
}
}
-IWL_EXPORT_SYMBOL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
void iwl_force_nmi(struct iwl_trans *trans)
{
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
- iwl_write_prph(trans, DEVICE_SET_NMI_REG,
- DEVICE_SET_NMI_VAL_DRV);
+ iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_DRV, 1);
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
+ u32 val, u32 delay_ms);
+static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_write_prph_delay(trans, ofs, val, 0);
+}
+
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
#define RADIO_RSP_ADDR_POS (6)
#define RADIO_RSP_RD_CMD (3)
+/* LTR control (Qu only) */
+#define HPM_MAC_LTR_CSR 0xa0348c
+#define HPM_MAC_LRT_ENABLE_ALL 0xf
+/* also uses CSR_LTR_* for values */
+#define HPM_UMAC_LTR 0xa03480
+
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c1c)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
mutex_lock(&mvm->mutex);
- clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
-
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* no need to reset the device in unified images, if successful */
if (unified_image && !ret) {
/* nothing else to do if we already sent D0I3_END_CMD */
const size_t bufsz = sizeof(buf);
int pos = 0;
+ mutex_lock(&mvm->mutex);
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ mutex_unlock(&mvm->mutex);
+
do_div(curr_os, NSEC_PER_USEC);
diff = curr_os - curr_gp2;
pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
{
+ u8 value;
+
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2);
+ DSM_FUNC_ENABLE_INDONESIA_5G2, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_INDONESIA_MAX)
+ else if (value >= DSM_VALUE_INDONESIA_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
- ret);
+ "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+ else if (value == DSM_VALUE_INDONESIA_ENABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
return DSM_VALUE_INDONESIA_ENABLE;
static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
{
+ u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_DISABLE_SRD);
+ DSM_FUNC_DISABLE_SRD, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_SRD_MAX)
+ else if (value >= DSM_VALUE_SRD_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function DISABLE_SRD return invalid value, ret=%d\n",
- ret);
+ "DSM function DISABLE_SRD return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_SRD_PASSIVE) {
+ else if (value == DSM_VALUE_SRD_PASSIVE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
return DSM_VALUE_SRD_PASSIVE;
- } else if (ret == DSM_VALUE_SRD_DISABLE) {
+ } else if (value == DSM_VALUE_SRD_DISABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: disabling SRD\n");
return DSM_VALUE_SRD_DISABLE;
iwl_mvm_binding_remove_vif(mvm, vif);
out:
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+ switching_chanctx)
+ return;
mvmvif->phy_ctxt = NULL;
iwl_mvm_power_update_mac(mvm);
}
if (!mvm->scan_cmd)
goto out_free;
+ /* invalidate ids to prevent accidental removal of sta_id 0 */
+ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
/* Set EBS as successful as long as not stated otherwise by the FW. */
mvm->last_ebs_successful = true;
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
kfree(reprobe);
module_put(THIS_MODULE);
}
module_put(THIS_MODULE);
return;
}
- reprobe->dev = mvm->trans->dev;
+ reprobe->dev = get_device(mvm->trans->dev);
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
else if (next)
if (tcp_payload_len > mss) {
skb_shinfo(tmp)->gso_size = mss;
+ skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
} else {
if (qos) {
u8 *qc;
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img)
- return -ENOMEM;
+ if (!iml_img) {
+ ret = -ENOMEM;
+ goto err_free_ctxt_info;
+ }
memcpy(iml_img, trans->iml, trans->iml_len);
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
- /*
- * The firmware initializes this again later (to a smaller
- * value), but for the boot process initialize the LTR to
- * ~250 usec.
- */
- u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+ /*
+	 * To work around hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->trans_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (trans->trans_cfg->integrated &&
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0;
+err_free_ctxt_info:
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev,
sizeof(*prph_info),
return ret;
}
+ if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+ return -EBUSY;
+
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
cpu_to_le64(trans_pcie->pnvm_dram.physical);
prph_sc_ctrl->pnvm_cfg.pnvm_size =
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_c0_hr_b0, iwl_ax203_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
while (offs < dwords) {
/* limit the time we spin here under lock to 1/2s */
- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+ unsigned long end = jiffies + HZ / 2;
+ bool resched = false;
if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR,
HBUS_TARG_MEM_RDAT);
offs++;
- /* calling ktime_get is expensive so
- * do it once in 128 reads
- */
- if (offs % 128 == 0 && ktime_after(ktime_get(),
- timeout))
+ if (time_after(jiffies, end)) {
+ resched = true;
break;
+ }
}
iwl_trans_release_nic_access(trans, &flags);
+
+ if (resched)
+ cond_resched();
} else {
return -EBUSY;
}
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ if (!txq) {
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+ return;
+ }
+
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
* idx is bounded by n_window
*/
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
iwl_txq_get_tfd(trans, txq, idx));
- /* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
+ if (!WARN_ON_ONCE(!skb))
+ iwl_txq_free_tso_page(trans, skb);
}
iwl_txq_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
*/
int rd_ptr = txq->read_ptr;
int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
int cmd, int *seq)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- enum mt76_txq_id qid;
+ enum mt76_mcuq_id qid;
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
{
struct mt76_queue *q = &dev->q_rx[qid];
struct mt76_sdio *sdio = &dev->sdio;
- int len = 0, err, i, order;
+ int len = 0, err, i;
struct page *page;
u8 *buf;
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
- order = get_order(len);
- page = __dev_alloc_pages(GFP_KERNEL, order);
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
if (!page)
return -ENOMEM;
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
- __free_pages(page, order);
+ put_page(page);
return err;
}
if (q->queued + i + 1 == q->ndesc)
break;
}
- __free_pages(page, order);
+ put_page(page);
spin_lock_bh(&q->lock);
q->head = (q->head + i) % q->ndesc;
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_mcu_txd *mcu_txd;
u8 seq, pkt_fmt, qidx;
- enum mt76_txq_id txq;
+ enum mt76_mcuq_id qid;
__le32 *txd;
u32 val;
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (cmd == -MCU_CMD_FW_SCATTER) {
- txq = MT_MCUQ_FWDL;
+ qid = MT_MCUQ_FWDL;
goto exit;
}
mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
- txq = MT_MCUQ_WA;
+ qid = MT_MCUQ_WA;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
} else {
- txq = MT_MCUQ_WM;
+ qid = MT_MCUQ_WM;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
}
if (wait_seq)
*wait_seq = seq;
- return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
static void
if (new_p) {
/* we have one extra ref from the allocator */
- __free_pages(e->p, MT_RX_ORDER);
-
+ put_page(e->p);
e->p = new_p;
}
}
}
e = &q->e[q->end];
- e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
q->end = (q->end + 1) % q->entries;
q->used++;
+ e->skb = skb;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
}
length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = nvme_to_user_ptr(io.metadata);
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
}
}
- list_add_tail(&ns->siblings, &head->list);
+ list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
mutex_unlock(&ctrl->subsys->lock);
return 0;
}
for (ns = nvme_next_ns(head, old);
- ns != old;
+ ns && ns != old;
ns = nvme_next_ns(head, ns)) {
if (nvme_path_is_disabled(ns))
continue;
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
return true;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
int i;
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
- rq_dma_dir(req));
- return;
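+	/*
+	 * Walk the chain of PRP list pages: the last slot of each page
+	 * holds the DMA address of the next page, so read it before
+	 * returning the current page to the pool.
+	 */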
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
}
- WARN_ON_ONCE(!iod->nents);
+}
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+ const int last_sg = SGES_PER_PAGE - 1;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
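+	/* same chained-page walk as nvme_free_prps(), but over SGL descriptor pages */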
+ for (i = 0; i < iod->npages; i++) {
+ struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
- if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
- dma_addr);
+ dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
- for (i = 0; i < iod->npages; i++) {
- void *addr = nvme_pci_iod_list(req)[i];
+}
- if (iod->use_sgl) {
- struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- next_dma_addr =
- le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
- } else {
- __le64 *prp_list = addr;
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+ rq_dma_dir(req));
+ else
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
- next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
- dma_addr = next_dma_addr;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
}
+ WARN_ON_ONCE(!iod->nents);
+
+ nvme_unmap_sg(dev, req);
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ iod->first_dma);
+ else if (iod->use_sgl)
+ nvme_free_sgls(dev, req);
+ else
+ nvme_free_prps(dev, req);
mempool_free(iod->sg, dev->iod_mempool);
}
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return BLK_STS_RESOURCE;
+ goto free_prps;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
-
done:
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+ nvme_free_prps(dev, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
"Invalid SGL for payload:%d nents:%d\n",
blk_rq_payload_bytes(req), iod->nents);
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list)
- return BLK_STS_RESOURCE;
+ goto free_sgls;
i = 0;
nvme_pci_iod_list(req)[iod->npages++] = sg_list;
} while (--entries > 0);
return BLK_STS_OK;
+free_sgls:
+ nvme_free_sgls(dev, req);
+ return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
- goto out;
+ goto out_free_sg;
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
- goto out;
+ goto out_free_sg;
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
if (ret != BLK_STS_OK)
- nvme_unmap_data(dev, req);
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ nvme_unmap_sg(dev, req);
+out_free_sg:
+ mempool_free(iod->sg, dev->iod_mempool);
return ret;
}
if (dev->cmb_size)
return;
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
if (offset > bar_size)
return;
+ /*
+ * Tell the controller about the host side address mapping the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
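+		/*
+		 * hi_lo_writeq() writes the upper dword first, so the CMB
+		 * base address bits are in place before the CRE/CMSE enable
+		 * bits in the low dword take effect.
+		 */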
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
/*
* Controllers may support a CMB size larger than their BAR,
* for example, due to being behind a bridge. Reduce the CMB to
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
struct completion cm_done;
bool pi_support;
int cq_size;
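+	/* guards queue stop so the LIVE-flag check and teardown stay atomic */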
+ struct mutex queue_lock;
};
struct nvme_rdma_ctrl {
int ret;
queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
if (idx && ctrl->ctrl.max_integrity_segments)
queue->pi_support = true;
if (IS_ERR(queue->cm_id)) {
dev_info(ctrl->ctrl.device,
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
- return PTR_ERR(queue->cm_id);
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
}
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
- return;
- __nvme_rdma_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ mutex_destroy(&queue->queue_lock);
}
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
struct work_struct io_work;
int io_cpu;
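+	/* serializes queue stop against a concurrent stop/free */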
+ struct mutex queue_lock;
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
sock_release(queue->sock);
kfree(queue->pdu);
+ mutex_destroy(&queue->queue_lock);
}
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int ret, rcv_pdu_size;
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
if (ret) {
dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
- return ret;
+ goto err_destroy_mutex;
}
/* Single syn retry */
err_sock:
sock_release(queue->sock);
queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
- return;
- __nvme_tcp_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
/* return an all zeroed buffer if we can't find an active namespace */
ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
- if (!ns)
+ if (!ns) {
+ status = NVME_SC_INVALID_NS;
goto done;
+ }
nvmet_ns_revalidate(ns);
id->nsattr |= (1 << 0);
nvmet_put_namespace(ns);
done:
- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
kfree(id);
out:
nvmet_req_complete(req, status);
length = cmd->pdu_len;
cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+ cmd->sg_idx = offset / PAGE_SIZE;
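+	/* 'offset' lies within this sg page; DIV_ROUND_UP overshot for unaligned offsets */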
sg_offset = offset % PAGE_SIZE;
sg = &cmd->req.sg[cmd->sg_idx];
length -= iov_len;
sg = sg_next(sg);
iov++;
+ sg_offset = 0;
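+		/* only the first sg entry is read from a non-zero offset */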
}
iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
- /* ...but only set bus limit if we found valid dma-ranges earlier */
- if (!ret)
+ /* ...but only set bus limit and range map if we found valid dma-ranges earlier */
+ if (!ret) {
dev->bus_dma_limit = end;
+ dev->dma_range_map = map;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
iommu = of_iommu_configure(dev, np, id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+ /* Don't touch range map if it wasn't set from a valid dma-ranges */
+ if (!ret)
+ dev->dma_range_map = NULL;
kfree(map);
return -EPROBE_DEFER;
}
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
- dev->dma_range_map = map;
return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
return i;
pci_save_ltr_state(dev);
- pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
- pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
- error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
- 2 * sizeof(u32));
- if (error)
- pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
pci_allocate_vc_save_buffers(dev);
}
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
PCI_L1SS_CTL1_L1SS_MASK, val);
}
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
- int aspm_l1ss;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
- if (!aspm_l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, cap++);
- pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
- int aspm_l1ss;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
- if (!aspm_l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, *cap++);
- pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, *cap++);
-}
-
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
# SPDX-License-Identifier: GPL-2.0
-obj-y += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB) += phy-ingenic-usb.o
config PHY_MTK_MIPI_DSI
tristate "MediaTek MIPI-DSI Driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on OF
select GENERIC_PHY
help
Support MIPI DSI for Mediatek SoCs.
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
- return PTR_ERR(generic_phy);
+ goto out_reg_disable;
}
phy_set_drvdata(generic_phy, ddata);
phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto out_reg_disable;
+ }
error = cpcap_usb_init_optional_pins(ddata);
if (error)
- return error;
+ goto out_reg_disable;
cpcap_usb_init_optional_gpios(ddata);
error = cpcap_usb_init_iio(ddata);
if (error)
- return error;
+ goto out_reg_disable;
error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
- return error;
+ goto out_reg_disable;
usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return 0;
+
+out_reg_disable:
+ regulator_disable(ddata->vusb);
+
+ return error;
}
static int cpcap_usb_phy_remove(struct platform_device *pdev)
#define D22 40
SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
GROUP_DECL(PWM8G0, D22);
err = hw->soc->bias_set(hw, desc, pullup);
if (err)
return err;
+ } else if (hw->soc->bias_set_combo) {
+ err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+ if (err)
+ return err;
} else {
return -ENOTSUPP;
}
} else {
int irq = chip->to_irq(chip, offset);
const int pullidx = pull ? 1 : 0;
- bool wake;
int val;
static const char * const pulls[] = {
"none ",
#define JZ4740_GPIO_TRIG 0x70
#define JZ4740_GPIO_FLAG 0x80
-#define JZ4760_GPIO_INT 0x10
-#define JZ4760_GPIO_PAT1 0x30
-#define JZ4760_GPIO_PAT0 0x40
-#define JZ4760_GPIO_FLAG 0x50
-#define JZ4760_GPIO_PEN 0x70
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
#define X1830_GPIO_PEL 0x110
#define X1830_GPIO_PEH 0x120
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
}
break;
}
- if (jzgc->jzpc->info->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
+ if (jzgc->jzpc->info->version >= ID_JZ4770) {
+ reg1 = JZ4770_GPIO_PAT1;
+ reg2 = JZ4770_GPIO_PAT0;
} else {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
}
irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
}
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
}
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4760) {
- if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
- ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ if (jzpc->info->version >= ID_JZ4770) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+ ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
'A' + offt, idx, func);
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
}
return 0;
'A' + offt, idx, input ? "in" : "out");
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->info->version >= ID_JZ4760)
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
REG_SET(X1830_GPIO_PEH), bias << idxh);
}
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
}
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->info->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ if (jzpc->info->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
}
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
* @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
* @soc: Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
* @phys_base: Physical base address
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
MSM_ACCESSOR(intr_status)
MSM_ACCESSOR(intr_target)
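+/* ack polarity differs per group: intr_ack_high groups ack with 1, others with 0 */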
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+ const struct msm_pingroup *g)
+{
+ u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+ msm_writel_intr_status(val, pctrl, g);
+}
+
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned group)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *gc = &pctrl->chip;
+ unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+ struct irq_data *d = irq_get_irq_data(irq);
+ unsigned int gpio_func = pctrl->soc->gpio_func;
const struct msm_pingroup *g;
unsigned long flags;
u32 val, mask;
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ /*
+	 * If a GPIO interrupt is set up on this pin, we need special
+	 * handling: the interrupt detection logic will still see the pin
+	 * twiddle even when we're muxed away.
+	 *
+	 * When we see a pin with an interrupt set up on it, we disable
+	 * (mask) interrupts on it when we mux away, until we mux back. Note
+	 * that disable_irq() refcounts, so interrupts stay disabled as long
+	 * as at least one disable_irq() call is outstanding.
+ */
+ if (d && i != gpio_func &&
+ !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+ disable_irq(irq);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_ctl(pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (d && i == gpio_func &&
+ test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+ /*
+		 * Clear interrupts detected while the pin was muxed away from
+		 * GPIO, since we only masked them.
+ */
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+ else
+ msm_ack_intr_status(pctrl, g);
+
+ enable_irq(irq);
+ }
+
return 0;
}
if (!g->nfuncs)
return 0;
- /* For now assume function 0 is GPIO because it always is */
- return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+ return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
}
static const struct pinmux_ops msm_pinmux_ops = {
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
raw_spin_lock_irqsave(&pctrl->lock, flags);
- if (status_clear) {
- /*
- * clear the interrupt status bit before unmask to avoid
- * any erroneous interrupts that would have got latched
- * when the interrupt is not in use.
- */
- val = msm_readl_intr_status(pctrl, g);
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
- }
-
val = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_enable_bit);
irq_chip_enable_parent(d);
if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
- msm_gpio_irq_clear_unmask(d, true);
+ msm_gpio_irq_unmask(d);
}
static void msm_gpio_irq_disable(struct irq_data *d)
msm_gpio_irq_mask(d);
}
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
- msm_gpio_irq_clear_unmask(d, false);
-}
-
/**
* msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
 * @d: The irq data.
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = msm_readl_intr_status(pctrl, g);
- if (g->intr_ack_high)
- val |= BIT(g->intr_status_bit);
- else
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
+ msm_ack_intr_status(pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
+ bool was_enabled;
u32 val;
if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
val = msm_readl_intr_cfg(pctrl, g);
+ was_enabled = val & BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
}
msm_writel_intr_cfg(val, pctrl, g);
+ /*
+ * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+ * Clear the interrupt. This is safe because we have
+ * IRQCHIP_SET_TYPE_MASKED.
+ */
+ if (!was_enabled)
+ msm_ack_intr_status(pctrl, g);
+
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
}
/*
- * Clear the interrupt that may be pending before we enable
- * the line.
- * This is especially a problem with the GPIOs routed to the
- * PDC. These GPIOs are direct-connect interrupts to the GIC.
- * Disabling the interrupt line at the PDC does not prevent
- * the interrupt from being latched at the GIC. The state at
- * GIC needs to be cleared before enabling.
+ * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+	 * only works if the disable is not lazy, since we only clear bogus
+	 * interrupts in hardware. Explicitly mark the interrupt as UNLAZY.
*/
- if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
return 0;
out:
* @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
* to be aware that their parent can't handle dual
* edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
bool wakeirq_dual_edge_errata;
+ unsigned int gpio_func;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ACPI
default y
help
Say Y here to get to see options for platform-specific device drivers
config SURFACE_3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
- depends on ACPI && KEYBOARD_GPIO && I2C
+ depends on KEYBOARD_GPIO && I2C
help
This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
config SURFACE_3_POWER_OPREGION
tristate "Surface 3 battery platform operation region support"
- depends on ACPI && I2C
+ depends on I2C
help
This driver provides support for ACPI operation
region of the Surface 3 battery platform driver.
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
- depends on ACPI
depends on DMI
help
This driver marks the GPEs related to the ACPI lid device found on
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
- depends on ACPI && INPUT
+ depends on INPUT
help
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
return 0;
}
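+/* __maybe_unused: these callbacks are only referenced when CONFIG_PM_SLEEP is set */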
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
{
return surface_lid_enable_wakeup(dev, true);
}
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
{
return surface_lid_enable_wakeup(dev, false);
}
iowrite32(val, dev->regbase + reg_offset);
}
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
return retval;
/* need to use specific instance_id and guid combination to get right data */
obj = get_wmiobj_pointer(instance_id, guid);
- if (!obj)
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE)
return -ENODEV;
elements = obj->package.elements;
mutex_lock(&wmi_priv.mutex);
while (elements) {
/* sanity checking */
+ if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+ pr_debug("incorrect element type\n");
+ goto nextobj;
+ }
if (strlen(elements[ATTR_NAME].string.pointer) == 0) {
pr_debug("empty attribute found\n");
goto nextobj;
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
ret = bios_return->return_code;
if (ret) {
- if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
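+		/* don't warn on queries the BIOS simply doesn't support */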
+ if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+ ret != HPWMI_RET_UNKNOWN_CMDTYPE)
pr_warn("query 0x%x returned error 0x%x\n", query, ret);
goto out_free;
}
}
/* Tablet mode */
- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
- if (!(val < 0)) {
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ if (enable_tablet_mode_sw > 0) {
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+ if (val >= 0) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
{}
};
-static const struct i2c_inst_data int3515_data[] = {
- { "tps6598x", IRQ_RESOURCE_APIC, 0 },
- { "tps6598x", IRQ_RESOURCE_APIC, 1 },
- { "tps6598x", IRQ_RESOURCE_APIC, 2 },
- { "tps6598x", IRQ_RESOURCE_APIC, 3 },
- {}
-};
+/*
+ * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt
+ * issues. The most common problem seen is interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where those are not a problem, so
+ * there are some other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the below, and add an entry for the
+ * INT3515 device to the i2c_multi_instance_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[] = {
+ * { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ * { }
+ * };
+ */
/*
* Note new device-ids must also be added to i2c_multi_instantiate_ids in
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
{ "BSG2150", (unsigned long)bsg2150_data },
- { "INT3515", (unsigned long)int3515_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
+ bool has_touchpad_switch;
const char *fnesc_guid;
};
} else if (attr == &dev_attr_fn_lock.attr) {
supported = acpi_has_method(priv->adev->handle, "HALS") &&
acpi_has_method(priv->adev->handle, "SALS");
- } else
+ } else if (attr == &dev_attr_touchpad.attr)
+ supported = priv->has_touchpad_switch;
+ else
supported = true;
return supported ? attr->mode : 0;
{
unsigned long value;
+ if (!priv->has_touchpad_switch)
+ return;
+
/* Without reading from EC touchpad LED doesn't switch state */
if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
priv->platform_device = pdev;
priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+ /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+ priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
+
ret = ideapad_sysfs_init(priv);
if (ret)
return ret;
if (!priv->has_hw_rfkill_switch)
write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+ /* The same for Touchpad */
+ if (!priv->has_touchpad_switch)
+ write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(priv, i);
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
},
},
{} /* Array terminator */
TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
+ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3rd gen) */
if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
return 0;
/* Otherwise, if there was an error return it */
- if (palm_err && (palm_err != ENODEV))
+ if (palm_err && (palm_err != -ENODEV))
return palm_err;
- if (lap_err && (lap_err != ENODEV))
+ if (lap_err && (lap_err != -ENODEV))
return lap_err;
if (has_palmsensor) {
.properties = digma_citi_e200_props,
};
+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
+ {
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
{
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
- int ret;
+ int ret = 0;
/* No supply to resolve? */
if (!rdev->supply_name)
return 0;
- /* Supply already resolved? */
+ /* Supply already resolved? (fast-path without locking contention) */
if (rdev->supply)
return 0;
/* Did the lookup explicitly defer for us? */
if (ret == -EPROBE_DEFER)
- return ret;
+ goto out;
if (have_full_constraints()) {
r = dummy_regulator_rdev;
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
if (r == rdev) {
dev_err(dev, "Supply for %s (%s) resolved to itself\n",
rdev->desc->name, rdev->supply_name);
- if (!have_full_constraints())
- return -EINVAL;
+ if (!have_full_constraints()) {
+ ret = -EINVAL;
+ goto out;
+ }
r = dummy_regulator_rdev;
get_device(&r->dev);
}
if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
if (!device_is_bound(r->dev.parent)) {
put_device(&r->dev);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
ret = regulator_resolve_supply(r);
if (ret < 0) {
put_device(&r->dev);
- return ret;
+ goto out;
+ }
+
+ /*
+ * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+ * between rdev->supply null check and setting rdev->supply in
+ * set_supply() from concurrent tasks.
+ */
+ regulator_lock(rdev);
+
+ /* Supply just resolved by a concurrent task? */
+ if (rdev->supply) {
+ regulator_unlock(rdev);
+ put_device(&r->dev);
+ goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
+ regulator_unlock(rdev);
put_device(&r->dev);
- return ret;
+ goto out;
}
+ regulator_unlock(rdev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
if (ret < 0) {
_regulator_put(rdev->supply);
rdev->supply = NULL;
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ return ret;
}
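The recheck-under-lock in the hunk above is the classic double-checked pattern: the cheap unlocked test keeps the common path free of mutex contention, and the second test under rdev->mutex closes the race against a concurrent resolver. Below is a minimal userspace sketch of the same shape using pthreads; all names are hypothetical stand-ins, not the regulator API, and strictly portable code would read the pointer with an atomic load where the kernel relies on its own memory model.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical resource with a lazily resolved pointer.
 * Initialize as: struct resource res = { PTHREAD_MUTEX_INITIALIZER, NULL }; */
struct resource {
	pthread_mutex_t lock;
	void *supply;		/* NULL until resolved */
};

static void *resolve_once(struct resource *r, void *(*lookup)(void))
{
	/* Fast path: already resolved, no locking contention. */
	if (r->supply)
		return r->supply;

	/* The potentially slow lookup happens outside the lock. */
	void *candidate = lookup();

	pthread_mutex_lock(&r->lock);
	/* Recheck under the lock: a concurrent caller may have resolved
	 * the supply between our unlocked check and taking the mutex.
	 * If we lose, the candidate must be released - the kernel hunk
	 * does this with put_device() before bailing out. */
	if (!r->supply)
		r->supply = candidate;
	pthread_mutex_unlock(&r->lock);

	return r->supply;
}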
/* Internal regulator request function */
spin_lock_irq(&rtc_lock);
+ /* Ensure that the RTC is accessible. Bits 0-6 must be 0! */
+ if ((CMOS_READ(RTC_VALID) & 0x7f) != 0) {
+ spin_unlock_irq(&rtc_lock);
+ dev_warn(dev, "not accessible\n");
+ retval = -ENXIO;
+ goto cleanup1;
+ }
+
if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
/* force periodic irq to CMOS reset default of 1024Hz;
*
again:
spin_lock_irqsave(&rtc_lock, flags);
+ /* Ensure that the RTC is accessible. Bits 0-6 must be 0! */
+ if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x7f) != 0)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ memset(time, 0xff, sizeof(*time));
+ return 0;
+ }
+
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
}
EXPORT_SYMBOL(dasd_path_create_kobjects);
-/*
- * As we keep kobjects for the lifetime of a device, this function must not be
- * called anywhere but in the context of offlining a device.
- */
-void dasd_path_remove_kobj(struct dasd_device *device, int chp)
+static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
{
if (device->path[chp].in_sysfs) {
kobject_put(&device->path[chp].kobj);
device->path[chp].in_sysfs = false;
}
}
-EXPORT_SYMBOL(dasd_path_remove_kobj);
+
+/*
+ * As we keep kobjects for the lifetime of a device, this function must not be
+ * called anywhere but in the context of offlining a device.
+ */
+void dasd_path_remove_kobjects(struct dasd_device *device)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ dasd_path_remove_kobj(device, i);
+}
+EXPORT_SYMBOL(dasd_path_remove_kobjects);
int dasd_add_sysfs_files(struct ccw_device *cdev)
{
device->path[i].ssid = 0;
device->path[i].chpid = 0;
dasd_path_notoper(device, i);
- dasd_path_remove_kobj(device, i);
}
}
device->block = NULL;
out_err1:
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
kfree(device->private);
device->private = NULL;
return rc;
private->vdsneq = NULL;
private->gneq = NULL;
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
}
static struct dasd_ccw_req *
void dasd_remove_sysfs_files(struct ccw_device *);
void dasd_path_create_kobj(struct dasd_device *, int);
void dasd_path_create_kobjects(struct dasd_device *);
-void dasd_path_remove_kobj(struct dasd_device *, int);
+void dasd_path_remove_kobjects(struct dasd_device *);
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
struct vfio_ap_queue *q;
- int apid, apqi;
mutex_lock(&matrix_dev->lock);
q = dev_get_drvdata(&apdev->device);
+ vfio_ap_mdev_reset_queue(q, 1);
dev_set_drvdata(&apdev->device, NULL);
- apid = AP_QID_CARD(q->apqn);
- apqi = AP_QID_QUEUE(q->apqn);
- vfio_ap_mdev_reset_queue(apid, apqi, 1);
- vfio_ap_irq_disable(q);
kfree(q);
mutex_unlock(&matrix_dev->lock);
}
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static int match_apqn(struct device *dev, const void *data)
{
int apqn)
{
struct vfio_ap_queue *q;
- struct device *dev;
if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
return NULL;
if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
return NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (!dev)
- return NULL;
- q = dev_get_drvdata(dev);
- q->matrix_mdev = matrix_mdev;
- put_device(dev);
+ q = vfio_ap_find_queue(apqn);
+ if (q)
+ q->matrix_mdev = matrix_mdev;
return q;
}
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
- if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ if (!q)
+ return;
+ if (q->saved_isc != VFIO_AP_ISC_INVALID &&
+ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
- if (q->saved_pfn && q->matrix_mdev)
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ }
+ if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
&q->saved_pfn, 1);
- q->saved_pfn = 0;
- q->saved_isc = VFIO_AP_ISC_INVALID;
+ q->saved_pfn = 0;
+ }
}
/**
* Returns if ap_aqic function failed with invalid, deconfigured or
* checkstopped AP.
*/
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status;
{
struct ap_matrix_mdev *m;
- mutex_lock(&matrix_dev->lock);
-
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
- if ((m != matrix_mdev) && (m->kvm == kvm)) {
- mutex_unlock(&matrix_dev->lock);
+ if ((m != matrix_mdev) && (m->kvm == kvm))
return -EPERM;
- }
}
matrix_mdev->kvm = kvm;
kvm_get_kvm(kvm);
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
- mutex_unlock(&matrix_dev->lock);
return 0;
}
return NOTIFY_DONE;
}
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+{
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret;
+ int ret, notify_rc = NOTIFY_OK;
struct ap_matrix_mdev *matrix_mdev;
if (action != VFIO_GROUP_NOTIFY_SET_KVM)
return NOTIFY_OK;
matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ mutex_lock(&matrix_dev->lock);
if (!data) {
- matrix_mdev->kvm = NULL;
- return NOTIFY_OK;
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
+ goto notify_done;
}
ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
- if (ret)
- return NOTIFY_DONE;
+ if (ret) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
/* If there is no CRYCB pointer, then we can't copy the masks */
- if (!matrix_mdev->kvm->arch.crypto.crycbd)
- return NOTIFY_DONE;
+ if (!matrix_mdev->kvm->arch.crypto.crycbd) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm,
matrix_mdev->matrix.adm);
- return NOTIFY_OK;
+notify_done:
+ mutex_unlock(&matrix_dev->lock);
+ return notify_rc;
}
-static void vfio_ap_irq_disable_apqn(int apqn)
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
struct device *dev;
- struct vfio_ap_queue *q;
+ struct vfio_ap_queue *q = NULL;
dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
&apqn, match_apqn);
if (dev) {
q = dev_get_drvdata(dev);
- vfio_ap_irq_disable(q);
put_device(dev);
}
+
+ return q;
}
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
unsigned int retry)
{
struct ap_queue_status status;
+ int ret;
int retry2 = 2;
- int apqn = AP_MKQID(apid, apqi);
- do {
- status = ap_zapq(apqn);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- while (!status.queue_empty && retry2--) {
- msleep(20);
- status = ap_tapq(apqn, NULL);
- }
- WARN_ON_ONCE(retry2 <= 0);
- return 0;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
+ if (!q)
+ return 0;
+
+retry_zapq:
+ status = ap_zapq(q->apqn);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ ret = 0;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (retry--) {
msleep(20);
- break;
- default:
- /* things are really broken, give up */
- return -EIO;
+ goto retry_zapq;
}
- } while (retry--);
+ ret = -EBUSY;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ WARN_ON_ONCE(status.irq_enabled);
+ ret = -EBUSY;
+ goto free_resources;
+ default:
+ /* things are really broken, give up */
+ WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ status.response_code);
+ return -EIO;
+ }
+
+ /* wait for the reset to take effect */
+ while (retry2--) {
+ if (status.queue_empty && !status.irq_enabled)
+ break;
+ msleep(20);
+ status = ap_tapq(q->apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
- return -EBUSY;
+free_resources:
+ vfio_ap_free_aqic_resources(q);
+
+ return ret;
}
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
int ret;
int rc = 0;
unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
matrix_mdev->matrix.apm_max + 1) {
for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
matrix_mdev->matrix.aqm_max + 1) {
- ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
+ ret = vfio_ap_mdev_reset_queue(q, 1);
/*
* Regardless whether a queue turns out to be busy, or
* is not operational, we need to continue resetting
*/
if (ret)
rc = ret;
- vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mutex_lock(&matrix_dev->lock);
- if (matrix_mdev->kvm) {
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
- vfio_ap_mdev_reset_queues(mdev);
- kvm_put_kvm(matrix_mdev->kvm);
- matrix_mdev->kvm = NULL;
- }
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
mutex_unlock(&matrix_dev->lock);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
struct mdev_device *mdev;
};
-extern int vfio_ap_mdev_register(void);
-extern void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry);
-
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
unsigned long saved_pfn;
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
};
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
+
+int vfio_ap_mdev_register(void);
+void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry);
+
#endif /* _VFIO_AP_PRIVATE_H_ */
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
pr_err("error in devcmd2 init");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_free_wq;
}
/*
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
- goto err_free_wq;
+ goto err_disable_wq;
vdev->devcmd2->result =
(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
}
- vfc_cmd->correlation = cpu_to_be64(evt);
+ vfc_cmd->correlation = cpu_to_be64((u64)evt);
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
evt->sync_iu = &rsp_iu;
- tmf->correlation = cpu_to_be64(evt);
+ tmf->correlation = cpu_to_be64((u64)evt);
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already, "
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
}
/*
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
+skip_resp:
fc_exch_release(ep);
return;
rel:
fc_exch_hold(ep);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already, "
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
return -ENODEV;
}
+ if (!vport->phba->sli4_hba.nvmels_wq)
+ return -ENOMEM;
+
/*
* there are two dma buf in the request, actually there is one and
* the second one is just the start address + cmd size.
goto out;
}
+ /* always store 64 bits regardless of addressing */
sense_ptr = (void *)cmd->frame + ioc->sense_off;
- if (instance->consistent_mask_64bit)
- put_unaligned_le64(sense_handle, sense_ptr);
- else
- put_unaligned_le32(sense_handle, sense_ptr);
+ put_unaligned_le64(sense_handle, sense_ptr);
}
/*
int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 0)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
/*
* CT6 CTX allocation cache
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state != SRP_RPORT_FAIL_FAST)
+ /*
+ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+ * later is ok though, scsi_internal_device_unblock_nowait()
+ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+ */
+ scsi_target_block(&shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
depends on SCSI_UFSHCD
+ depends on HAS_IOMEM
help
This selects the UFS host controller support. Select this if
you have an UFS controller on Platform bus.
if (ret)
dev_err(hba->dev, "%s: link recovery failed, err %d",
__func__, ret);
+ else
+ ufshcd_clear_ua_wluns(hba);
return ret;
}
break;
} /* end of switch */
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+ if ((host_byte(result) != DID_OK) &&
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
up(&hba->eh_sem);
+
+ if (!err && needs_reset)
+ ufshcd_clear_ua_wluns(hba);
}
/**
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}
- if (enabled_intr_status && retval == IRQ_NONE) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
- __func__, intr_status);
+ if (enabled_intr_status && retval == IRQ_NONE &&
+ !ufshcd_eh_in_progress(hba)) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+ __func__,
+ intr_status,
+ hba->ufs_stats.last_intr_status,
+ enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
req->end_io_data = &wait;
free_slot = req->tag;
WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
- if (err)
- goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba, false);
if (!err)
- ufshcd_clear_ua_wluns(hba);
-out:
+ err = ufshcd_probe_hba(hba, false);
+
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
if (ret)
goto out;
+ ufshcd_clear_ua_wluns(hba);
+
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info.info,
pm_runtime_put_sync(hba->dev);
ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
- } else {
- ufshcd_clear_ua_wluns(hba);
}
}
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
+ ufshcd_clear_ua_wluns(hba);
ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
+ ufshcd_clear_ua_wluns(hba);
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
d->window[k].virt = ioremap(res->start,
- resource_size(res));
+ resource_size(res));
if (!d->window[k].virt)
goto err2;
}
#include <linux/debugfs.h>
#include "internals.h"
-static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+static int intc_irq_xlate_show(struct seq_file *m, void *priv)
{
int i;
return 0;
}
-static int intc_irq_xlate_open(struct inode *inode, struct file *file)
-{
- return single_open(file, intc_irq_xlate_debug, inode->i_private);
-}
-
-static const struct file_operations intc_irq_xlate_fops = {
- .open = intc_irq_xlate_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
static int __init intc_irq_xlate_init(void)
{
return soc_dev;
}
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { }
+};
+
static int __init atmel_soc_device_init(void)
{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
at91_soc_init(socs);
return 0;
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
- select ARM_GIC_V3 if ARCH_MXC
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
help
If you say yes here you get support for the NXP i.MX8M family
support, it will provide the SoC info like SoC family,
config LITEX_SOC_CONTROLLER
tristate "Enable LiteX SoC Controller driver"
depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
select LITEX
help
This option enables the SoC Controller Driver which verifies
void __iomem *base;
};
+#ifdef CONFIG_OF
static const struct of_device_id litex_soc_ctrl_of_match[] = {
{.compatible = "litex,soc-controller"},
{},
};
-
MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
static int litex_soc_ctrl_probe(struct platform_device *pdev)
{
"allwinner,sun7i-a20-display-engine",
"allwinner,sun8i-a23-display-engine",
"allwinner,sun8i-a33-display-engine",
- "allwinner,sun8i-a83t-display-engine",
- "allwinner,sun8i-h3-display-engine",
- "allwinner,sun8i-r40-display-engine",
- "allwinner,sun8i-v3s-display-engine",
"allwinner,sun9i-a80-display-engine",
- "allwinner,sun50i-a64-display-engine",
/*
* And now we have the regular devices connected to the MBUS
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
+ u32 v;
/*
* Check if we have controllable resets. If either rstctrl is non-zero
map++;
}
+ /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
+ if (prm->data->rstmap == rst_map_012) {
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if ((v & reset->mask) != reset->mask) {
+ dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+ writel_relaxed(reset->mask, reset->prm->base +
+ reset->prm->data->rstctrl);
+ }
+ }
+
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
dev_err(&pdev->dev,
"Invalid number of chipselect: %hu\n",
pdata->num_chipselect);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit;
}
master->num_chipselect = pdata->num_chipselect;
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
+ unsigned int clk_rate;
u32 speed_hz;
const u8 *txbuf;
u8 *rxbuf;
u32 ctrl_reg, baud_rate_val;
unsigned long frequency;
- frequency = clk_get_rate(xspi->ref_clk);
+ frequency = xspi->clk_rate;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ xspi->clk_rate = clk_get_rate(xspi->ref_clk);
/* Set to default valid value */
- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ master->max_speed_hz = xspi->clk_rate / 4;
xspi->speed_hz = master->max_speed_hz;
master->bits_per_word_mask = SPI_BPW_MASK(8);
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_platform_data *pdata;
- bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
pdata = spi->dev.parent->parent->platform_data;
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
- pdata->cs_control(spi, !pol);
+ pdata->cs_control(spi, false);
}
if (value == BITBANG_CS_ACTIVE) {
fsl_spi_change_mode(spi);
if (pdata->cs_control)
- pdata->cs_control(spi, pol);
+ pdata->cs_control(spi, true);
}
}
{ .compatible = "lwn,bk4" },
{ .compatible = "dh,dhcom-board" },
{ .compatible = "menlo,m53cpld" },
+ { .compatible = "cisco,spi-petra" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
raw_fmt->width = encoded_fmt->width;
- raw_fmt->width = encoded_fmt->width;
+ raw_fmt->height = encoded_fmt->height;
if (ctx->is_encoder)
hantro_set_fmt_out(ctx, raw_fmt);
else
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
- if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
sram_array[i] |= BIT(0);
}
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
}
- /* init regulary domain */
- rtw_regd_init(padapter, rtw_reg_notifier);
-
/* copy mac_addr to wiphy */
memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
*((struct adapter **)wiphy_priv(wiphy)) = padapter;
rtw_cfg80211_preinit_wiphy(padapter, wiphy);
+ /* init regulatory domain */
+ rtw_regd_init(wiphy, rtw_reg_notifier);
+
ret = wiphy_register(wiphy);
if (ret < 0) {
DBG_8192C("Couldn't register wiphy device\n");
padapter = rtw_netdev_priv(pnetdev);
- rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
-
/* 3 3. init driver special setting, interface, OS and hardware relative */
/* 4 3.1 set hardware operation functions */
goto free_hal_data;
}
+ rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
+
/* 3 8. get WLan MAC address */
/* set mac addr */
rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr);
_rtw_reg_apply_flags(wiphy);
}
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
- struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
-
_rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
-
- return 0;
}
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
else
len = sizeof(struct sockaddr_in);
/*
- * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
+ * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
*/
if (np->np_network_transport == ISCSI_TCP)
tcp_sock_set_nodelay(sock->sk);
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
- if (tcmu_cmd->se_cmd)
- tcmu_cmd->se_cmd->priv = NULL;
kfree(tcmu_cmd->dbi);
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
mutex_lock(&udev->cmdr_lock);
- se_cmd->priv = tcmu_cmd;
if (!(se_cmd->transport_state & CMD_T_ABORTED))
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0)
tcmu_free_cmd(tcmu_cmd);
+ else
+ se_cmd->priv = tcmu_cmd;
mutex_unlock(&udev->cmdr_lock);
return scsi_ret;
}
list_del_init(&cmd->queue_entry);
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
unqueued = true;
}
}
done:
+ se_cmd->priv = NULL;
if (read_len_valid) {
pr_debug("read_len = %d\n", read_len);
target_complete_cmd_with_length(cmd->se_cmd,
se_cmd = cmd->se_cmd;
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
* removed then LIO core will do the right thing and
* fail the retry.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
tcmu_free_cmd(tcmu_cmd);
continue;
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd,
SAM_STAT_CHECK_CONDITION);
tcmu_free_cmd(tcmu_cmd);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
+ cmd->se_cmd->priv = NULL;
if (err_level == 1) {
/*
* Userspace was not able to start the
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- might_sleep();
+ if (need_resched())
+ cond_resched();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;
* managed with the xHCI and the SuperSpeed hub so we create the
* link from xHCI instead.
*/
- while (!dev_is_pci(dev))
+ while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev)
if (auth && auth->reply.route_hi == sw->config.route_hi &&
auth->reply.route_lo == sw->config.route_lo) {
- tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+ tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
ret = -EIO;
return 0;
}
-extern ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
-
/**
* job_control - check job control
* @tty: tty
/* NOTE: not yet done after every sleep pending a thorough
check of the logic of this change. -- jlc */
/* don't stop on /dev/console */
- if (file->f_op->write == redirected_tty_write)
+ if (file->f_op->write_iter == redirected_tty_write)
return 0;
return __tty_check_change(tty, SIGTTIN);
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
- if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+ if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
(val & STAT_TX_RDY(port)), 1, 10000);
}
+static void wait_for_xmite(struct uart_port *port)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+ (val & STAT_TX_EMP), 1, 10000);
+}
+
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
uart_console_write(port, s, count, mvebu_uart_console_putchar);
- wait_for_xmitr(port);
+ wait_for_xmite(port);
if (ier)
writel(ier, port->membase + UART_CTRL(port));
DEFINE_MUTEX(tty_mutex);
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
return 0;
}
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return -EIO;
}
static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = tty_write,
+ .write_iter = tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
static const struct file_operations console_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = redirected_tty_write,
+ .write_iter = redirected_tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
static const struct file_operations hung_up_tty_fops = {
.llseek = no_llseek,
.read = hung_up_tty_read,
- .write = hung_up_tty_write,
+ .write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
.unlocked_ioctl = hung_up_tty_ioctl,
.compat_ioctl = hung_up_tty_compat_ioctl,
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
- if (filp->f_op->write == redirected_tty_write)
+ if (filp->f_op->write_iter == redirected_tty_write)
cons_filp = filp;
- if (filp->f_op->write != tty_write)
+ if (filp->f_op->write_iter != tty_write)
continue;
closecount++;
__tty_fasync(-1, filp, 0); /* can't block */
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
- const char __user *buf,
- size_t count)
+ struct iov_iter *from)
{
+ size_t count = iov_iter_count(from);
ssize_t ret, written = 0;
unsigned int chunk;
size_t size = count;
if (size > chunk)
size = chunk;
+
ret = -EFAULT;
- if (copy_from_user(tty->write_buf, buf, size))
+ if (copy_from_iter(tty->write_buf, size, from) != size)
break;
+
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
+
+ /* FIXME! Have Al check this! */
+ if (ret != size)
+ iov_iter_revert(from, size-ret);
+
written += ret;
- buf += ret;
count -= ret;
if (!count)
break;
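do_tty_write() now copies from the iov_iter in write_buf-sized chunks and, when the line-discipline write comes up short, reverts the iterator by the unconsumed amount so no bytes are dropped. The following is a self-contained sketch of that chunk-copy-and-revert shape; the iterator and sink types are hypothetical, not the tty or iov_iter API.

#include <string.h>
#include <sys/types.h>

/* Toy iterator over an in-memory buffer. */
struct iter {
	const char *buf;
	size_t pos, len;
};

static size_t iter_count(const struct iter *it) { return it->len - it->pos; }
static void iter_revert(struct iter *it, size_t n) { it->pos -= n; }

static ssize_t chunked_write(struct iter *from, char *scratch, size_t chunk,
			     ssize_t (*sink)(const char *, size_t))
{
	ssize_t written = 0;

	while (iter_count(from)) {
		size_t size = iter_count(from);

		if (size > chunk)
			size = chunk;
		memcpy(scratch, from->buf + from->pos, size);
		from->pos += size;

		ssize_t ret = sink(scratch, size);
		if (ret <= 0)
			return written ? written : ret;
		/* Short write: push the unconsumed tail back onto the
		 * iterator, mirroring iov_iter_revert() above. */
		if ((size_t)ret != size)
			iter_revert(from, size - (size_t)ret);
		written += ret;
	}
	return written;
}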
* write method will not be invoked in parallel for each device.
*/
-static ssize_t tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
{
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
tty_err(tty, "missing write_room method\n");
ld = tty_ldisc_ref_wait(tty);
if (!ld)
- return hung_up_tty_write(file, buf, count, ppos);
+ return hung_up_tty_write(iocb, from);
if (!ld->ops->write)
ret = -EIO;
else
- ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+ ret = do_tty_write(ld->ops->write, tty, file, from);
tty_ldisc_deref(ld);
return ret;
}
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ return file_tty_write(iocb->ki_filp, iocb, from);
+}
+
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *p = NULL;
p = get_file(redirect);
spin_unlock(&redirect_lock);
+ /*
+ * We know the redirected tty is just another tty, we can
+ * call file_tty_write() directly with that file pointer.
+ */
if (p) {
ssize_t res;
- res = vfs_write(p, buf, count, &p->f_pos);
+ res = file_tty_write(p, iocb, iter);
fput(p);
return res;
}
- return tty_write(file, buf, count, ppos);
+ return tty_write(iocb, iter);
}
/*
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (file->f_op->write == redirected_tty_write) {
+ if (file->f_op->write_iter == redirected_tty_write) {
struct file *f;
spin_lock(&redirect_lock);
f = redirect;
fput(f);
return 0;
}
+ if (file->f_op->write_iter != tty_write)
+ return -ENOTTY;
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return -EINVAL;
spin_lock(&redirect_lock);
if (redirect) {
spin_unlock(&redirect_lock);
}
data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
- data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
if (ret)
return ret;
return ret;
}
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_imx *data = dev_get_drvdata(dev);
- device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
return 0;
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
- if (r < 0) {
- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
- alts, usblp->ifnum);
- return r;
+ /* Don't unnecessarily set the interface if there's a single alt. */
+ if (usblp->intf->num_altsetting > 1) {
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+ if (r < 0) {
+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+ alts, usblp->ifnum);
+ return r;
+ }
}
usblp->bidir = (usblp->protocol[protocol].epread != NULL);
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
- struct dwc2_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
if (idx > hsotg->num_of_eps)
return NULL;
- ep = index_to_ep(hsotg, idx, dir);
-
- if (idx && ep->dir_in != dir)
- return NULL;
-
- return ep;
+ return index_to_ep(hsotg, idx, dir);
}
/**
if (PMSG_IS_AUTO(msg))
break;
- ret = dwc3_core_init(dwc);
+ ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
u32 state, reg, loops;
/* Stop DMA activity */
- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
str_array[offset].s = NULL;
ret = ast_vhub_str_alloc_add(vhub, &lang_str);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
comment "Platform Support"
config USB_BDC_PCI
tristate "BDC support for PCIe based platforms"
- depends on USB_PCI
+ depends on USB_PCI && BROKEN
default USB_BDC_UDC
help
Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+ mutex_lock(&udc_lock);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (sysfs_streq(buf, "connect")) {
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return n;
+ ret = n;
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
}
static DEVICE_ATTR_WO(soft_connect);
}
fallthrough;
case USB_PORT_FEAT_RESET:
+ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+ break;
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
- dum_hcd->port_status = 0;
dum_hcd->port_status =
(USB_SS_PORT_STAT_POWER |
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_RESET);
- } else
+ } else {
dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
+ dum_hcd->port_status |= USB_PORT_STAT_RESET;
+ }
/*
* We want to reset device status. All but the
* Self powered feature
* interval? Is it still 50msec as for HS?
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
- fallthrough;
+ set_link_state(dum_hcd);
+ break;
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_RESET:
case USB_PORT_FEAT_C_ENABLE:
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
+ int rc;
hcd->uses_new_polling = 1;
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+ /* Wait until HC become operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
up_write(&ehci_cf_port_reset_rwsem);
+
+ if (rc) {
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+ return rc;
+ }
+
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
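The added ehci_handshake() call polls the status register until the masked bits take the wanted value or the timeout expires, so a controller that never clears STS_HALT now fails startup with an error instead of limping on. Below is a rough userspace stand-in for that polling contract, under the assumption that ehci_handshake() returns 0 on success and -ETIMEDOUT otherwise (the kernel version polls MMIO):

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Poll *reg until (*reg & mask) == done, for roughly usec microseconds. */
static int handshake(const volatile uint32_t *reg, uint32_t mask,
		     uint32_t done, unsigned int usec)
{
	while (usec--) {
		if ((*reg & mask) == done)
			return 0;
		/* ~1us pause between polls */
		nanosleep(&(struct timespec){ .tv_nsec = 1000 }, NULL);
	}
	return -ETIMEDOUT;
}

With STS_HALT as the mask and 0 as the wanted value, the hunk above waits up to 100ms for the controller to leave the halted state before declaring the start failed.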
unlink_empty_async_suspended(ehci);
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
sch_ep->sch_tt = tt;
sch_ep->ep = ep;
+ INIT_LIST_HEAD(&sch_ep->endpoint);
+ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
return sch_ep;
}
sch_ep->bw_budget_table[j];
}
}
+ sch_ep->allocated = used;
}
static int check_sch_tt(struct usb_device *udev,
return 0;
}
+static void destroy_sch_ep(struct usb_device *udev,
+ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+{
+ /* only release ep bw check passed by check_sch_bw() */
+ if (sch_ep->allocated)
+ update_bus_bw(sch_bw, sch_ep, 0);
+
+ list_del(&sch_ep->endpoint);
+
+ if (sch_ep->sch_tt) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
+ kfree(sch_ep);
+}
+
static bool need_bw_sch(struct usb_host_endpoint *ep,
enum usb_device_speed speed, int has_tt)
{
mtk->sch_array = sch_array;
+ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_virt_device *virt_dev;
- struct mu3h_sch_bw_info *sch_bw;
struct mu3h_sch_ep_info *sch_ep;
- struct mu3h_sch_bw_info *sch_array;
unsigned int ep_index;
- int bw_index;
- int ret = 0;
xhci = hcd_to_xhci(hcd);
virt_dev = xhci->devs[udev->slot_id];
ep_index = xhci_get_endpoint_index(&ep->desc);
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
- sch_array = mtk->sch_array;
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
return 0;
}
- bw_index = get_bw_index(xhci, udev, ep);
- sch_bw = &sch_array[bw_index];
-
sch_ep = create_sch_ep(udev, ep, ep_ctx);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
- ret = check_sch_bw(udev, sch_bw, sch_ep);
- if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
- if (is_fs_or_ls(udev->speed))
- drop_tt(udev);
-
- kfree(sch_ep);
- return -ENOSPC;
- }
-
- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
-
- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
- | EP_BREPEAT(sch_ep->repeat));
-
- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
- sch_ep->offset, sch_ep->repeat);
+ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
return 0;
}
struct xhci_virt_device *virt_dev;
struct mu3h_sch_bw_info *sch_array;
struct mu3h_sch_bw_info *sch_bw;
- struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
int bw_index;
xhci = hcd_to_xhci(hcd);
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep, 0);
- list_del(&sch_ep->endpoint);
- if (is_fs_or_ls(udev->speed)) {
- list_del(&sch_ep->tt_endpoint);
- drop_tt(udev);
- }
- kfree(sch_ep);
+ destroy_sch_ep(udev, sch_bw, sch_ep);
break;
}
}
}
EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index, ret;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ ret = check_sch_bw(udev, sch_bw, sch_ep);
+ if (ret) {
+ xhci_err(xhci, "Not enough bandwidth!\n");
+ return -ENOSPC;
+ }
+ }
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ struct xhci_ep_ctx *ep_ctx;
+ struct usb_host_endpoint *ep = sch_ep->ep;
+ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ bw_index = get_bw_index(xhci, udev, ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ | EP_BCSCOUNT(sch_ep->cs_count)
+ | EP_BBM(sch_ep->burst_mode));
+ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ | EP_BREPEAT(sch_ep->repeat));
+
+ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+ sch_ep->offset, sch_ep->repeat);
+ }
+
+ return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+ destroy_sch_ep(udev, sch_bw, sch_ep);
+ }
+
+ xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
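The reworked xhci-mtk flow is a two-phase commit: add_ep only parks endpoints on bw_ep_chk_list, and xhci_mtk_check_bandwidth() verifies every pending endpoint before moving any of them onto the bus bandwidth lists, so a mid-set failure leaves the accounting untouched. Here is a compact sketch of that check-all-then-commit structure; the real check is per-microframe rather than a simple sum, and the types below are hypothetical:

#include <stdbool.h>
#include <stddef.h>

struct pending_ep {
	unsigned int bw;	/* bandwidth this endpoint would consume */
	bool allocated;		/* set only once the whole set commits */
};

/* Phase 1: verify the entire pending set fits; phase 2: charge it.
 * Nothing is charged unless everything fits. */
static int commit_all(struct pending_ep *eps, size_t n,
		      unsigned int *used, unsigned int budget)
{
	unsigned int need = 0;
	size_t i;

	for (i = 0; i < n; i++)
		need += eps[i].bw;
	if (*used + need > budget)
		return -1;	/* like -ENOSPC: accounting untouched */

	for (i = 0; i < n; i++)
		eps[i].allocated = true;
	*used += need;
	return 0;
}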
static int xhci_mtk_setup(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
.reset = xhci_mtk_setup,
+ .check_bandwidth = xhci_mtk_check_bandwidth,
+ .reset_bandwidth = xhci_mtk_reset_bandwidth,
};
static struct hc_driver __read_mostly xhci_mtk_hc_driver;
* @ep_type: endpoint type
* @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
+ * @allocated: the bandwidth is already allocated from bus_bw
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
* @repeat: the time gap between two uframes that transfers are
u32 ep_type;
u32 maxpkt;
void *ep;
+ bool allocated;
/*
* mtk xHCI scheduling information put into reserved DWs
* in ep context
struct device *dev;
struct usb_hcd *hcd;
struct mu3h_sch_bw_info *sch_array;
+ struct list_head bw_ep_chk_list;
struct mu3c_ippc_regs __iomem *ippc_regs;
bool has_ippc;
int num_u2_ports;
struct usb_host_endpoint *ep);
void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
#else
static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
{
}
+static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+}
#endif
#endif /* _XHCI_MTK_H_ */
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
return 0;
}
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct phy *phy;
+ int ret;
+
+ /* Old bindings miss the PHY handle */
+ phy = of_phy_get(dev->of_node, "usb3-phy");
+ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(phy))
+ goto phy_out;
+
+ ret = phy_init(phy);
+ if (ret)
+ goto phy_put;
+
+ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
+ if (ret)
+ goto phy_exit;
+
+ ret = phy_power_on(phy);
+ if (ret == -EOPNOTSUPP) {
+ /* Skip initialization of XHCI PHY when it is unsupported by firmware */
+ dev_warn(dev, "PHY unsupported by firmware\n");
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+ }
+ if (ret)
+ goto phy_exit;
+
+ phy_power_off(phy);
+phy_exit:
+ phy_exit(phy);
+phy_put:
+ of_phy_put(phy);
+phy_out:
+
+ return 0;
+}
+
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
return 0;
priv->plat_start(hcd);
}
+static int xhci_priv_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->plat_setup)
+ return 0;
+
+ return priv->plat_setup(hcd);
+}
+
static int xhci_priv_init_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+ .plat_setup = xhci_mvebu_a3700_plat_setup,
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+
+ if (priv) {
+ ret = xhci_priv_plat_setup(hcd);
+ if (ret)
+ goto disable_usb_phy;
+ }
+
+ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ int (*plat_setup)(struct usb_hcd *);
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for IN transfers we need to copy the data from bounce to sg */
- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
- if (len != seg->bounce_len)
- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
- len, seg->bounce_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+ } else {
+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+ seg->bounce_len);
+ }
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
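The new wmb() orders descriptor publication: all payload words of the TRB must be globally visible before field[3], which carries the cycle/ownership bit, flips and hands the TRB to the controller. The same idiom in portable C11 looks like the sketch below, with a release store standing in for the kernel's wmb()-then-plain-store; the descriptor layout is hypothetical.

#include <stdatomic.h>
#include <stdint.h>

struct desc {
	uint32_t f0, f1, f2;
	_Atomic uint32_t f3;	/* ownership/cycle word, written last */
};

static void publish_desc(struct desc *d, uint32_t f0, uint32_t f1,
			 uint32_t f2, uint32_t f3_owner)
{
	d->f0 = f0;
	d->f1 = f1;
	d->f2 = f2;
	/* Release store: a consumer that observes the new f3 is also
	 * guaranteed to observe f0..f2 (kernel analogue: wmb() followed
	 * by a plain store of the ownership word). */
	atomic_store_explicit(&d->f3, f3_owner, memory_order_release);
}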
/* create a bounce buffer of at most max_pkt bytes, pointed to by the last trb */
if (usb_urb_dir_out(urb)) {
- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
- seg->bounce_buf, new_buff_len, enqd_len);
- if (len != new_buff_len)
- xhci_warn(xhci,
- "WARN Wrong bounce buffer write length: %zu != %d\n",
- len, new_buff_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
+ } else {
+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+ }
+
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
enable);
if (err < 0)
break;
+
+ /*
+ * wait 500us for LFPS detector to be disabled before
+ * sending ACK
+ */
+ if (!enable)
+ usleep_range(500, 1000);
}
if (err < 0) {
* else should be touching the xhci->devs[slot_id] structure, so we
* don't need to take the xhci->lock for manipulating that.
*/
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
return ret;
}
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
drv->reset = over->reset;
if (over->start)
drv->start = over->start;
+ if (over->check_bandwidth)
+ drv->check_bandwidth = over->check_bandwidth;
+ if (over->reset_bandwidth)
+ drv->reset_bandwidth = over->reset_bandwidth;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
size_t extra_priv_size;
int (*reset)(struct usb_hcd *hcd);
int (*start)(struct usb_hcd *hcd);
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
};
#define XHCI_CFC_DELAY 10
void xhci_shutdown(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+ usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
struct sg_table sg_head;
int log_size;
int nsg;
+ int nent;
struct list_head list;
u64 offset;
};
return (npages + 1) / 2;
}
-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
-{
- struct scatterlist *sg;
- __be64 *pas;
- int i;
-
- pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- (*pas) = cpu_to_be64(sg_dma_address(sg));
-}
-
static void mlx5_set_access_mode(void *mkc, int mode)
{
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
struct scatterlist *sg;
+ int nsg = mr->nsg;
+ u64 dma_addr;
+ u64 dma_len;
+ int j = 0;
int i;
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- mtt[i] = cpu_to_be64(sg_dma_address(sg));
+ for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
+ for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
+ nsg && dma_len;
+ nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
+ mtt[j++] = cpu_to_be64(dma_addr);
+ }
}
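/*
 * Worked sketch (illustrative; not part of the patch): after dma_map_sg()
 * the IOMMU may coalesce entries, so nent < nsg. Each mapped entry must
 * then be expanded into one MTT address per (1 << log_size) bytes, e.g.
 * log_size = 12 and sg_dma_len() = 16K yields dma_addr + 0x0000, 0x1000,
 * 0x2000, 0x3000. Assumes dma_len is a multiple of the MTT entry size.
 */
static void fill_mtt_sketch(__be64 *mtt, u64 dma_addr, u64 dma_len, int log_size)
{
	int j = 0;

	for (; dma_len >= BIT(log_size);
	     dma_addr += BIT(log_size), dma_len -= BIT(log_size))
		mtt[j++] = cpu_to_be64(dma_addr);
}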
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
- fill_sg(mr, in);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
- err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!err)
+ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!mr->nent)
goto err_map;
err = create_direct_mr(mvdev, mr);
u64 device_addr;
u64 driver_addr;
u16 avail_index;
+ u16 used_index;
bool ready;
struct vdpa_callback cb;
bool restore;
u32 virtq_id;
struct mlx5_vdpa_net *ndev;
u16 avail_idx;
+ u16 used_idx;
int fw_state;
/* keep last in the struct */
obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+ MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
get_features_12_3(ndev->mvdev.actual_features));
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
struct mlx5_virtq_attr {
u8 state;
u16 available_index;
+ u16 used_index;
};
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+ attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
kfree(out);
return 0;
}
}
+static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+ ndev->vqs[i].avail_idx = 0;
+ ndev->vqs[i].used_idx = 0;
+ }
+}
+
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
return err;
ri->avail_index = attr.available_index;
+ ri->used_index = attr.used_index;
ri->ready = mvq->ready;
ri->num_ent = mvq->num_ent;
ri->desc_addr = mvq->desc_addr;
continue;
mvq->avail_idx = ri->avail_index;
+ mvq->used_idx = ri->used_index;
mvq->ready = ri->ready;
mvq->num_ent = ri->num_ent;
mvq->desc_addr = ri->desc_addr;
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
+ clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
#endif
}
+static int xenbus_probe_thread(void *unused)
+{
+ DEFINE_WAIT(w);
+
+ /*
+ * We actually just want to wait for *any* trigger of xb_waitq,
+ * and run xenbus_probe() the moment it occurs.
+ */
+ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&xb_waitq, &w);
+
+ DPRINTK("probing");
+ xenbus_probe();
+ return 0;
+}
+
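/*
 * Sketch of the wait pattern used above (illustrative; the name is
 * hypothetical): unlike wait_event(), no condition is rechecked, so any
 * wake_up() on the queue, i.e. the first xenstore traffic, releases it.
 */
static void wait_for_any_wakeup(wait_queue_head_t *wq)
{
	DEFINE_WAIT(w);

	prepare_to_wait(wq, &w, TASK_INTERRUPTIBLE);	/* register before sleeping */
	schedule();					/* until any wake_up(wq) */
	finish_wait(wq, &w);				/* deregister */
}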
static int __init xenbus_probe_initcall(void)
{
/*
!xs_hvm_defer_init_for_callback()))
xenbus_probe();
+ /*
+ * For XS_LOCAL, spawn a thread which will wait for xenstored
+ * or a xenstore-stubdom to be started, then probe. It will be
+ * triggered when communication starts happening, by waiting
+ * on xb_waitq.
+ */
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct task_struct *probe_task;
+
+ probe_task = kthread_run(xenbus_probe_thread, NULL,
+ "xenbus_probe");
+ if (IS_ERR(probe_task))
+ return PTR_ERR(probe_task);
+ }
return 0;
}
device_initcall(xenbus_probe_initcall);
goto error_cache;
#endif
- ret = register_pernet_subsys(&afs_net_ops);
+ ret = register_pernet_device(&afs_net_ops);
if (ret < 0)
goto error_net;
error_proc:
afs_fs_exit();
error_fs:
- unregister_pernet_subsys(&afs_net_ops);
+ unregister_pernet_device(&afs_net_ops);
error_net:
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
proc_remove(afs_proc_symlink);
afs_fs_exit();
- unregister_pernet_subsys(&afs_net_ops);
+ unregister_pernet_device(&afs_net_ops);
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
#endif
static void set_init_blocksize(struct block_device *bdev)
{
- bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
+ unsigned int bsize = bdev_logical_block_size(bdev);
+ loff_t size = i_size_read(bdev->bd_inode);
+
+ while (bsize < PAGE_SIZE) {
+ if (size & bsize)
+ break;
+ bsize <<= 1;
+ }
+ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
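/*
 * Worked example (illustrative): with a 512-byte logical block size and
 * i_size a multiple of 4096, bsize doubles 512 -> 1024 -> 2048 -> 4096
 * and stops at PAGE_SIZE. If i_size were, say, 7 GiB + 512, then
 * size & 512 is non-zero on the first pass and the block size stays 512.
 */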
int set_blocksize(struct block_device *bdev, int size)
list_del_init(&lower->list);
if (lower == node)
node = NULL;
- btrfs_backref_free_node(cache, lower);
+ btrfs_backref_drop_node(cache, lower);
}
btrfs_backref_cleanup_node(cache, node);
wake_up(&caching_ctl->wait);
}
- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+ /*
+ * If we are in the transaction that populated the free space tree we
+ * can't actually cache from the free space tree as our commit root and
+ * real root are the same, so we could change the contents of the blocks
+ * while caching. Instead do the slow caching in this case, and after
+ * the transaction has committed we will be safe.
+ */
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
ret = load_free_space_tree(caching_ctl);
else
ret = load_extent_tree_free(caching_ctl);
* Go through delayed refs for all the stuff we've just kicked off
* and then loop back (just once)
*/
- ret = btrfs_run_delayed_refs(trans, 0);
+ if (!ret)
+ ret = btrfs_run_delayed_refs(trans, 0);
if (!ret && loops == 0) {
loops++;
spin_lock(&cur_trans->dirty_bgs_lock);
/* Indicate that we need to cleanup space cache v1 */
BTRFS_FS_CLEANUP_SPACE_CACHE_V1,
+
+ /* Indicate that we can't trust the free space tree for caching yet */
+ BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
};
/*
struct btrfs_block_group *cache;
int ret;
- btrfs_add_excluded_extent(trans->fs_info, bytenr, num_bytes);
-
cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
if (!cache)
return -EINVAL;
* the pinned extents.
*/
btrfs_cache_block_group(cache, 1);
+ /*
+ * Make sure we wait until the cache is completely built in case it is
+ * missing or is invalid and therefore needs to be rebuilt.
+ */
+ ret = btrfs_wait_block_group_cache_done(cache);
+ if (ret)
+ goto out;
pin_down_extent(trans, cache, bytenr, num_bytes, 0);
/* remove us from the free space cache (if we're there at all) */
ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
+out:
btrfs_put_block_group(cache);
return ret;
}
{
int ret;
struct btrfs_block_group *block_group;
- struct btrfs_caching_control *caching_ctl;
block_group = btrfs_lookup_block_group(fs_info, start);
if (!block_group)
return -EINVAL;
- btrfs_cache_block_group(block_group, 0);
- caching_ctl = btrfs_get_caching_control(block_group);
-
- if (!caching_ctl) {
- /* Logic error */
- BUG_ON(!btrfs_block_group_done(block_group));
- ret = btrfs_remove_free_space(block_group, start, num_bytes);
- } else {
- /*
- * We must wait for v1 caching to finish, otherwise we may not
- * remove our space.
- */
- btrfs_wait_space_cache_v1_finished(block_group, caching_ctl);
- mutex_lock(&caching_ctl->mutex);
-
- if (start >= caching_ctl->progress) {
- ret = btrfs_add_excluded_extent(fs_info, start,
- num_bytes);
- } else if (start + num_bytes <= caching_ctl->progress) {
- ret = btrfs_remove_free_space(block_group,
- start, num_bytes);
- } else {
- num_bytes = caching_ctl->progress - start;
- ret = btrfs_remove_free_space(block_group,
- start, num_bytes);
- if (ret)
- goto out_lock;
+ btrfs_cache_block_group(block_group, 1);
+ /*
+ * Make sure we wait until the cache is completely built in case it is
+ * missing or is invalid and therefore needs to be rebuilt.
+ */
+ ret = btrfs_wait_block_group_cache_done(block_group);
+ if (ret)
+ goto out;
- num_bytes = (start + num_bytes) -
- caching_ctl->progress;
- start = caching_ctl->progress;
- ret = btrfs_add_excluded_extent(fs_info, start,
- num_bytes);
- }
-out_lock:
- mutex_unlock(&caching_ctl->mutex);
- btrfs_put_caching_control(caching_ctl);
- }
+ ret = btrfs_remove_free_space(block_group, start, num_bytes);
+out:
btrfs_put_block_group(block_group);
return ret;
}
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
}
- if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
- clear_extent_bits(&fs_info->excluded_extents, start,
- end, EXTENT_UPTODATE);
if (btrfs_test_opt(fs_info, DISCARD_SYNC))
ret = btrfs_discard_extent(fs_info, start,
goto out_free;
}
- trans = btrfs_start_transaction(tree_root, 0);
+ /*
+ * Use join to avoid potential EINTR from transaction
+ * start. See wait_reserve_ticket and the whole
+ * reservation callchain.
+ */
+ if (for_reloc)
+ trans = btrfs_join_transaction(tree_root);
+ else
+ trans = btrfs_start_transaction(tree_root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out_free;
return PTR_ERR(trans);
set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+ set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
free_space_root = btrfs_create_tree(trans,
BTRFS_FREE_SPACE_TREE_OBJECTID);
if (IS_ERR(free_space_root)) {
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+ ret = btrfs_commit_transaction(trans);
- return btrfs_commit_transaction(trans);
+ /*
+ * Now that we've committed the transaction any reading of our commit
+ * root will be safe, so we can cache from the free space tree now.
+ */
+ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+ return ret;
abort:
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
break;
offset += clone_len;
clone_root->offset += clone_len;
+
+ /*
+ * If we are cloning from the file we are currently processing,
+ * and using the send root as the clone root, we must stop once
+ * the current clone offset reaches the current eof of the file
+ * at the receiver, otherwise we would issue an invalid clone
+ * operation (source range going beyond eof) and cause the
+ * receiver to fail. So if we reach the current eof, bail out
+ * and fallback to a regular write.
+ */
+ if (clone_root->root == sctx->send_root &&
+ clone_root->ino == sctx->cur_ino &&
+ clone_root->offset >= sctx->cur_inode_next_write_offset)
+ break;
+
data_offset += clone_len;
next:
path->slots[0]++;
*/
btrfs_free_log_root_tree(trans, fs_info);
- /*
- * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
- * new delayed refs. Must handle them or qgroup can be wrong.
- */
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret)
- goto unlock_tree_log;
-
/*
* Since fs roots are all committed, we can get a quite accurate
* new_roots. So let's do quota accounting.
atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
- btrfs_device_data_ordered_init(dev, fs_info);
+ btrfs_device_data_ordered_init(dev);
INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
extent_io_tree_init(fs_info, &dev->alloc_state,
btrfs_warn(fs_info,
"balance: cannot set exclusive op status, resume manually");
+ btrfs_release_path(path);
+
mutex_lock(&fs_info->balance_mutex);
BUG_ON(fs_info->balance_ctl);
spin_lock(&fs_info->balance_lock);
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
-#define btrfs_device_data_ordered_init(device, info) \
- seqcount_mutex_init(&device->data_seqcount, &info->chunk_mutex)
+#define btrfs_device_data_ordered_init(device) \
+ seqcount_init(&device->data_seqcount)
#else
-#define btrfs_device_data_ordered_init(device, info) do { } while (0)
+#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif
#define BTRFS_DEV_STATE_WRITEABLE (0)
blk_status_t last_flush_error;
#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
- /* A seqcount_t with associated chunk_mutex (for lockdep) */
- seqcount_mutex_t data_seqcount;
+ seqcount_t data_seqcount;
#endif
/* the internal btrfs device id */
static inline void \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
{ \
+ preempt_disable(); \
write_seqcount_begin(&dev->data_seqcount); \
dev->name = size; \
write_seqcount_end(&dev->data_seqcount); \
+ preempt_enable(); \
}
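/*
 * Reader-side sketch this pairs with (illustrative; not part of the
 * patch): on 32-bit a u64 load can tear, so readers retry until the
 * sequence is stable. With a bare seqcount_t the writer must disable
 * preemption itself, or a preempted writer could stall readers.
 */
static inline u64 btrfs_device_get_sketch(struct btrfs_device *dev, const u64 *field)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&dev->data_seqcount);
		val = *field;
	} while (read_seqcount_retry(&dev->data_seqcount, seq));
	return val;
}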
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name) \
inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
- ASSERT(inode->i_mapping->a_ops->readpages);
/* calculate the shift required to use bmap */
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
- ASSERT(inode->i_mapping->a_ops->readpages);
/* calculate the shift required to use bmap */
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
return;
}
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mds_get_con(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
return NULL;
}
-static void con_put(struct ceph_connection *con)
+static void mds_put_con(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
* if the client is unresponsive for long enough, the mds will kill
* the session entirely.
*/
-static void peer_reset(struct ceph_connection *con)
+static void mds_peer_reset(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
send_mds_reconnect(mdsc, s);
}
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
* Note: returned pointer is the address of a structure that's
* managed separately. Caller must *not* attempt to free it.
*/
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
- int *proto, int force_new)
+static struct ceph_auth_handshake *
+mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
return auth;
}
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int mds_add_authorizer_challenge(struct ceph_connection *con,
void *challenge_buf, int challenge_buf_len)
{
struct ceph_mds_session *s = con->private;
challenge_buf, challenge_buf_len);
}
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int mds_verify_authorizer_reply(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
NULL, NULL, NULL, NULL);
}
-static int invalidate_authorizer(struct ceph_connection *con)
+static int mds_invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
}
static const struct ceph_connection_operations mds_con_ops = {
- .get = con_get,
- .put = con_put,
- .dispatch = dispatch,
- .get_authorizer = get_authorizer,
- .add_authorizer_challenge = add_authorizer_challenge,
- .verify_authorizer_reply = verify_authorizer_reply,
- .invalidate_authorizer = invalidate_authorizer,
- .peer_reset = peer_reset,
+ .get = mds_get_con,
+ .put = mds_put_con,
.alloc_msg = mds_alloc_msg,
+ .dispatch = mds_dispatch,
+ .peer_reset = mds_peer_reset,
+ .get_authorizer = mds_get_authorizer,
+ .add_authorizer_challenge = mds_add_authorizer_challenge,
+ .verify_authorizer_reply = mds_verify_authorizer_reply,
+ .invalidate_authorizer = mds_invalidate_authorizer,
.sign_message = mds_sign_message,
.check_message_signature = mds_check_message_signature,
.get_auth_request = mds_get_auth_request,
* Caller is responsible for freeing returned value if it is not error.
*/
char *cifs_compose_mount_options(const char *sb_mountdata,
- const char *fullpath,
- const struct dfs_info3_param *ref)
+ const char *fullpath,
+ const struct dfs_info3_param *ref,
+ char **devname)
{
int rc;
char *name;
strcat(mountdata, "ip=");
strcat(mountdata, srvIP);
- kfree(name);
+ if (devname)
+ *devname = name;
+ else
+ kfree(name);
/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
/* strip first '\' from fullpath */
mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- fullpath + 1, NULL);
+ fullpath + 1, NULL, NULL);
if (IS_ERR(mountdata)) {
kfree(devname);
return (struct vfsmount *)mountdata;
goto out;
}
- rc = cifs_setup_volume_info(cifs_sb->ctx);
+ rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
if (rc) {
root = ERR_PTR(rc);
goto out;
int add_treename);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
extern char *cifs_compose_mount_options(const char *sb_mountdata,
- const char *fullpath, const struct dfs_info3_param *ref);
+ const char *fullpath, const struct dfs_info3_param *ref,
+ char **devname);
/* extern void renew_parental_timestamps(struct dentry *direntry);*/
extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
struct TCP_Server_Info *server);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
extern int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx);
+extern int smb3_parse_opt(const char *options, const char *key, char **val);
extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
extern int cifs_call_async(struct TCP_Server_Info *server,
unsigned char *p24);
extern int
-cifs_setup_volume_info(struct smb3_fs_context *ctx);
+cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname);
extern struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx);
if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
tcon->nohandlecache = ctx->nohandlecache;
else
- tcon->nohandlecache = 1;
+ tcon->nohandlecache = true;
tcon->nodelete = ctx->nodelete;
tcon->local_lease = ctx->local_lease;
INIT_LIST_HEAD(&tcon->pending_opens);
} else if (ctx)
tcon->unix_ext = 1; /* Unix Extensions supported */
- if (tcon->unix_ext == 0) {
+ if (!tcon->unix_ext) {
cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
return;
}
rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
ref_path, &referral, NULL);
if (!rc) {
+ char *fake_devname = NULL;
+
mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- full_path + 1, &referral);
+ full_path + 1, &referral,
+ &fake_devname);
free_dfs_info_param(&referral);
if (IS_ERR(mdata)) {
rc = PTR_ERR(mdata);
mdata = NULL;
} else {
- smb3_cleanup_fs_context_contents(ctx);
- rc = cifs_setup_volume_info(ctx);
+ rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
}
+ kfree(fake_devname);
kfree(cifs_sb->ctx->mount_options);
cifs_sb->ctx->mount_options = mdata;
}
struct dfs_info3_param ref = {0};
char *mdata = NULL;
struct smb3_fs_context fake_ctx = {NULL};
+ char *fake_devname = NULL;
cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
return rc;
mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- full_path + 1, &ref);
+ full_path + 1, &ref,
+ &fake_devname);
free_dfs_info_param(&ref);
if (IS_ERR(mdata)) {
rc = PTR_ERR(mdata);
mdata = NULL;
} else
- rc = cifs_setup_volume_info(&fake_ctx);
+ rc = cifs_setup_volume_info(&fake_ctx, mdata, fake_devname);
kfree(mdata);
+ kfree(fake_devname);
if (!rc) {
/*
* we should pass a clone of the original context?
*/
int
-cifs_setup_volume_info(struct smb3_fs_context *ctx)
+cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
{
int rc = 0;
+ smb3_parse_devname(devname, ctx);
+
+ if (mntopts) {
+ char *ip;
+
+ cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
+ rc = smb3_parse_opt(mntopts, "ip", &ip);
+ if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
+ strlen(ip))) {
+ cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
+ return -EINVAL;
+ }
+ }
+
if (ctx->nullauth) {
cifs_dbg(FYI, "Anonymous login\n");
kfree(ctx->username);
int rc;
struct cache_entry *ce;
struct dfs_info3_param ref = {0};
- char *mdata = NULL;
+ char *mdata = NULL, *devname = NULL;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct smb3_fs_context ctx = {NULL};
up_read(&htable_rw_lock);
- mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref);
+ mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
+ &devname);
free_dfs_info_param(&ref);
if (IS_ERR(mdata)) {
goto out;
}
- rc = cifs_setup_volume_info(&ctx);
+ rc = cifs_setup_volume_info(&ctx, NULL, devname);
if (rc) {
ses = ERR_PTR(rc);
smb3_cleanup_fs_context_contents(&ctx);
kfree(mdata);
kfree(rpath);
+ kfree(devname);
return ses;
}
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
struct inode *inode;
+ int rc;
if (flags & LOOKUP_RCU)
return -ECHILD;
if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
CIFS_I(inode)->time = 0; /* force reval */
- if (cifs_revalidate_dentry(direntry))
- return 0;
+ rc = cifs_revalidate_dentry(direntry);
+ if (rc) {
+ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d\n", rc);
+ switch (rc) {
+ case -ENOENT:
+ case -ESTALE:
+ /*
+ * Those errors mean the dentry is invalid
+ * (file was deleted or recreated)
+ */
+ return 0;
+ default:
+ /*
+ * Otherwise some unexpected error happened;
+ * report it as-is to the VFS layer
+ */
+ return rc;
+ }
+ }
else {
/*
* If the inode wasn't known to be a dfs entry when
fsparam_flag_no("exec", Opt_ignore),
fsparam_flag_no("dev", Opt_ignore),
fsparam_flag_no("mand", Opt_ignore),
+ fsparam_flag_no("auto", Opt_ignore),
fsparam_string("cred", Opt_ignore),
fsparam_string("credentials", Opt_ignore),
+ fsparam_string("prefixpath", Opt_ignore),
{}
};
return 0;
}
+int smb3_parse_opt(const char *options, const char *key, char **val)
+{
+ int rc = -ENOENT;
+ char *opts, *orig, *p;
+
+ orig = opts = kstrdup(options, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ while ((p = strsep(&opts, ","))) {
+ char *nval;
+
+ if (!*p)
+ continue;
+ if (strncasecmp(p, key, strlen(key)))
+ continue;
+ nval = strchr(p, '=');
+ if (nval) {
+ if (nval == p)
+ continue;
+ *nval++ = 0;
+ *val = kstrndup(nval, strlen(nval), GFP_KERNEL);
+ rc = !*val ? -ENOMEM : 0;
+ goto out;
+ }
+ }
+out:
+ kfree(orig);
+ return rc;
+}
+
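/*
 * Usage sketch (illustrative; the values are hypothetical): pull the "ip"
 * option back out of a composed mount-option string.
 */
static int smb3_parse_opt_example(void)
{
	char *ip = NULL;
	int rc = smb3_parse_opt("vers=3.0,ip=192.168.0.1,soft", "ip", &ip);

	if (!rc) {
		/* ip points to a kstrdup'd "192.168.0.1"; the caller frees it */
		kfree(ip);
	}
	return rc;
}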
/*
* Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
* fields with the result. Returns 0 on success and an error otherwise
if (ctx->rdma && ctx->vals->protocol_id < SMB30_PROT_ID) {
cifs_dbg(VFS, "SMB Direct requires Version >=3.0\n");
- return -1;
+ return -EOPNOTSUPP;
}
#ifndef CONFIG_KEYS
/* make sure UNC has a share name */
if (strlen(ctx->UNC) < 3 || !strchr(ctx->UNC + 3, '\\')) {
cifs_dbg(VFS, "Malformed UNC. Unable to find share name.\n");
- return -1;
+ return -ENOENT;
}
if (!ctx->got_ip) {
if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr,
&ctx->UNC[2], len)) {
pr_err("Unable to determine destination address\n");
- return -1;
+ return -EHOSTUNREACH;
}
}
return 0;
cifs_parse_mount_err:
- return 1;
+ return -EINVAL;
}
int smb3_init_fs_context(struct fs_context *fc)
__le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
__le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
__le16 Reserved2;
- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
+ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
} __packed;
/* Dialects */
if (ssocket == NULL)
return -EAGAIN;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
cifs_dbg(FYI, "signal pending before send request\n");
return -ERESTARTSYS;
}
if (signal_pending(current) && (total_len != send_length)) {
cifs_dbg(FYI, "signal is pending after attempt to send\n");
- rc = -EINTR;
+ rc = -ERESTARTSYS;
}
/* uncork it */
if (*credits < num) {
/*
- * Return immediately if not too many requests in flight since
- * we will likely be stuck on waiting for credits.
+ * If the server is tight on resources or just gives us fewer
+ * credits for other reasons (e.g. requests are coming out of
+ * order and the server delays granting more credits until it
+ * processes a missing mid) and we exhausted most available
+ * credits there may be situations when we try to send
+ * a compound request but we don't have enough credits. At this
+ * point the client needs to decide if it should wait for
+ * additional credits or fail the request. If at least one
+ * request is in flight there is a high probability that the
+ * server will return enough credits to satisfy this compound
+ * request.
+ *
+ * Return immediately if no requests are in flight, since we would
+ * be stuck waiting for credits.
*/
- if (server->in_flight < num - *credits) {
+ if (server->in_flight == 0) {
spin_unlock(&server->req_lock);
trace_smb3_insufficient_credits(server->CurrentMid,
server->hostname, scredits, sin_flight);
{
int rc;
struct dentry *lower_dentry;
+ struct inode *lower_inode;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!(d_inode(lower_dentry)->i_opflags & IOP_XATTR)) {
+ lower_inode = d_inode(lower_dentry);
+ if (!(lower_inode->i_opflags & IOP_XATTR)) {
rc = -EOPNOTSUPP;
goto out;
}
- rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+ inode_lock(lower_inode);
+ rc = __vfs_setxattr_locked(lower_dentry, name, value, size, flags, NULL);
+ inode_unlock(lower_inode);
if (!rc && inode)
- fsstack_copy_attr_all(inode, d_inode(lower_dentry));
+ fsstack_copy_attr_all(inode, lower_inode);
out:
return rc;
}
}
/*
- * Some filesystems may redirty the inode during the writeback
- * due to delalloc, clear dirty metadata flags right before
- * write_inode()
+ * If the inode has dirty timestamps and we need to write them, call
+ * mark_inode_dirty_sync() to notify the filesystem about it and to
+ * change I_DIRTY_TIME into I_DIRTY_SYNC.
*/
- spin_lock(&inode->i_lock);
-
- dirty = inode->i_state & I_DIRTY;
if ((inode->i_state & I_DIRTY_TIME) &&
- ((dirty & I_DIRTY_INODE) ||
- wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+ (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
time_after(jiffies, inode->dirtied_time_when +
dirtytime_expire_interval * HZ))) {
- dirty |= I_DIRTY_TIME;
trace_writeback_lazytime(inode);
+ mark_inode_dirty_sync(inode);
}
+
+ /*
+ * Some filesystems may redirty the inode during the writeback
+ * due to delalloc, clear dirty metadata flags right before
+ * write_inode()
+ */
+ spin_lock(&inode->i_lock);
+ dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~dirty;
/*
spin_unlock(&inode->i_lock);
- if (dirty & I_DIRTY_TIME)
- mark_inode_dirty_sync(inode);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & ~I_DIRTY_PAGES) {
int err = write_inode(inode, wbc);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ set_page_huge_active(page);
/*
* unlock_page because locked by add_to_page_cache()
- * page_put due to reference from alloc_huge_page()
+ * put_page() due to reference from alloc_huge_page()
*/
unlock_page(page);
put_page(page);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
const struct iovec *fast_iov,
struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);
+static void io_req_task_queue(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
static inline void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
- REQ_F_INFLIGHT))
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
__io_clean_op(req);
}
{
struct io_kiocb *req;
- if (task && head->task != task)
+ if (task && head->task != task) {
+ /* in terms of cancelation, always match if req task is dead */
+ if (head->task->flags & PF_EXITING)
+ return true;
return false;
+ }
if (!files)
return true;
io_for_each_link(req, head) {
- if ((req->flags & REQ_F_WORK_INITIALIZED) &&
- (req->work.flags & IO_WQ_WORK_FILES) &&
+ if (!(req->flags & REQ_F_WORK_INITIALIZED))
+ continue;
+ if (req->file && req->file->f_op == &io_uring_fops)
+ return true;
+ if ((req->work.flags & IO_WQ_WORK_FILES) &&
req->work.identity->files == files)
return true;
}
free_fs_struct(fs);
req->work.flags &= ~IO_WQ_WORK_FS;
}
+ if (req->flags & REQ_F_INFLIGHT)
+ io_req_drop_files(req);
io_put_identity(req->task->io_uring, req);
}
return false;
atomic_inc(&id->files->count);
get_nsproxy(id->nsproxy);
- req->flags |= REQ_F_INFLIGHT;
- spin_lock_irq(&ctx->inflight_lock);
- list_add(&req->inflight_entry, &ctx->inflight_list);
- spin_unlock_irq(&ctx->inflight_lock);
+ if (!(req->flags & REQ_F_INFLIGHT)) {
+ req->flags |= REQ_F_INFLIGHT;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ spin_unlock_irq(&ctx->inflight_lock);
+ }
req->work.flags |= IO_WQ_WORK_FILES;
}
if (!(req->work.flags & IO_WQ_WORK_MM) &&
do {
struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
struct io_defer_entry, list);
- struct io_kiocb *link;
if (req_need_defer(de->req, de->seq))
break;
list_del_init(&de->list);
- /* punt-init is done before queueing for defer */
- link = __io_queue_async_work(de->req);
- if (link) {
- __io_queue_linked_timeout(link);
- /* drop submission reference */
- io_put_req_deferred(link, 1);
- }
+ io_req_task_queue(de->req);
kfree(de);
} while (!list_empty(&ctx->defer_list));
}
struct io_kiocb *req, *tmp;
struct io_uring_cqe *cqe;
unsigned long flags;
- bool all_flushed;
+ bool all_flushed, posted;
LIST_HEAD(list);
if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
return false;
+ posted = false;
spin_lock_irqsave(&ctx->completion_lock, flags);
list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
if (!io_match_task(req, tsk, files))
WRITE_ONCE(ctx->rings->cq_overflow,
ctx->cached_cq_overflow);
}
+ posted = true;
}
all_flushed = list_empty(&ctx->cq_overflow_list);
ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
}
- io_commit_cqring(ctx);
+ if (posted)
+ io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
- io_cqring_ev_posted(ctx);
+ if (posted)
+ io_cqring_ev_posted(ctx);
while (!list_empty(&list)) {
req = list_first_entry(&list, struct io_kiocb, compl.list);
else
__io_req_task_cancel(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
+
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ io_sq_thread_drop_mm_files();
}
static void io_req_task_submit(struct callback_head *cb)
struct io_uring_task *tctx = rb->task->io_uring;
percpu_counter_sub(&tctx->inflight, rb->task_refs);
+ if (atomic_read(&tctx->in_idle))
+ wake_up(&tctx->wait);
put_task_struct_many(rb->task, rb->task_refs);
rb->task = NULL;
}
struct io_uring_task *tctx = rb->task->io_uring;
percpu_counter_sub(&tctx->inflight, rb->task_refs);
+ if (atomic_read(&tctx->in_idle))
+ wake_up(&tctx->wait);
put_task_struct_many(rb->task, rb->task_refs);
}
rb->task = req->task;
/* read it all, or we did blocking attempt. no retry. */
if (!iov_iter_count(iter) || !force_nonblock ||
- (req->file->f_flags & O_NONBLOCK))
+ (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
goto done;
io_size -= ret;
* io_wq_work.flags, so initialize io_wq_work firstly.
*/
io_req_init_async(req);
- req->work.flags |= IO_WQ_WORK_NO_CANCEL;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
/* if the file has a flush method, be safe and punt to async */
if (close->put_file->f_op->flush && force_nonblock) {
+ /* not safe to cancel at this point */
+ req->work.flags |= IO_WQ_WORK_NO_CANCEL;
/* was never set, but play safe */
req->flags &= ~REQ_F_NOWAIT;
/* avoid grabbing files - we don't need the files */
struct io_uring_task *tctx = req->task->io_uring;
unsigned long flags;
- put_files_struct(req->work.identity->files);
- put_nsproxy(req->work.identity->nsproxy);
+ if (req->work.flags & IO_WQ_WORK_FILES) {
+ put_files_struct(req->work.identity->files);
+ put_nsproxy(req->work.identity->nsproxy);
+ }
spin_lock_irqsave(&ctx->inflight_lock, flags);
list_del(&req->inflight_entry);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
req->flags &= ~REQ_F_NEED_CLEANUP;
}
-
- if (req->flags & REQ_F_INFLIGHT)
- io_req_drop_files(req);
}
static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
file = __io_file_get(state, fd);
}
+ if (file && file->f_op == &io_uring_fops &&
+ !(req->flags & REQ_F_INFLIGHT)) {
+ io_req_init_async(req);
+ req->flags |= REQ_F_INFLIGHT;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ spin_unlock_irq(&ctx->inflight_lock);
+ }
+
return file;
}
TASK_INTERRUPTIBLE);
/* make sure we run task_work before checking for signals */
ret = io_run_task_work_sig();
- if (ret > 0)
+ if (ret > 0) {
+ finish_wait(&ctx->wait, &iowq.wq);
continue;
+ }
else if (ret < 0)
break;
if (io_should_wake(&iowq))
break;
- if (test_bit(0, &ctx->cq_check_overflow))
+ if (test_bit(0, &ctx->cq_check_overflow)) {
+ finish_wait(&ctx->wait, &iowq.wq);
continue;
+ }
if (uts) {
timeout = schedule_timeout(timeout);
if (timeout == 0) {
}
}
+static int io_uring_count_inflight(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files)
+{
+ struct io_kiocb *req;
+ int cnt = 0;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
+ cnt += io_match_task(req, task, files);
+ spin_unlock_irq(&ctx->inflight_lock);
+ return cnt;
+}
+
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct task_struct *task,
struct files_struct *files)
{
while (!list_empty_careful(&ctx->inflight_list)) {
struct io_task_cancel cancel = { .task = task, .files = files };
- struct io_kiocb *req;
DEFINE_WAIT(wait);
- bool found = false;
-
- spin_lock_irq(&ctx->inflight_lock);
- list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
- if (req->task != task ||
- req->work.identity->files != files)
- continue;
- found = true;
- break;
- }
- if (found)
- prepare_to_wait(&task->io_uring->wait, &wait,
- TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&ctx->inflight_lock);
+ int inflight;
- /* We need to keep going until we don't find a matching req */
- if (!found)
+ inflight = io_uring_count_inflight(ctx, task, files);
+ if (!inflight)
break;
io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
io_poll_remove_all(ctx, task, files);
io_kill_timeouts(ctx, task, files);
+ io_cqring_overflow_flush(ctx, true, task, files);
/* cancellations _may_ trigger task work */
io_run_task_work();
- schedule();
+
+ prepare_to_wait(&task->io_uring->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (inflight == io_uring_count_inflight(ctx, task, files))
+ schedule();
finish_wait(&task->io_uring->wait, &wait);
}
}
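/*
 * Note on the loop above (illustrative reasoning, not code from the patch):
 * re-counting inflight requests after prepare_to_wait() closes the race
 * where the last matching request completes between the first count and
 * the sleep. The wait entry is already registered by then, so the wakeup
 * cannot be lost and schedule() is simply skipped instead.
 */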
static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
{
- WARN_ON_ONCE(ctx->sqo_task != current);
-
mutex_lock(&ctx->uring_lock);
ctx->sqo_dead = 1;
mutex_unlock(&ctx->uring_lock);
struct task_struct *task = current;
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
- /* for SQPOLL only sqo_task has task notes */
io_disable_sqo_submit(ctx);
task = ctx->sq_data->thread;
atomic_inc(&task->io_uring->in_idle);
io_cancel_defer_files(ctx, task, files);
io_cqring_overflow_flush(ctx, true, task, files);
+ io_uring_cancel_files(ctx, task, files);
if (!files)
__io_uring_cancel_task_requests(ctx, task);
- else
- io_uring_cancel_files(ctx, task, files);
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
atomic_dec(&task->io_uring->in_idle);
- /*
- * If the files that are going away are the ones in the thread
- * identity, clear them out.
- */
- if (task->io_uring->identity->files == files)
- task->io_uring->identity->files = NULL;
io_sq_thread_unpark(ctx->sq_data);
}
}
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
+ /* trigger io_disable_sqo_submit() */
+ if (tctx->sqpoll)
+ __io_uring_files_cancel(NULL);
+
do {
/* read completions before cancelations */
inflight = tctx_inflight(tctx);
prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
/*
- * If we've seen completions, retry. This avoids a race where
- * a completion comes in before we did prepare_to_wait().
+ * If we've seen completions, retry without waiting. This
+ * avoids a race where a completion comes in before we did
+ * prepare_to_wait().
*/
- if (inflight != tctx_inflight(tctx))
- continue;
- schedule();
+ if (inflight == tctx_inflight(tctx))
+ schedule();
finish_wait(&tctx->wait, &wait);
} while (1);
- finish_wait(&tctx->wait, &wait);
atomic_dec(&tctx->in_idle);
io_uring_remove_task_files(tctx);
struct io_uring_task *tctx = current->io_uring;
struct io_ring_ctx *ctx = file->private_data;
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ io_uring_cancel_task_requests(ctx, NULL);
+
if (!tctx)
return 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
/* there is only one file note, which is owned by sqo_task */
- WARN_ON_ONCE((ctx->sqo_task == current) ==
+ WARN_ON_ONCE(ctx->sqo_task != current &&
+ xa_load(&tctx->xa, (unsigned long)file));
+ /* sqo_dead check is for when this happens after cancellation */
+ WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
!xa_load(&tctx->xa, (unsigned long)file));
io_disable_sqo_submit(ctx);
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
+#include <linux/uio.h>
#include "kernfs-internal.h"
* it difficult to use seq_file. Implement simplistic custom buffering for
* bin files.
*/
-static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
- char __user *user_buf, size_t count,
- loff_t *ppos)
+static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- ssize_t len = min_t(size_t, count, PAGE_SIZE);
+ struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+ ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
const struct kernfs_ops *ops;
char *buf;
of->event = atomic_read(&of->kn->attr.open->event);
ops = kernfs_ops(of->kn);
if (ops->read)
- len = ops->read(of, buf, len, *ppos);
+ len = ops->read(of, buf, len, iocb->ki_pos);
else
len = -EINVAL;
if (len < 0)
goto out_free;
- if (copy_to_user(user_buf, buf, len)) {
+ if (copy_to_iter(buf, len, iter) != len) {
len = -EFAULT;
goto out_free;
}
- *ppos += len;
+ iocb->ki_pos += len;
out_free:
if (buf == of->prealloc_buf)
return len;
}
-/**
- * kernfs_fop_read - kernfs vfs read callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- */
-static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct kernfs_open_file *of = kernfs_of(file);
-
- if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
- return seq_read(file, user_buf, count, ppos);
- else
- return kernfs_file_direct_read(of, user_buf, count, ppos);
+ if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
+ return seq_read_iter(iocb, iter);
+ return kernfs_file_read_iter(iocb, iter);
}
-/**
- * kernfs_fop_write - kernfs vfs write callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
+/*
* Copy data in from userland and pass it to the matching kernfs write
* operation.
*
* modify only the value you're changing, then write the entire buffer
* back.
*/
-static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct kernfs_open_file *of = kernfs_of(file);
+ struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+ ssize_t len = iov_iter_count(iter);
const struct kernfs_ops *ops;
- ssize_t len;
char *buf;
if (of->atomic_write_len) {
- len = count;
if (len > of->atomic_write_len)
return -E2BIG;
} else {
- len = min_t(size_t, count, PAGE_SIZE);
+ len = min_t(size_t, len, PAGE_SIZE);
}
buf = of->prealloc_buf;
if (!buf)
return -ENOMEM;
- if (copy_from_user(buf, user_buf, len)) {
+ if (copy_from_iter(buf, len, iter) != len) {
len = -EFAULT;
goto out_free;
}
ops = kernfs_ops(of->kn);
if (ops->write)
- len = ops->write(of, buf, len, *ppos);
+ len = ops->write(of, buf, len, iocb->ki_pos);
else
len = -EINVAL;
mutex_unlock(&of->mutex);
if (len > 0)
- *ppos += len;
+ iocb->ki_pos += len;
out_free:
if (buf == of->prealloc_buf)
/*
* Write path needs to atomic_write_len outside active reference.
- * Cache it in open_file. See kernfs_fop_write() for details.
+ * Cache it in open_file. See kernfs_fop_write_iter() for details.
*/
of->atomic_write_len = ops->atomic_write_len;
EXPORT_SYMBOL_GPL(kernfs_notify);
const struct file_operations kernfs_file_fops = {
- .read = kernfs_fop_read,
- .write = kernfs_fop_write,
+ .read_iter = kernfs_fop_read_iter,
+ .write_iter = kernfs_fop_write_iter,
.llseek = generic_file_llseek,
.mmap = kernfs_fop_mmap,
.open = kernfs_fop_open,
.release = kernfs_fop_release,
.poll = kernfs_fop_poll,
.fsync = noop_fsync,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
/**
return NULL;
}
+/*
+ * Compare 2 layout stateid sequence ids, to see which is newer,
+ * taking into account wraparound issues.
+ */
+static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
+{
+ return (s32)(s1 - s2) > 0;
+}
+
+static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
+{
+ if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
+ lo->plh_barrier = newseq;
+}
+
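/*
 * Worked example (illustrative): comparing via signed subtraction keeps
 * the ordering correct across u32 wraparound:
 *   pnfs_seqid_is_newer(5, 3)          -> (s32)2  > 0 -> true
 *   pnfs_seqid_is_newer(3, 5)          -> (s32)-2 > 0 -> false
 *   pnfs_seqid_is_newer(2, 0xfffffffe) -> (s32)4  > 0 -> true (wrapped)
 */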
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
u32 seq)
if (seq != 0) {
WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
lo->plh_return_seq = seq;
+ pnfs_barrier_update(lo, seq);
}
}
return rv;
}
-/*
- * Compare 2 layout stateid sequence ids, to see which is newer,
- * taking into account wraparound issues.
- */
-static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
-{
- return (s32)(s1 - s2) > 0;
-}
-
static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
const struct pnfs_layout_range *recall_range)
new_barrier = be32_to_cpu(new->seqid);
else if (new_barrier == 0)
return;
- if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
- lo->plh_barrier = new_barrier;
+ pnfs_barrier_update(lo, new_barrier);
}
static bool
{
u32 seqid = be32_to_cpu(stateid->seqid);
- return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
+ return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
}
/* lget is set to 1 if called from inside send_layoutget call chain */
return false;
set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
pnfs_get_layout_hdr(lo);
+ nfs4_stateid_copy(stateid, &lo->plh_stateid);
+ *cred = get_cred(lo->plh_lc_cred);
if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
- nfs4_stateid_copy(stateid, &lo->plh_stateid);
- *cred = get_cred(lo->plh_lc_cred);
if (lo->plh_return_seq != 0)
stateid->seqid = cpu_to_be32(lo->plh_return_seq);
if (iomode != NULL)
*iomode = lo->plh_return_iomode;
pnfs_clear_layoutreturn_info(lo);
- return true;
- }
- nfs4_stateid_copy(stateid, &lo->plh_stateid);
- *cred = get_cred(lo->plh_lc_cred);
- if (iomode != NULL)
+ } else if (iomode != NULL)
*iomode = IOMODE_ANY;
+ pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
return true;
}
wake_up_var(&lo->plh_outstanding);
}
+static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
+}
+
static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
unsigned long *bitlock = &lo->plh_flags;
goto out_forget;
}
- if (!pnfs_layout_is_valid(lo)) {
- /* We have a completely new layout */
- pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
- } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+ if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
/* existing state ID, make sure the sequence number matches. */
if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+ if (!pnfs_layout_is_valid(lo) &&
+ pnfs_is_first_layoutget(lo))
+ lo->plh_barrier = 0;
dprintk("%s forget reply due to sequence\n", __func__);
goto out_forget;
}
pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
- } else {
+ } else if (pnfs_layout_is_valid(lo)) {
/*
* We got an entirely new state ID. Mark all segments for the
* inode invalid, and retry the layoutget
*/
- pnfs_mark_layout_stateid_invalid(lo, &free_me);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .length = NFS4_MAX_UINT64,
+ };
+ pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
+ pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ &range, 0);
goto out_forget;
+ } else {
+ /* We have a completely new layout */
+ if (!pnfs_is_first_layoutget(lo))
+ goto out_forget;
+ pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
}
pnfs_get_lseg(lseg);
if (isdotent(name, namlen)) {
if (namlen == 2) {
dchild = dget_parent(dparent);
- /* filesystem root - cannot return filehandle for ".." */
+ /*
+ * Don't return filehandle for ".." if we're at
+ * the filesystem or export root:
+ */
if (dchild == dparent)
goto out;
+ if (dparent == exp->ex_path.dentry)
+ goto out;
} else
dchild = dget(dparent);
} else
if (ovl_is_private_xattr(sb, name))
continue;
+
+ error = security_inode_copy_up_xattr(name);
+ if (error < 0 && error != -EOPNOTSUPP)
+ break;
+ if (error == 1) {
+ error = 0;
+ continue; /* Discard */
+ }
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
goto retry;
}
- error = security_inode_copy_up_xattr(name);
- if (error < 0 && error != -EOPNOTSUPP)
- break;
- if (error == 1) {
- error = 0;
- continue; /* Discard */
- }
error = vfs_setxattr(new, name, value, size, 0);
if (error) {
if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
buflen -= thislen;
memcpy(&buf[buflen], name, thislen);
- tmp = dget_dlock(d->d_parent);
spin_unlock(&d->d_lock);
+ tmp = dget_parent(d);
dput(d);
d = tmp;
const struct cred *old_cred;
int ret;
- if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
- return 0;
+ ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
+ if (ret <= 0)
+ return ret;
ret = ovl_real_fdget_meta(file, &real, !datasync);
if (ret)
goto out;
if (!value && !upperdentry) {
+ old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_getxattr(realdentry, name, NULL, 0);
+ revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
}
bool ovl_is_metacopy_dentry(struct dentry *dentry);
char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
int padding);
+int ovl_sync_status(struct ovl_fs *ofs);
static inline bool ovl_is_impuredir(struct super_block *sb,
struct dentry *dentry)
atomic_long_t last_ino;
/* Whiteout dentry cache */
struct dentry *whiteout;
+ /* r/o snapshot of the upperdir sb's errseq, only taken on volatile mounts */
+ errseq_t errseq;
};
static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
struct ovl_dir_file *od = file->private_data;
struct dentry *dentry = file->f_path.dentry;
- struct file *realfile = od->realfile;
+ struct file *old, *realfile = od->realfile;
if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
return want_upper ? NULL : realfile;
* Need to check if we started out being a lower dir, but got copied up
*/
if (!od->is_upper) {
- struct inode *inode = file_inode(file);
-
realfile = READ_ONCE(od->upperfile);
if (!realfile) {
struct path upperpath;
ovl_path_upper(dentry, &upperpath);
realfile = ovl_dir_open_realfile(file, &upperpath);
+ if (IS_ERR(realfile))
+ return realfile;
- inode_lock(inode);
- if (!od->upperfile) {
- if (IS_ERR(realfile)) {
- inode_unlock(inode);
- return realfile;
- }
- smp_store_release(&od->upperfile, realfile);
- } else {
- /* somebody has beaten us to it */
- if (!IS_ERR(realfile))
- fput(realfile);
- realfile = od->upperfile;
+ old = cmpxchg_release(&od->upperfile, NULL, realfile);
+ if (old) {
+ fput(realfile);
+ realfile = old;
}
- inode_unlock(inode);
}
}
struct file *realfile;
int err;
- if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb)))
- return 0;
+ err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
+ if (err <= 0)
+ return err;
realfile = ovl_dir_real_file(file, true);
err = PTR_ERR_OR_ZERO(realfile);
struct super_block *upper_sb;
int ret;
- if (!ovl_upper_mnt(ofs))
- return 0;
+ ret = ovl_sync_status(ofs);
+ /*
+ * We always have to set the error here, because the return value
+ * isn't checked in syncfs; instead we indirectly return the error
+ * via the sb's writeback errseq, which the VFS inspects after this call.
+ */
+ if (ret < 0) {
+ errseq_set(&sb->s_wb_err, -EIO);
+ return -EIO;
+ }
+
+ if (!ret)
+ return ret;
- if (!ovl_should_sync(ofs))
- return 0;
/*
* Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
* All the super blocks will be iterated, including upper_sb.
unsigned int numlower;
int err;
+ err = -EIO;
+ if (WARN_ON(sb->s_user_ns != current_user_ns()))
+ goto out;
+
sb->s_d_op = &ovl_dentry_operations;
err = -ENOMEM;
sb->s_op = &ovl_super_operations;
if (ofs->config.upperdir) {
+ struct super_block *upper_sb;
+
if (!ofs->config.workdir) {
pr_err("missing 'workdir'\n");
goto out_err;
if (err)
goto out_err;
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+ if (!ovl_should_sync(ofs)) {
+ ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
+ if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
+ err = -EIO;
+ pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
+ goto out_err;
+ }
+ }
+
err = ovl_get_workdir(sb, ofs, &upperpath);
if (err)
goto out_err;
if (!ofs->workdir)
sb->s_flags |= SB_RDONLY;
- sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
- sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
-
+ sb->s_stack_depth = upper_sb->s_stack_depth;
+ sb->s_time_gran = upper_sb->s_time_gran;
}
oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
err = PTR_ERR(oe);
kfree(buf);
return ERR_PTR(res);
}
+
+/*
+ * ovl_sync_status() - Check fs sync status for volatile mounts
+ *
+ * Returns 1 if this is not a volatile mount and a real sync is required.
+ *
+ * Returns 0 if syncing can be skipped because the mount is volatile and no
+ * errors have occurred on the upperdir since the mount.
+ *
+ * Returns -errno if it is a volatile mount and an error has occurred on the
+ * upperdir since the mount; if the error code changes, the latest error code
+ * is returned.
+ */
+
+int ovl_sync_status(struct ovl_fs *ofs)
+{
+ struct vfsmount *mnt;
+
+ if (ovl_should_sync(ofs))
+ return 1;
+
+ mnt = ovl_upper_mnt(ofs);
+ if (!mnt)
+ return 0;
+
+ return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
+}
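/*
 * Sketch of the errseq handshake (illustrative; the name is hypothetical):
 * a volatile mount samples the upper sb's error sequence once at mount
 * time; any later writeback error advances it, and errseq_check() then
 * returns that error here instead of 0.
 */
static int volatile_error_sketch(struct super_block *upper_sb, errseq_t since)
{
	/* since = errseq_sample(&upper_sb->s_wb_err) was taken at mount time */
	return errseq_check(&upper_sb->s_wb_err, since);
}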
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
+ .splice_write = iter_file_splice_write,
};
/*
return 0;
}
+ if (!val)
+ return -EINVAL;
+ len = strlen(val);
+ if (len == 0)
+ return -EINVAL;
+
/*
* To set sysctl options, we use a temporary mount of proc, look up the
* respective sys/ file and write to it. To avoid mounting it when no
file, param, val);
goto out;
}
- len = strlen(val);
wret = kernel_write(file, val, len, &pos);
if (wret < 0) {
err = wret;
struct buffer_head *bh = NULL;
int nsr = 0;
struct udf_sb_info *sbi;
+ loff_t session_offset;
sbi = UDF_SB(sb);
if (sb->s_blocksize < sizeof(struct volStructDesc))
else
sectorsize = sb->s_blocksize;
- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
+ sector += session_offset;
udf_debug("Starting at sector %u (%lu byte sectors)\n",
(unsigned int)(sector >> sb->s_blocksize_bits),
if (nsr > 0)
return 1;
- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
- VSD_FIRST_SECTOR_OFFSET)
+ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
return -1;
else
return 0;
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
#ifndef __DT_APQ8016_LPASS_H
#define __DT_APQ8016_LPASS_H
-#define MI2S_PRIMARY 0
-#define MI2S_SECONDARY 1
-#define MI2S_TERTIARY 2
-#define MI2S_QUATERNARY 3
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */
#endif /* __DT_APQ8016_LPASS_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_QCOM_LPASS_H
+#define __DT_QCOM_LPASS_H
+
+#define MI2S_PRIMARY 0
+#define MI2S_SECONDARY 1
+#define MI2S_TERTIARY 2
+#define MI2S_QUATERNARY 3
+#define MI2S_QUINARY 4
+
+#define LPASS_DP_RX 5
+
+#define LPASS_MCLK0 0
+
+#endif /* __DT_QCOM_LPASS_H */
#ifndef __DT_SC7180_LPASS_H
#define __DT_SC7180_LPASS_H
-#define MI2S_PRIMARY 0
-#define MI2S_SECONDARY 1
-#define LPASS_DP_RX 2
+#include <dt-bindings/sound/qcom,lpass.h>
-#define LPASS_MCLK0 0
+/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */
#endif /* __DT_SC7180_LPASS_H */
return kobject_name(&dev->kobj);
}
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to. If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+ return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
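/*
 * Usage sketch (illustrative only): dev_bus_name() is safe to call even
 * when the device has neither a bus nor a class.
 */
dev_warn(dev, "no driver bound on \"%s\"\n", dev_bus_name(dev));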
#ifdef CONFIG_NUMA
}
#endif
+void set_page_huge_active(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
static inline void *dev_iommu_priv_get(struct device *dev)
{
- return dev->iommu->priv;
+ if (dev->iommu)
+ return dev->iommu->priv;
+ else
+ return NULL;
}
static inline void dev_iommu_priv_set(struct device *dev, void *priv)
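/*
 * Sketch (not part of the patch): with the NULL check above, callers can
 * probe for per-device IOMMU data without testing dev->iommu themselves.
 * The struct my_iommu_data type is hypothetical.
 */
struct my_iommu_data *data = dev_iommu_priv_get(dev);

if (!data)
	return -ENODEV;	/* no IOMMU instance attached to this device */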
return (void *)arch_kasan_reset_tag(addr);
}
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
unsigned int cpu,
const char *namefmt);
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
}
# include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
#endif
* Objtool generates debug info for both FUNC & CODE, but needs special
* annotations for each CODE's start (to describe the actual stack frame).
*
+ * Objtool requires that all code be contained in an ELF symbol. Symbol
+ * names that have a .L prefix do not emit symbol table entries. .L-prefixed
+ * symbols can be used within a code region, but should be avoided for
+ * denoting a range of code via ``SYM_*_START/END`` annotations.
+ *
* ALIAS -- does not generate debug info -- the aliased function will
*/
const struct mdiobb_ops *ops;
};
+int mdiobb_read(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
return val.vbool;
}
-/**
- * mlx5_core_net - Provide net namespace of the mlx5_core_dev
- * @dev: mlx5 core device
- *
- * mlx5_core_net() returns the net namespace of mlx5 core device.
- * This can be called only in below described limited context.
- * (a) When a devlink instance for mlx5_core is registered and
- * when devlink reload operation is disabled.
- * or
- * (b) during devlink reload reload_down() and reload_up callbacks
- * where it is ensured that devlink instance's net namespace is
- * stable.
- */
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
- return devlink_net(priv_to_devlink(dev));
-}
-
#endif /* MLX5_DRIVER_H */
NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
* Location
*/
+ NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
+ * Space Control
+ */
NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1)
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
NVME_CSTS_SHST_MASK = 3 << 2,
+ NVME_CMBMSC_CRE = 1 << 0,
+ NVME_CMBMSC_CMSE = 1 << 1,
};
struct nvme_id_power_state {
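/*
 * Hedged sketch of consuming the new definitions: gate on the CMBS
 * capability bit, then enable the CMB registers. The dev->bar and
 * dev->ctrl.cap fields are assumptions about the surrounding driver,
 * and a real sequence would also program the controller base address
 * before setting CMSE.
 */
if (NVME_CAP_CMBS(dev->ctrl.cap))
	writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE,
	       dev->bar + NVME_REG_CMBMSC);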
return ERR_PTR(-ENODEV);
}
+static inline struct regulator *__must_check
+devm_regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
return -EINVAL;
}
+static inline int regulator_sync_voltage(struct regulator *regulator)
+{
+ return -EINVAL;
+}
+
static inline int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
return 0;
}
+static inline int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
static inline void *regulator_get_drvdata(struct regulator *regulator)
{
return NULL;
#define XDR_QUADLEN(l) (((l) + 3) >> 2)
/*
- * Generic opaque `network object.' At the kernel level, this type
- * is used only by lockd.
+ * Generic opaque `network object.'
*/
#define XDR_MAX_NETOBJ 1024
struct xdr_netobj {
+++ /dev/null
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
- return ktime_get_real_seconds();
-}
-
-#endif
\
it_func_ptr = \
rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
- do { \
- it_func = (it_func_ptr)->func; \
- __data = (it_func_ptr)->data; \
- ((void(*)(void *, proto))(it_func))(__data, args); \
- } while ((++it_func_ptr)->func); \
+ if (it_func_ptr) { \
+ do { \
+ it_func = (it_func_ptr)->func; \
+ __data = (it_func_ptr)->data; \
+ ((void(*)(void *, proto))(it_func))(__data, args); \
+ } while ((++it_func_ptr)->func); \
+ } \
return 0; \
} \
DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
extern int tty_dev_name_to_number(const char *name, dev_t *number);
extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
extern void tty_ldisc_unlock(struct tty_struct *tty);
+extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
+ u32 rx_speed; /* in bps - NOT Mbps */
+ u32 tx_speed; /* in bps - NOT Mbps */
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
-#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */
+#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
/*
* VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
* determine which allocations need the module shadow freed.
*/
-/*
- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
- * vfree_atomic().
- */
-#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
-
/* bits [20..32] reserved for arch specific ioremap internals */
/*
u32 width, u32 height);
/**
- * v4l2_get_link_rate - Get link rate from transmitter
+ * v4l2_get_link_freq - Get link rate from transmitter
*
* @handler: The transmitter's control handler
* @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
* -ENOENT: Link frequency or pixel rate control not found
* -EINVAL: Invalid link frequency value
*/
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div);
static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
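/*
 * Hypothetical call site for the renamed helper: for a CSI-2 sensor,
 * @mul is typically bits-per-pixel and @div is 2 * number of lanes.
 * The sensor->ctrl_handler field is an assumption.
 */
s64 freq = v4l2_get_link_freq(sensor->ctrl_handler, 10, 2 * 4);

if (freq < 0)
	return freq;	/* -ENOENT or -EINVAL, as documented above */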
/**
- * @struct cfg80211_sar_chan_ranges - sar frequency ranges
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
* @start_freq: start range edge frequency
* @end_freq: end range edge frequency
*/
* This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer, for the
* given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
* @max_data_retry_count: maximum supported per TID retry count for
* configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
*/
struct wiphy {
/* assign these fields before you register the wiphy */
* @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options)
* @icsk_ack: Delayed ACK control data
 * @icsk_mtup: MTU probing control data
+ * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout: TCP_USER_TIMEOUT value
*/
struct inet_connection_sock {
/* inet_sock has to be the first member! */
u32 probe_timestamp;
} icsk_mtup;
+ u32 icsk_probes_tstamp;
u32 icsk_user_timeout;
u64 icsk_ca_priv[104 / sizeof(u64)];
unsigned short n2, n2count;
unsigned short t1, t2;
struct timer_list t1timer, t2timer;
+ bool t1timer_stop, t2timer_stop;
/* Internal control information */
struct sk_buff_head write_queue;
struct lapb_frame frmr_data;
unsigned char frmr_type;
+ spinlock_t lock;
refcount_t refcnt;
};
* This callback may sleep.
* @sta_set_4addr: Called to notify the driver when a station starts/stops using
* 4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *key_end, const u32 *data,
u64 timeout, u64 expiration, gfp_t gfp);
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_expr *expr_array[]);
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr);
old = *pold;
*pold = new;
if (old != NULL)
- qdisc_tree_flush_backlog(old);
+ qdisc_purge_queue(old);
sch_tree_unlock(sch);
return old;
sk->sk_txhash = net_tx_rndhash();
}
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
{
- if (sk->sk_txhash)
+ if (sk->sk_txhash) {
sk_set_txhash(sk);
+ return true;
+ }
+ return false;
}
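/*
 * Sketch of a caller acting on the new bool return; the MIB counter
 * shown is an assumption about the surrounding TCP code.
 */
if (sk_rethink_txhash(sk))
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);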
static inline struct dst_entry *
return dst;
}
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
- sk_rethink_txhash(sk);
-
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
}
}
+static inline void dst_negative_advice(struct sock *sk)
+{
+ sk_rethink_txhash(sk);
+ __dst_negative_advice(sk);
+}
+
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
-extern void tcp_rack_mark_lost(struct sock *sk);
+extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
- netdev_features_t features);
+ netdev_features_t features, bool is_ipv6);
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
struct snd_pcm_hw_rule {
unsigned int cond;
int var;
- int deps[4];
+ int deps[5];
snd_pcm_hw_rule_func_t func;
void *private;
);
/*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
*/
TRACE_EVENT(sched_process_fork,
)
);
+/* Record an xdr_buf containing a fully-formed RPC message */
+DECLARE_EVENT_CLASS(svc_xdr_msg_class,
+ TP_PROTO(
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(xdr),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __be32 *p = (__be32 *)xdr->head[0].iov_base;
+
+ __entry->xid = be32_to_cpu(*p);
+ __entry->head_base = p;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->xid,
+ __entry->head_base, __entry->head_len, __entry->page_len,
+ __entry->tail_base, __entry->tail_len, __entry->msg_len
+ )
+);
+
+#define DEFINE_SVCXDRMSG_EVENT(name) \
+ DEFINE_EVENT(svc_xdr_msg_class, \
+ svc_xdr_##name, \
+ TP_PROTO( \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(xdr))
+
+DEFINE_SVCXDRMSG_EVENT(recvfrom);
+
+/* Record an xdr_buf containing arbitrary data, tagged with an XID */
DECLARE_EVENT_CLASS(svc_xdr_buf_class,
TP_PROTO(
- const struct svc_rqst *rqst,
+ __be32 xid,
const struct xdr_buf *xdr
),
- TP_ARGS(rqst, xdr),
+ TP_ARGS(xid, xdr),
TP_STRUCT__entry(
__field(u32, xid)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->xid = be32_to_cpu(xid);
__entry->head_base = xdr->head[0].iov_base;
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
DEFINE_EVENT(svc_xdr_buf_class, \
svc_xdr_##name, \
TP_PROTO( \
- const struct svc_rqst *rqst, \
+ __be32 xid, \
const struct xdr_buf *xdr \
), \
- TP_ARGS(rqst, xdr))
+ TP_ARGS(xid, xdr))
-DEFINE_SVCXDRBUF_EVENT(recvfrom);
DEFINE_SVCXDRBUF_EVENT(sendto);
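/*
 * Illustrative call sites for the reworked events (the rqstp variable
 * is an assumption about the surrounding sunrpc code):
 *
 *	trace_svc_xdr_recvfrom(&rqstp->rq_arg);
 *	trace_svc_xdr_sendto(rqstp->rq_xid, &rqstp->rq_res);
 */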
/*
BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3,
};
-struct br_mrp_tlv_hdr {
- __u8 type;
- __u8 length;
-};
-
-struct br_mrp_sub_tlv_hdr {
- __u8 type;
- __u8 length;
-};
-
-struct br_mrp_end_hdr {
- struct br_mrp_tlv_hdr hdr;
-};
-
-struct br_mrp_common_hdr {
- __be16 seq_id;
- __u8 domain[MRP_DOMAIN_UUID_LENGTH];
-};
-
-struct br_mrp_ring_test_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 state;
- __be16 transitions;
- __be32 timestamp;
-};
-
-struct br_mrp_ring_topo_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 interval;
-};
-
-struct br_mrp_ring_link_hdr {
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 interval;
- __be16 blocked;
-};
-
-struct br_mrp_sub_opt_hdr {
- __u8 type;
- __u8 manufacture_data[MRP_MANUFACTURE_DATA_LENGTH];
-};
-
-struct br_mrp_test_mgr_nack_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 other_prio;
- __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_test_prop_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 other_prio;
- __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_oui_hdr {
- __u8 oui[MRP_OUI_LENGTH];
-};
-
-struct br_mrp_in_test_hdr {
- __be16 id;
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 state;
- __be16 transitions;
- __be32 timestamp;
-};
-
-struct br_mrp_in_topo_hdr {
- __u8 sa[ETH_ALEN];
- __be16 id;
- __be16 interval;
-};
-
-struct br_mrp_in_link_hdr {
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 id;
- __be16 interval;
-};
-
#endif
#define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100
#define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800
-#define RKISP1_CIF_ISP_AE_MEAN_MAX 25
-#define RKISP1_CIF_ISP_HIST_BIN_N_MAX 16
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10 25
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12 81
+#define RKISP1_CIF_ISP_AE_MEAN_MAX RKISP1_CIF_ISP_AE_MEAN_MAX_V12
+
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 16
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 32
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
+
#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS 3
#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE 17
* Gamma out
*/
/* Maximum number of color samples supported */
-#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES 17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 34
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
/*
* Lens shade correction
/*
* Histogram calculation
*/
-/* Last 3 values unused. */
-#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE 28
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
/*
* Defect Pixel Cluster Correction
#define RKISP1_CIF_ISP_STAT_AFM (1U << 2)
#define RKISP1_CIF_ISP_STAT_HIST (1U << 3)
+/**
+ * enum rkisp1_cif_isp_version - ISP variants
+ *
+ * @RKISP1_V10: used at least in rk3288 and rk3399
+ * @RKISP1_V11: declared in the original vendor code, but not used
+ * @RKISP1_V12: used at least in rk3326 and px30
+ * @RKISP1_V13: used at least in rk1808
+ */
+enum rkisp1_cif_isp_version {
+ RKISP1_V10 = 10,
+ RKISP1_V11,
+ RKISP1_V12,
+ RKISP1_V13,
+};
+
enum rkisp1_cif_isp_histogram_mode {
RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
*
* @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
* @gamma_y: gamma out curve y-axis for all color components
+ *
+ * The number of entries of @gamma_y depends on the hardware revision
+ * as reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
+ * entries. RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum
+ * of the two.
*/
struct rkisp1_cif_isp_goc_config {
__u32 mode;
* skipped
* @meas_window: coordinates of the measure window
* @hist_weight: weighting factor for sub-windows
+ *
+ * The number of entries of @hist_weight depends on the hardware revision
+ * as reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
+ * entries. RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum
+ * of the two.
*/
struct rkisp1_cif_isp_hst_config {
__u32 mode;
* @exp_mean: Mean luminance value of block xx
* @bls_val: BLS measured values
*
- * Image is divided into 5x5 blocks.
+ * The number of entries of @exp_mean depends on the hardware revision
+ * as reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries.
+ * RKISP1_CIF_ISP_AE_MEAN_MAX is equal to the maximum of the two.
+ *
+ * Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
*/
struct rkisp1_cif_isp_ae_stat {
__u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
/**
* struct rkisp1_cif_isp_hist_stat - statistics histogram data
*
- * @hist_bins: measured bin counters
+ * @hist_bins: measured bin counters. Each bin is a 20-bit unsigned fixed-point
+ * value. Bits 0-4 are the fractional part and bits 5-19 are the
+ * integer part.
+ *
+ * The window of the measurement area is divided into 5x5 sub-windows for
+ * V10/V11 and into 9x9 sub-windows for V12. The histogram is then computed for
+ * each sub-window independently and the final result is a weighted average of
+ * the histogram measurements on all sub-windows. The window of the
+ * measurement area and the weight of each sub-window are configurable using
+ * struct @rkisp1_cif_isp_hst_config.
+ *
+ * The histogram contains 16 bins in V10/V11 and 32 bins in V12/V13.
+ *
+ * The number of entries of @hist_bins depends on the hardware revision
+ * as reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
- * Measurement window divided into 25 sub-windows, set
- * with ISP_HIST_XXX
+ * Versions <= V11 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries.
+ * RKISP1_CIF_ISP_HIST_BIN_N_MAX is equal to the maximum of the two.
*/
struct rkisp1_cif_isp_hist_stat {
- __u16 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+ __u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
};
/**
pad:4,
reserved1:16;
#elif defined(__BIG_ENDIAN_BITFIELD)
- __u32 reserved:20,
+ __u32 cmpri:4,
+ cmpre:4,
pad:4,
- cmpri:4,
- cmpre:4;
+ reserved:20;
#else
#error "Please fix <asm/byteorder.h>"
#endif
};
/* The v4l2 sub-device video device node is registered in read-only mode. */
-#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0)
+#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};
+enum pvrdma_network_type {
+ PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_IPV4,
+ PVRDMA_NETWORK_IPV6
+};
+
struct pvrdma_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 reserved;
config CONSTRUCTORS
bool
- depends on !UML
config IRQ_WORK
bool
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .ret_stack = NULL,
+ .ret_stack = NULL,
+ .tracing_graph_pause = ATOMIC_INIT(0),
#endif
#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
.trace_recursion = 0,
/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
-#ifdef CONFIG_CONSTRUCTORS
+/*
+ * For UML, the constructors have already been called by the
+ * normal setup code as it's just a normal ELF binary, so we
+ * cannot do it again - but we do need CONFIG_CONSTRUCTORS
+ * even on UML for modules.
+ */
+#if defined(CONFIG_CONSTRUCTORS) && !defined(CONFIG_UML)
ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
for (; fn < (ctor_fn_t *) __ctors_end; fn++)
fd = *(int *)key;
f = fget_raw(fd);
- if (!f || !inode_storage_ptr(f->f_inode))
+ if (!f)
+ return -EBADF;
+ if (!inode_storage_ptr(f->f_inode)) {
+ fput(f);
return -EBADF;
+ }
sdata = bpf_local_storage_update(f->f_inode,
(struct bpf_local_storage_map *)map,
* bpf_local_storage_update expects the owner to have a
* valid storage pointer.
*/
- if (!inode_storage_ptr(inode))
+ if (!inode || !inode_storage_ptr(inode))
return (unsigned long)NULL;
sdata = inode_storage_lookup(inode, map, true);
if (sdata)
return (unsigned long)sdata->data;
- /* This helper must only called from where the inode is gurranteed
+ /* This helper must only be called from where the inode is guaranteed
* to have a refcount and cannot be freed.
*/
if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
BPF_CALL_2(bpf_inode_storage_delete,
struct bpf_map *, map, struct inode *, inode)
{
- /* This helper must only called from where the inode is gurranteed
+ if (!inode)
+ return -EINVAL;
+
+ /* This helper must only be called from where the inode is guaranteed
* to have a refcount and cannot be freed.
*/
return inode_storage_delete(inode, map);
BTF_ID(func, bpf_lsm_file_lock)
BTF_ID(func, bpf_lsm_file_open)
BTF_ID(func, bpf_lsm_file_receive)
+
+#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_inet_conn_established)
+#endif /* CONFIG_SECURITY_NETWORK */
+
BTF_ID(func, bpf_lsm_inode_create)
BTF_ID(func, bpf_lsm_inode_free_security)
BTF_ID(func, bpf_lsm_inode_getattr)
BTF_ID(func, bpf_lsm_inode_unlink)
BTF_ID(func, bpf_lsm_kernel_module_request)
BTF_ID(func, bpf_lsm_kernfs_init_security)
+
+#ifdef CONFIG_KEYS
BTF_ID(func, bpf_lsm_key_free)
+#endif /* CONFIG_KEYS */
+
BTF_ID(func, bpf_lsm_mmap_file)
BTF_ID(func, bpf_lsm_netlink_send)
BTF_ID(func, bpf_lsm_path_notify)
BTF_ID(func, bpf_lsm_sb_statfs)
BTF_ID(func, bpf_lsm_sb_umount)
BTF_ID(func, bpf_lsm_settime)
+
+#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_socket_accept)
BTF_ID(func, bpf_lsm_socket_bind)
BTF_ID(func, bpf_lsm_socket_connect)
BTF_ID(func, bpf_lsm_socket_sendmsg)
BTF_ID(func, bpf_lsm_socket_shutdown)
BTF_ID(func, bpf_lsm_socket_socketpair)
+#endif /* CONFIG_SECURITY_NETWORK */
+
BTF_ID(func, bpf_lsm_syslog)
BTF_ID(func, bpf_lsm_task_alloc)
BTF_ID(func, bpf_lsm_task_getsecid)
* bpf_local_storage_update expects the owner to have a
* valid storage pointer.
*/
- if (!task_storage_ptr(task))
+ if (!task || !task_storage_ptr(task))
return (unsigned long)NULL;
sdata = task_storage_lookup(task, map, true);
BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
task)
{
+ if (!task)
+ return -EINVAL;
+
/* This helper must only be called from places where the lifetime of the task
* is guaranteed. Either by being refcounted or by being protected
* by an RCU read-side critical section.
return -ENOTSUPP;
}
- if (btf_data_size == hdr->hdr_len) {
+ if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
btf_verifier_log(env, "No data");
return -EINVAL;
}
if (ctx.optlen != 0) {
*optlen = ctx.optlen;
*kernel_optval = ctx.optval;
+ /* export and don't free sockopt buf */
+ return 0;
}
}
out:
- if (ret)
- sockopt_free_buf(&ctx);
+ sockopt_free_buf(&ctx);
return ret;
}
goto out;
}
+ if (ctx.optlen < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
if (copy_from_user(ctx.optval, optval,
min(ctx.optlen, max_optlen)) != 0) {
ret = -EFAULT;
goto out;
}
- if (ctx.optlen > max_optlen) {
+ if (ctx.optlen > max_optlen || ctx.optlen < 0) {
ret = -EFAULT;
goto out;
}
}
const struct bpf_func_proto bpf_map_peek_elem_proto = {
- .func = bpf_map_pop_elem,
+ .func = bpf_map_peek_elem,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
LIBBPF_A = $(obj)/libbpf.a
LIBBPF_OUT = $(abspath $(obj))
+# Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test
+# in tools/scripts/Makefile.include always succeeds when building the kernel
+# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg".
$(LIBBPF_A):
- $(Q)$(MAKE) -C $(LIBBPF_SRCS) OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
+ $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
-I $(srctree)/tools/lib/ -Wno-unused-result
out_put_prog:
if (tgt_prog_fd && tgt_prog)
bpf_prog_put(tgt_prog);
- bpf_prog_put(prog);
return err;
}
tp_name = prog->aux->attach_func_name;
break;
}
- return bpf_tracing_prog_attach(prog, 0, 0);
+ err = bpf_tracing_prog_attach(prog, 0, 0);
+ if (err >= 0)
+ return err;
+ goto out_put_prog;
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
if (strncpy_from_user(buf,
case PTR_TO_RDWR_BUF:
case PTR_TO_RDWR_BUF_OR_NULL:
case PTR_TO_PERCPU_BTF_ID:
+ case PTR_TO_MEM:
+ case PTR_TO_MEM_OR_NULL:
return true;
default:
return false;
return res < a;
}
-static bool signed_add32_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
{
/* Do the add in u32, where overflow is well-defined */
s32 res = (s32)((u32)a + (u32)b);
return res < a;
}
-static bool signed_sub_overflows(s32 a, s32 b)
+static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
s64 res = (s64)((u64)a - (u64)b);
static bool signed_sub32_overflows(s32 a, s32 b)
{
- /* Do the sub in u64, where overflow is well-defined */
+ /* Do the sub in u32, where overflow is well-defined */
s32 res = (s32)((u32)a - (u32)b);
if (b < 0)
atomic64_set(&map->sum_sq_unmap, 0);
atomic64_set(&map->loops, 0);
- for (i = 0; i < threads; i++)
+ for (i = 0; i < threads; i++) {
+ get_task_struct(tsk[i]);
wake_up_process(tsk[i]);
+ }
msleep_interruptible(map->bparam.seconds * 1000);
}
out:
+ for (i = 0; i < threads; i++)
+ put_task_struct(tsk[i]);
put_device(map->dev);
kfree(tsk);
return ret;
*/
static inline bool report_single_step(unsigned long work)
{
- if (!(work & SYSCALL_WORK_SYSCALL_EMU))
+ if (work & SYSCALL_WORK_SYSCALL_EMU)
return false;
return !!(current_thread_info()->flags & _TIF_SINGLESTEP);
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
- for (i = 0; i < UCOUNT_COUNTS; i++) {
+ for (i = 0; i < UCOUNT_COUNTS; i++)
init_user_ns.ucount_max[i] = max_threads/2;
- }
#ifdef CONFIG_VMAP_STACK
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
{
enum pid_type type;
- for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+ for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
INIT_HLIST_NODE(&task->pid_links[type]);
- }
}
static inline void
return pi_state;
}
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
+ struct task_struct *new_owner)
+{
+ struct task_struct *old_owner = pi_state->owner;
+
+ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
+
+ if (old_owner) {
+ raw_spin_lock(&old_owner->pi_lock);
+ WARN_ON(list_empty(&pi_state->list));
+ list_del_init(&pi_state->list);
+ raw_spin_unlock(&old_owner->pi_lock);
+ }
+
+ if (new_owner) {
+ raw_spin_lock(&new_owner->pi_lock);
+ WARN_ON(!list_empty(&pi_state->list));
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+ raw_spin_unlock(&new_owner->pi_lock);
+ }
+}
+
static void get_pi_state(struct futex_pi_state *pi_state)
{
WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- struct task_struct *owner;
unsigned long flags;
raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
- owner = pi_state->owner;
- if (owner) {
- raw_spin_lock(&owner->pi_lock);
- list_del_init(&pi_state->list);
- raw_spin_unlock(&owner->pi_lock);
- }
- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+ pi_state_update_owner(pi_state, NULL);
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
}
* FUTEX_OWNER_DIED bit. See [4]
*
* [10] There is no transient state which leaves owner and user space
- * TID out of sync.
+ * TID out of sync, except for one error case where the kernel is denied
+ * write access to the user address; see fixup_pi_state_owner().
*
*
* Serialization and lifetime rules:
ret = -EINVAL;
}
- if (ret)
- goto out_unlock;
-
- /*
- * This is a point of no return; once we modify the uval there is no
- * going back and subsequent operations must not fail.
- */
-
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
-
- raw_spin_lock(&new_owner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &new_owner->pi_state_list);
- pi_state->owner = new_owner;
- raw_spin_unlock(&new_owner->pi_lock);
-
- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ if (!ret) {
+ /*
+ * This is a point of no return; once we modified the uval
+ * there is no going back and subsequent operations must
+ * not fail.
+ */
+ pi_state_update_owner(pi_state, new_owner);
+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ }
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(q->lock_ptr);
}
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *argowner)
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
{
struct futex_pi_state *pi_state = q->pi_state;
- u32 uval, curval, newval;
struct task_struct *oldowner, *newowner;
- u32 newtid;
- int ret, err = 0;
-
- lockdep_assert_held(q->lock_ptr);
-
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ u32 uval, curval, newval, newtid;
+ int err = 0;
oldowner = pi_state->owner;
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 0;
}
if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
- /* We got the lock after all, nothing to fix. */
- ret = 0;
- goto out_unlock;
+ /* We got the lock. pi_state is correct. Tell caller. */
+ return 1;
}
/*
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 1;
}
newowner = argowner;
}
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
- if (pi_state->owner != NULL) {
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
- }
+ pi_state_update_owner(pi_state, newowner);
- pi_state->owner = newowner;
-
- raw_spin_lock(&newowner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &newowner->pi_state_list);
- raw_spin_unlock(&newowner->pi_lock);
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
- return 0;
+ return argowner == current;
/*
* In order to reschedule or handle a page fault, we need to drop the
switch (err) {
case -EFAULT:
- ret = fault_in_user_writeable(uaddr);
+ err = fault_in_user_writeable(uaddr);
break;
case -EAGAIN:
cond_resched();
- ret = 0;
+ err = 0;
break;
default:
WARN_ON_ONCE(1);
- ret = err;
break;
}
/*
* Check if someone else fixed it for us:
*/
- if (pi_state->owner != oldowner) {
- ret = 0;
- goto out_unlock;
- }
+ if (pi_state->owner != oldowner)
+ return argowner == current;
- if (ret)
- goto out_unlock;
+ /* Retry if err was -EAGAIN or the fault-in succeeded */
+ if (!err)
+ goto retry;
- goto retry;
+ /*
+ * fault_in_user_writeable() failed so user state is immutable. At
+ * best we can make the kernel state consistent, but user state will
+ * most likely be hosed and any subsequent unlock operation will be
+ * rejected due to PI futex rule [10].
+ *
+ * Ensure that the rtmutex owner is also the pi_state owner despite
+ * the user space value claiming something different. There is no
+ * point in unlocking the rtmutex if current is the owner as it
+ * would need to wait until the next waiter has taken the rtmutex
+ * to guarantee consistent state. Keep it simple. Userspace asked
+ * for this wrecked state.
+ *
+ * The rtmutex has an owner - either current or some other
+ * task. See the EAGAIN loop above.
+ */
+ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
-out_unlock:
+ return err;
+}
+
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
+{
+ struct futex_pi_state *pi_state = q->pi_state;
+ int ret;
+
+ lockdep_assert_held(q->lock_ptr);
+
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ ret = __fixup_pi_state_owner(uaddr, q, argowner);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
}
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
- int ret = 0;
-
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
* stable state, anything else needs more attention.
*/
if (q->pi_state->owner != current)
- ret = fixup_pi_state_owner(uaddr, q, current);
- return ret ? ret : locked;
+ return fixup_pi_state_owner(uaddr, q, current);
+ return 1;
}
/*
* Another speculative read; pi_state->owner == current is unstable
* but needs our attention.
*/
- if (q->pi_state->owner == current) {
- ret = fixup_pi_state_owner(uaddr, q, NULL);
- return ret;
- }
+ if (q->pi_state->owner == current)
+ return fixup_pi_state_owner(uaddr, q, NULL);
/*
* Paranoia check. If we did not take the lock, then we should not be
- * the owner of the rt_mutex.
+ * the owner of the rt_mutex. Warn and establish consistent state.
*/
- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
- "pi-state %p\n", ret,
- q->pi_state->pi_mutex.owner,
- q->pi_state->owner);
- }
+ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
+ return fixup_pi_state_owner(uaddr, q, current);
- return ret;
+ return 0;
}
/**
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to;
- struct futex_pi_state *pi_state = NULL;
struct task_struct *exiting = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_owner() faulted and was unable to handle the fault, unlock
- * it and return the fault to userspace.
- */
- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
-
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
goto out;
out_unlock_put_key:
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to;
- struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
+ /*
+ * Adjust the return value. It's either -EFAULT or
+ * success (1) but the caller expects 0 for success.
+ */
+ ret = ret < 0 ? ret : 0;
}
} else {
struct rt_mutex *pi_mutex;
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle
- * the fault, unlock the rt_mutex and return the fault to
- * userspace.
- */
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
- select CONSTRUCTORS if !UML
+ select CONSTRUCTORS
default n
help
This option enables gcov-based code profiling (e.g. for code coverage
rcu_read_unlock();
return res;
}
+EXPORT_SYMBOL_GPL(irq_check_status_bit);
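/*
 * Usage sketch enabled by the export (illustrative condition only):
 * modular code can now test IRQ status bits directly.
 */
if (irq_check_status_bit(irq, IRQ_PER_CPU))
	continue;	/* e.g. skip per-CPU interrupts in a scan loop */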
struct msi_domain_ops *ops = info->ops;
struct irq_data *irq_data;
struct msi_desc *desc;
- msi_alloc_info_t arg;
+ msi_alloc_info_t arg = { };
int i, ret, virq;
bool can_reserve;
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
- lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
thaw_processes();
Restore_console:
pm_restore_console();
- unlock_system_sleep();
}
#endif
return !offset;
}
-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+/**
+ * kprobe_on_func_entry() -- check whether given address is function entry
+ * @addr: Target address
+ * @sym: Target symbol name
+ * @offset: The offset from the symbol or the address
+ *
+ * This checks whether the given @addr+@offset or @sym+@offset is on the
+ * function entry address or not.
+ * This returns 0 if it is the function entry, or -EINVAL if it is not.
+ * It returns -ENOENT if the symbol or address lookup fails.
+ * The caller must pass either @addr or @sym (the other must be NULL);
+ * otherwise this returns -EINVAL.
+ */
+int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
if (IS_ERR(kp_addr))
- return false;
+ return PTR_ERR(kp_addr);
- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
- !arch_kprobe_on_func_entry(offset))
- return false;
+ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
+ return -ENOENT;
- return true;
+ if (!arch_kprobe_on_func_entry(offset))
+ return -EINVAL;
+
+ return 0;
}
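/*
 * Sketch of handling the new errno-style return (values per the
 * kerneldoc above); "vfs_read" is just an illustrative symbol.
 */
switch (kprobe_on_func_entry(NULL, "vfs_read", 0)) {
case 0:		/* the symbol+offset is a function entry */
	break;
case -EINVAL:	/* resolvable, but not a function entry */
	break;
case -ENOENT:	/* symbol or address lookup failed */
	break;
}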
int register_kretprobe(struct kretprobe *rp)
{
- int ret = 0;
+ int ret;
struct kretprobe_instance *inst;
int i;
void *addr;
- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
+ if (ret)
+ return ret;
+
+ /* If only rp->kp.addr is specified, check reregistering kprobes */
+ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
return -EINVAL;
if (kretprobe_blacklist_size) {
do_exit(ret);
}
-/* called from do_fork() to get node information for about to be created task */
+/* called from kernel_clone() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
return p;
kthread_bind(p, cpu);
/* CPU hotplug need to bind once again when unparking the thread. */
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
to_kthread(p)->cpu = cpu;
return p;
}
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return;
+
+ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+ if (cpu < 0) {
+ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ return;
+ }
+
+ kthread->cpu = cpu;
+ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return false;
+
+ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
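/*
 * Sketch of the intended pairing (mirrors the smpboot change below):
 * bind a freshly created kthread and mark it strictly per-CPU so the
 * scheduler keeps it in place across hotplug.
 */
p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), "foo/%d", cpu);
if (!IS_ERR(p)) {
	kthread_bind(p, cpu);
	kthread_set_per_cpu(p, cpu);	/* sets KTHREAD_IS_PER_CPU */
}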
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
-static inline bool lockdep_enabled(void)
+static __always_inline bool lockdep_enabled(void)
{
if (!debug_locks)
return false;
/*
* Check whether we follow the irq-flags state precisely:
*/
-static void check_flags(unsigned long flags)
+static noinstr void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
if (!debug_locks)
return;
+ /* Get the warning out.. */
+ instrumentation_begin();
+
if (irqs_disabled_flags(flags)) {
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
printk("possible reason: unannotated irqs-off.\n");
if (!debug_locks)
print_irqtrace_events(current);
+
+ instrumentation_end();
#endif
}
* possible because it belongs to the pi_state which is about to be freed
* and it is not longer visible to other tasks.
*/
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner)
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_set_owner(lock, NULL);
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
unsigned int flags, int error)
{
if (!error) {
- flush_swap_writer(handle);
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
+ flush_swap_writer(handle);
}
if (error)
* done:
*
* - Add prefix for each line.
+ * - Drop truncated lines that no longer fit into the buffer.
* - Add the trailing newline that has been removed in vprintk_store().
- * - Drop truncated lines that do not longer fit into the buffer.
+ * - Add a string terminator.
+ *
+ * Since the produced string is always terminated, the maximum possible
+ * return value is @r->text_buf_size - 1.
*
* Return: The length of the updated/prepared text, including the added
- * prefixes and the newline. The dropped line(s) are not counted.
+ * prefixes and the newline. The terminator is not counted. The dropped
+ * line(s) are not counted.
*/
static size_t record_print_text(struct printk_record *r, bool syslog,
bool time)
/*
* Truncate the text if there is not enough space to add the
- * prefix and a trailing newline.
+ * prefix and a trailing newline and a terminator.
*/
- if (len + prefix_len + text_len + 1 > buf_size) {
+ if (len + prefix_len + text_len + 1 + 1 > buf_size) {
/* Drop even the current line if no space. */
- if (len + prefix_len + line_len + 1 > buf_size)
+ if (len + prefix_len + line_len + 1 + 1 > buf_size)
break;
- text_len = buf_size - len - prefix_len - 1;
+ text_len = buf_size - len - prefix_len - 1 - 1;
truncated = true;
}
memmove(text + prefix_len, text, text_len);
memcpy(text, prefix, prefix_len);
+ /*
+ * Increment the prepared length to include the text and
+ * prefix that were just moved+copied. Also increment for the
+ * newline at the end of this line. If this is the last line,
+ * there is no newline, but it will be added immediately below.
+ */
len += prefix_len + line_len + 1;
-
if (text_len == line_len) {
/*
- * Add the trailing newline removed in
- * vprintk_store().
+ * This is the last line. Add the trailing newline
+ * removed in vprintk_store().
*/
text[prefix_len + line_len] = '\n';
break;
text_len -= line_len + 1;
}
+ /*
+ * If a buffer was provided, it will be terminated. Space for the
+ * string terminator is guaranteed to be available. The terminator is
+ * not counted in the return value.
+ */
+ if (buf_size > 0)
+ r->text_buf[len] = 0;
+
return len;
}
while (prb_read_valid_info(prb, seq, &info, &line_count)) {
if (r.info->seq >= dumper->next_seq)
break;
- l += get_record_print_text_size(&info, line_count, true, time);
+ l += get_record_print_text_size(&info, line_count, syslog, time);
seq = r.info->seq + 1;
}
&info, &line_count)) {
if (r.info->seq >= dumper->next_seq)
break;
- l -= get_record_print_text_size(&info, line_count, true, time);
+ l -= get_record_print_text_size(&info, line_count, syslog, time);
seq = r.info->seq + 1;
}
/* Caller interested in the line count? */
if (line_count)
- *line_count = count_lines(data, data_size);
+ *line_count = count_lines(data, len);
/* Caller interested in the data content? */
if (!buf || !buf_size)
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
+ /* When not in the task's cpumask, no point in looking further. */
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
- if (is_per_cpu_kthread(p) || is_migration_disabled(p))
+ /* migrate_disabled() must be allowed to finish. */
+ if (is_migration_disabled(p))
return cpu_online(cpu);
- return cpu_active(cpu);
+ /* Non-kernel threads are not allowed during either online or offline. */
+ if (!(p->flags & PF_KTHREAD))
+ return cpu_active(cpu);
+
+ /* KTHREAD_IS_PER_CPU is always allowed. */
+ if (kthread_is_per_cpu(p))
+ return cpu_online(cpu);
+
+ /* Regular kernel threads don't get to stay during offline. */
+ if (cpu_rq(cpu)->balance_push)
+ return false;
+
+ /* But are allowed during online. */
+ return cpu_online(cpu);
}
/*
if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
/*
- * Kernel threads are allowed on online && !active CPUs.
+ * Kernel threads are allowed on online && !active CPUs,
+ * however, during cpu-hot-unplug, even these might get pushed
+ * away if not KTHREAD_IS_PER_CPU.
*
* Specifically, migration_disabled() tasks must not fail the
* cpumask_any_and_distribute() pick below, esp. so on
__do_set_cpus_allowed(p, new_mask, flags);
- if (p->flags & PF_KTHREAD) {
- /*
- * For kernel threads that do indeed end up on online &&
- * !active we want to ensure they are strict per-CPU threads.
- */
- WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
- !cpumask_intersects(new_mask, cpu_active_mask) &&
- p->nr_cpus_allowed != 1);
- }
-
return affine_move_task(rq, p, &rf, dest_cpu, flags);
out:
static inline bool ttwu_queue_cond(int cpu, int wake_flags)
{
+ /*
+ * Do not complicate things with the async wake_list while the CPU is
+ * in hotplug state.
+ */
+ if (!cpu_active(cpu))
+ return false;
+
/*
* If the CPU does not share cache, then queue the task on the
* remote rqs wakelist to avoid accessing remote data.
/*
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
+ *
+ * XXX: the idle task does not match kthread_is_per_cpu() due to
+ * histerical raisins.
*/
- if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+ if (rq->idle == push_task ||
+ ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+ is_migration_disabled(push_task)) {
+
/*
* If this is the idle task on the outgoing CPU try to wake
* up the hotplug control thread which might wait for the
/*
* At this point need_resched() is true and we'll take the loop in
* schedule(). The next pick is obviously going to be the stop task
- * which is_per_cpu_kthread() and will push this task away.
+ * which kthread_is_per_cpu() and will push this task away.
*/
raw_spin_lock(&rq->lock);
}
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
- if (on)
+ rq->balance_push = on;
+ if (on) {
+ WARN_ON_ONCE(rq->balance_callback);
rq->balance_callback = &balance_push_callback;
- else
+ } else if (rq->balance_callback == &balance_push_callback) {
rq->balance_callback = NULL;
+ }
rq_unlock_irqrestore(rq, &rf);
}
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
+ /*
+ * Make sure that when the hotplug state machine does a roll-back
+ * we clear balance_push. Ideally that would happen earlier...
+ */
balance_push_set(cpu, false);
#ifdef CONFIG_SCHED_SMT
int ret;
set_cpu_active(cpu, false);
+
+ /*
+ * From this point forward, this CPU will refuse to run any task that
+ * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+ * push those tasks away until this gets cleared, see
+ * sched_cpu_dying().
+ */
+ balance_push_set(cpu, true);
+
/*
- * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
- * users of this state to go away such that all new such users will
- * observe it.
+ * We've cleared cpu_active_mask / set balance_push, wait for all
+ * preempt-disabled and RCU users of this state to go away such that
+ * all new such users will observe it.
+ *
+ * Specifically, we rely on ttwu to no longer target this CPU, see
+ * ttwu_queue_cond() and is_cpu_allowed().
*
* Do sync before park smpboot threads to take care the rcu boost case.
*/
synchronize_rcu();
- balance_push_set(cpu, true);
-
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
update_rq_clock(rq);
atomic_long_add(delta, &calc_load_tasks);
}
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+ struct task_struct *g, *p;
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_held(&rq->lock);
+
+ printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+ for_each_process_thread(g, p) {
+ if (task_cpu(p) != cpu)
+ continue;
+
+ if (!task_on_rq_queued(p))
+ continue;
+
+ printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+ }
+}
+
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
- BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+ if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+ WARN(true, "Dying CPU not properly vacated!");
+ dump_rq_tasks(rq, KERN_WARNING);
+ }
rq_unlock_irqrestore(rq, &rf);
+ /*
+ * Now that the CPU is offline, make sure we're welcome
+ * to new tasks once we come back up.
+ */
+ balance_push_set(cpu, false);
+
calc_load_migrate(rq);
update_max_interval();
nohz_balance_exit_idle(rq);
unsigned long cpu_capacity_orig;
struct callback_head *balance_callback;
+ unsigned char balance_push;
unsigned char nohz_idle_balance;
unsigned char idle_balance;
struct signal_struct *signal = current->signal;
int signr;
+ if (unlikely(current->task_works))
+ task_work_run();
+
/*
* For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
* that the arch handlers don't all have to do it. If we get here
return true;
}
-static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
+ siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
/*
kfree(td);
return PTR_ERR(tsk);
}
+ kthread_set_per_cpu(tsk, cpu);
/*
* Park the thread so that it could start right on the CPU
* when it is available.
static void sync_hw_clock(struct work_struct *work);
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
-#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)
+#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
if (retry)
- exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
+ exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
else
exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
/**
* ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
*
- * Returns the wall clock seconds since 1970. This replaces the
- * get_seconds() interface which is not y2038 safe on 32bit systems.
+ * Returns the wall clock seconds since 1970.
*
* For 64bit systems the fast access to tk->xtime_sec is preserved. On
* 32bit systems the access must be protected with the sequence
}
if (t->ret_stack == NULL) {
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->curr_ret_stack = -1;
t->curr_ret_depth = -1;
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->ftrace_timestamp = 0;
/* make curr_ret_stack visible before we add the ret_stack */
/* non overwrite screws up the latency tracers */
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+ /* without pause, we will produce garbage if another latency occurs */
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
tr->max_latency = 0;
irqsoff_trace = tr;
{
int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+ int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
stop_irqsoff_tracer(tr, is_graph(tr));
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
ftrace_reset_array_ops(tr);
irqsoff_busy = false;
{
struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
- return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
+ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
+ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}
bool trace_kprobe_error_injectable(struct trace_event_call *call)
}
if (is_return)
flags |= TPARG_FL_RETURN;
- if (kprobe_on_func_entry(NULL, symbol, offset))
+ ret = kprobe_on_func_entry(NULL, symbol, offset);
+ if (ret == 0)
flags |= TPARG_FL_FENTRY;
- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
+ /* Defer the ENOENT case until register kprobe */
+ if (ret == -EINVAL && is_return) {
trace_probe_log_err(0, BAD_RETPROBE);
goto parse_error;
}
{
mutex_lock(&wq_pool_attach_mutex);
- /*
- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
- * online CPUs. It'll be re-applied when any of the CPUs come up.
- */
- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
/*
* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
* stable across this function. See the comments above the flag
*/
if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;
+ else
+ kthread_set_per_cpu(worker->task, pool->cpu);
+
+ if (worker->rescue_wq)
+ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;
mutex_lock(&wq_pool_attach_mutex);
+ kthread_set_per_cpu(worker->task, -1);
list_del(&worker->node);
worker->pool = NULL;
raw_spin_unlock_irq(&pool->lock);
- for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+ for_each_pool_worker(worker, pool) {
+ kthread_set_per_cpu(worker->task, -1);
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+ }
mutex_unlock(&wq_pool_attach_mutex);
* of all workers first and then clear UNBOUND. As we're called
* from CPU_ONLINE, the following shouldn't fail.
*/
- for_each_pool_worker(worker, pool)
+ for_each_pool_worker(worker, pool) {
+ kthread_set_per_cpu(worker->task, pool->cpu);
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
+ }
raw_spin_lock_irq(&pool->lock);
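/*
 * Editorial sketch (not part of the patch): kthread_set_per_cpu()
 * only records whether a kthread is a per-CPU one (cpu >= 0) or
 * clears that mark (cpu == -1); the affinity mask itself is still
 * applied with set_cpus_allowed_ptr(), as the hunks above do.
 */
static void example_mark_bound(struct task_struct *task, int cpu)
{
	kthread_set_per_cpu(task, cpu);			/* record binding */
	set_cpus_allowed_ptr(task, cpumask_of(cpu));	/* apply mask */
}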
config UBSAN_UNSIGNED_OVERFLOW
bool "Perform checking for unsigned arithmetic overflow"
depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+ depends on !X86_32 # avoid excessive stack usage on x86-32/clang
help
This option enables -fsanitize=unsigned-integer-overflow which checks
for overflow of any arithmetic operations with unsigned integers. This
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
+
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset)
+{
+ struct alignment_assumption_data *data = _data;
+ unsigned long real_ptr;
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "alignment-assumption");
+
+ if (offset)
+ pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
+ align, offset, data->type->type_name);
+ else
+ pr_err("assumption of %lu byte alignment for pointer of type %s failed",
+ align, data->type->type_name);
+
+ real_ptr = ptr - offset;
+ pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
+ offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
+ real_ptr & (align - 1));
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
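/*
 * Editorial sketch (not part of the patch): with clang's
 * -fsanitize=alignment, a violated alignment assumption such as the
 * one below is reported through __ubsan_handle_alignment_assumption();
 * the function name here is illustrative only.
 */
static void *example_assume_aligned(void *p)
{
	/* The compiler may now emit 16-byte-aligned accesses through
	 * the returned pointer; if p is misaligned at runtime, UBSAN
	 * prints the report implemented above. */
	return __builtin_assume_aligned(p, 16);
}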
struct type_descriptor *type;
};
+struct alignment_assumption_data {
+ struct source_location location;
+ struct source_location assumption_location;
+ struct type_descriptor *type;
+};
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;
{
unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
unsigned int nr_scanned = 0;
- unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
+ unsigned long low_pfn, min_pfn, highest = 0;
unsigned long nr_isolated = 0;
unsigned long distance;
struct page *page = NULL;
struct page *freepage;
unsigned long flags;
unsigned int order_scanned = 0;
+ unsigned long high_pfn = 0;
if (!area->nr_free)
continue;
XA_STATE(xas, &mapping->i_pages, offset);
int huge = PageHuge(page);
int error;
+ bool charged = false;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapBacked(page), page);
error = mem_cgroup_charge(page, current->mm, gfp);
if (error)
goto error;
+ charged = true;
}
gfp &= GFP_RECLAIM_MASK;
if (xas_error(&xas)) {
error = xas_error(&xas);
+ if (charged)
+ mem_cgroup_uncharge(page);
goto error;
}
}
#endif
+#ifndef arch_kmap_local_set_pte
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+ set_pte_at(mm, vaddr, ptep, ptev)
+#endif
+
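/*
 * Editorial sketch (not part of the patch): an architecture that
 * needs extra work when installing a kmap_local PTE can provide its
 * own hook in asm/highmem.h before the generic code is included;
 * __example_flush_cache() is a hypothetical helper.
 */
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)		\
	do {							\
		__example_flush_cache(vaddr);			\
		set_pte_at(mm, vaddr, ptep, ptev);		\
	} while (0)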
/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte - idx)));
pteval = pfn_pte(pfn, prot);
- set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+ arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
arch_kmap_local_post_map(vaddr, pteval);
current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
preempt_enable();
{
spinlock_t *ptl;
struct mmu_notifier_range range;
- bool was_locked = false;
+ bool do_unlock_page = false;
pmd_t _pmd;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
VM_BUG_ON(freeze && !page);
if (page) {
VM_WARN_ON_ONCE(!PageLocked(page));
- was_locked = true;
if (page != pmd_page(*pmd))
goto out;
}
if (pmd_trans_huge(*pmd)) {
if (!page) {
page = pmd_page(*pmd);
- if (unlikely(!trylock_page(page))) {
- get_page(page);
- _pmd = *pmd;
- spin_unlock(ptl);
- lock_page(page);
- spin_lock(ptl);
- if (unlikely(!pmd_same(*pmd, _pmd))) {
- unlock_page(page);
+ /*
+ * An anonymous page must be locked, to ensure that a
+ * concurrent reuse_swap_page() sees stable mapcount;
+ * but reuse_swap_page() is not used on shmem or file,
+ * and page lock must not be taken when zap_pmd_range()
+ * calls __split_huge_pmd() while i_mmap_lock is held.
+ */
+ if (PageAnon(page)) {
+ if (unlikely(!trylock_page(page))) {
+ get_page(page);
+ _pmd = *pmd;
+ spin_unlock(ptl);
+ lock_page(page);
+ spin_lock(ptl);
+ if (unlikely(!pmd_same(*pmd, _pmd))) {
+ unlock_page(page);
+ put_page(page);
+ page = NULL;
+ goto repeat;
+ }
put_page(page);
- page = NULL;
- goto repeat;
}
- put_page(page);
+ do_unlock_page = true;
}
}
if (PageMlocked(page))
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
spin_unlock(ptl);
- if (!was_locked && page)
+ if (do_unlock_page)
unlock_page(page);
/*
* No need to double call mmu_notifier->invalidate_range() callback.
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static inline bool PageHugeFreed(struct page *head)
+{
+ return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, 0);
+}
+
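/*
 * Editorial sketch (not part of the patch): the marker is parked in
 * page_private() of the fifth subpage (head + 4), which hugetlb does
 * not otherwise use; -1UL encodes "sitting on a hugetlb free list".
 */
static inline bool example_page_is_free_huge(struct page *head)
{
	/* same encoding the helpers above use */
	return page_private(head + 4) == -1UL;
}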
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
+ SetPageHugeFreed(page);
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
+ ClearPageHugeFreed(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
*/
bool page_huge_active(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return PageHead(page) && PagePrivate(&page[1]);
+ return PageHeadHuge(page) && PagePrivate(&page[1]);
}
/* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
SetPagePrivate(&page[1]);
spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
+ ClearPageHugeFreed(page);
spin_unlock(&hugetlb_lock);
}
{
int rc = -EBUSY;
+retry:
/* Don't disrupt the normal path by needlessly holding hugetlb_lock */
if (!PageHuge(page))
return 0;
int nid = page_to_nid(head);
if (h->free_huge_pages - h->resv_huge_pages == 0)
goto out;
+
+ /*
+ * We should make sure that the page is already on the free list
+ * when it is dissolved.
+ */
+ if (unlikely(!PageHugeFreed(head))) {
+ spin_unlock(&hugetlb_lock);
+ cond_resched();
+
+ /*
+ * In theory we should return -EBUSY when we
+ * encounter this race, but the race window is
+ * quite small, so a retry has a good chance of
+ * dissolving the page after all; seizing that
+ * chance is an optimization of the dissolve
+ * success rate.
+ */
+ goto retry;
+ }
+
/*
* Move PageHWPoison flag from head page to the raw error page,
* which makes any subpages rather than the error page reusable.
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ int zeroed;
+
if ((--needed) < 0)
break;
/*
* This page is now managed by the hugetlb allocator and has
* no users -- drop the buddy allocator's reference.
*/
- VM_BUG_ON_PAGE(!put_page_testzero(page), page);
+ zeroed = put_page_testzero(page);
+ VM_BUG_ON_PAGE(!zeroed, page);
enqueue_huge_page(h, page);
}
free:
{
bool ret = true;
- VM_BUG_ON_PAGE(!PageHead(page), page);
spin_lock(&hugetlb_lock);
- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+ if (!PageHeadHuge(page) || !page_huge_active(page) ||
+ !get_page_unless_zero(page)) {
ret = false;
goto unlock;
}
#include "kasan.h"
-enum kasan_arg_mode {
- KASAN_ARG_MODE_DEFAULT,
- KASAN_ARG_MODE_OFF,
- KASAN_ARG_MODE_PROD,
- KASAN_ARG_MODE_FULL,
+enum kasan_arg {
+ KASAN_ARG_DEFAULT,
+ KASAN_ARG_OFF,
+ KASAN_ARG_ON,
};
enum kasan_arg_stacktrace {
KASAN_ARG_FAULT_PANIC,
};
-static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
+static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
/* Whether panic or disable tag checking on fault. */
bool kasan_flag_panic __ro_after_init;
-/* kasan.mode=off/prod/full */
-static int __init early_kasan_mode(char *arg)
+/* kasan=off/on */
+static int __init early_kasan_flag(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "off"))
- kasan_arg_mode = KASAN_ARG_MODE_OFF;
- else if (!strcmp(arg, "prod"))
- kasan_arg_mode = KASAN_ARG_MODE_PROD;
- else if (!strcmp(arg, "full"))
- kasan_arg_mode = KASAN_ARG_MODE_FULL;
+ kasan_arg = KASAN_ARG_OFF;
+ else if (!strcmp(arg, "on"))
+ kasan_arg = KASAN_ARG_ON;
else
return -EINVAL;
return 0;
}
-early_param("kasan.mode", early_kasan_mode);
+early_param("kasan", early_kasan_flag);
-/* kasan.stack=off/on */
+/* kasan.stacktrace=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
if (!arg)
* as this function is only called for MTE-capable hardware.
*/
- /* If KASAN is disabled, do nothing. */
- if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
+ /* If KASAN is disabled via command line, don't initialize it. */
+ if (kasan_arg == KASAN_ARG_OFF)
return;
hw_init_tags(KASAN_TAG_MAX);
/* kasan_init_hw_tags() is called once on boot CPU. */
void __init kasan_init_hw_tags(void)
{
- /* If hardware doesn't support MTE, do nothing. */
+ /* If hardware doesn't support MTE, don't initialize KASAN. */
if (!system_supports_mte())
return;
- /* Choose KASAN mode if kasan boot parameter is not provided. */
- if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
- if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
- kasan_arg_mode = KASAN_ARG_MODE_FULL;
- else
- kasan_arg_mode = KASAN_ARG_MODE_PROD;
- }
-
- /* Preset parameter values based on the mode. */
- switch (kasan_arg_mode) {
- case KASAN_ARG_MODE_DEFAULT:
- /* Shouldn't happen as per the check above. */
- WARN_ON(1);
- return;
- case KASAN_ARG_MODE_OFF:
- /* If KASAN is disabled, do nothing. */
+ /* If KASAN is disabled via command line, don't initialize it. */
+ if (kasan_arg == KASAN_ARG_OFF)
return;
- case KASAN_ARG_MODE_PROD:
- static_branch_enable(&kasan_flag_enabled);
- break;
- case KASAN_ARG_MODE_FULL:
- static_branch_enable(&kasan_flag_enabled);
- static_branch_enable(&kasan_flag_stacktrace);
- break;
- }
- /* Now, optionally override the presets. */
+ /* Enable KASAN. */
+ static_branch_enable(&kasan_flag_enabled);
switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
+ /*
+ * Default to enabling stack trace collection for
+ * debug kernels.
+ */
+ if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
+ static_branch_enable(&kasan_flag_stacktrace);
break;
case KASAN_ARG_STACKTRACE_OFF:
- static_branch_disable(&kasan_flag_stacktrace);
+ /* Do nothing, kasan_flag_stacktrace keeps its default value. */
break;
case KASAN_ARG_STACKTRACE_ON:
static_branch_enable(&kasan_flag_stacktrace);
switch (kasan_arg_fault) {
case KASAN_ARG_FAULT_DEFAULT:
+ /*
+ * Default to no panic on report.
+ * Do nothing, kasan_flag_panic keeps its default value.
+ */
break;
case KASAN_ARG_FAULT_REPORT:
- kasan_flag_panic = false;
+ /* Do nothing, kasan_flag_panic keeps its default value. */
break;
case KASAN_ARG_FAULT_PANIC:
+ /* Enable panic on report. */
kasan_flag_panic = true;
break;
}
if (kasan_pte_table(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
- IS_ALIGNED(next, PMD_SIZE))
+ IS_ALIGNED(next, PMD_SIZE)) {
pmd_clear(pmd);
- continue;
+ continue;
+ }
}
pte = pte_offset_kernel(pmd, addr);
kasan_remove_pte_table(pte, addr, next);
if (kasan_pmd_table(*pud)) {
if (IS_ALIGNED(addr, PUD_SIZE) &&
- IS_ALIGNED(next, PUD_SIZE))
+ IS_ALIGNED(next, PUD_SIZE)) {
pud_clear(pud);
- continue;
+ continue;
+ }
}
pmd = pmd_offset(pud, addr);
pmd_base = pmd_offset(pud, 0);
if (kasan_pud_table(*p4d)) {
if (IS_ALIGNED(addr, P4D_SIZE) &&
- IS_ALIGNED(next, P4D_SIZE))
+ IS_ALIGNED(next, P4D_SIZE)) {
p4d_clear(p4d);
- continue;
+ continue;
+ }
}
pud = pud_offset(p4d, addr);
kasan_remove_pud_table(pud, addr, next);
if (kasan_p4d_table(*pgd)) {
if (IS_ALIGNED(addr, PGDIR_SIZE) &&
- IS_ALIGNED(next, PGDIR_SIZE))
+ IS_ALIGNED(next, PGDIR_SIZE)) {
pgd_clear(pgd);
- continue;
+ continue;
+ }
}
p4d = p4d_offset(pgd, addr);
ret = kasan_populate_early_shadow(shadow_start, shadow_end);
if (ret)
- kasan_remove_zero_shadow(shadow_start,
- size >> KASAN_SHADOW_SCALE_SHIFT);
+ kasan_remove_zero_shadow(start, size);
return ret;
}
static inline bool addr_has_metadata(const void *addr)
{
- return true;
+ return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
*
* Find @size free area aligned to @align in the specified range and node.
*
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
* Return:
* Found address on success, 0 on failure.
*/
phys_addr_t end, int nid,
enum memblock_flags flags)
{
- phys_addr_t kernel_end, ret;
-
/* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
end == MEMBLOCK_ALLOC_KASAN)
/* avoid allocating the first page */
start = max_t(phys_addr_t, start, PAGE_SIZE);
end = max(start, end);
- kernel_end = __pa_symbol(_end);
-
- /*
- * try bottom-up allocation only when bottom-up mode
- * is set and @end is above the kernel image.
- */
- if (memblock_bottom_up() && end > kernel_end) {
- phys_addr_t bottom_up_start;
-
- /* make sure we will allocate above the kernel */
- bottom_up_start = max(start, kernel_end);
- /* ok, try bottom-up allocation first */
- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
- size, align, nid, flags);
- if (ret)
- return ret;
-
- /*
- * we always limit bottom-up allocation above the kernel,
- * but top-down allocation doesn't have the limit, so
- * retrying top-down allocation may succeed when bottom-up
- * allocation failed.
- *
- * bottom-up allocation is expected to be fail very rarely,
- * so we use WARN_ONCE() here to see the stack trace if
- * fail happens.
- */
- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
- "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
- }
-
- return __memblock_find_range_top_down(start, end, size, align, nid,
- flags);
+ if (memblock_bottom_up())
+ return __memblock_find_range_bottom_up(start, end, size, align,
+ nid, flags);
+ else
+ return __memblock_find_range_top_down(start, end, size, align,
+ nid, flags);
}
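/*
 * Editorial usage sketch (not part of the patch): bottom-up mode is
 * now honoured for the whole [start, end) range instead of only above
 * the kernel image. A hypothetical early-boot caller:
 */
static phys_addr_t __init example_early_alloc(void)
{
	memblock_set_bottom_up(true);	/* prefer low addresses */
	return memblock_phys_alloc(SZ_4K, SZ_4K);
}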
/**
}
/**
- * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
page_counter_uncharge(&memcg->kmem, nr_pages);
- page_counter_uncharge(&memcg->memory, nr_pages);
- if (do_memsw_account())
- page_counter_uncharge(&memcg->memsw, nr_pages);
+ refill_stock(memcg, nr_pages);
}
/**
return rc;
}
+static void put_ref_page(struct page *page)
+{
+ if (page)
+ put_page(page);
+}
+
/**
* soft_offline_page - Soft offline a page.
* @pfn: pfn to soft-offline
int soft_offline_page(unsigned long pfn, int flags)
{
int ret;
- struct page *page;
bool try_again = true;
+ struct page *page, *ref_page = NULL;
+
+ WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
if (!pfn_valid(pfn))
return -ENXIO;
+ if (flags & MF_COUNT_INCREASED)
+ ref_page = pfn_to_page(pfn);
+
/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
page = pfn_to_online_page(pfn);
- if (!page)
+ if (!page) {
+ put_ref_page(ref_page);
return -EIO;
+ }
if (PageHWPoison(page)) {
pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
- if (flags & MF_COUNT_INCREASED)
- put_page(page);
+ put_ref_page(ref_page);
return 0;
}
struct zone *oldzone, *newzone;
int dirty;
int expected_count = expected_page_refs(mapping, page) + extra_count;
+ int nr = thp_nr_pages(page);
if (!mapping) {
/* Anonymous page without mapping */
*/
newpage->index = page->index;
newpage->mapping = page->mapping;
- page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+ page_ref_add(newpage, nr); /* add cache reference */
if (PageSwapBacked(page)) {
__SetPageSwapBacked(newpage);
if (PageSwapCache(page)) {
if (PageTransHuge(page)) {
int i;
- for (i = 1; i < HPAGE_PMD_NR; i++) {
+ for (i = 1; i < nr; i++) {
xas_next(&xas);
xas_store(&xas, newpage);
}
* to one less reference.
* We know this isn't the last reference.
*/
- page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+ page_ref_unfreeze(page, expected_count - nr);
xas_unlock(&xas);
/* Leave irq disabled to prevent preemption while updating stats */
old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
- __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
- __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+ __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+ __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
if (PageSwapBacked(page) && !PageSwapCache(page)) {
- __dec_lruvec_state(old_lruvec, NR_SHMEM);
- __inc_lruvec_state(new_lruvec, NR_SHMEM);
+ __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+ __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
}
if (dirty && mapping_can_writeback(mapping)) {
- __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
- __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
- __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
- __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+ __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+ __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+ __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+ __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
}
}
local_irq_enable();
return -ENOSYS;
}
+ if (page_count(hpage) == 1) {
+ /* page was freed from under us. So we are done. */
+ putback_active_hugepage(hpage);
+ return MIGRATEPAGE_SUCCESS;
+ }
+
new_hpage = get_new_page(hpage, private);
if (!new_hpage)
return -ENOMEM;
/* s390's use of memset() could override KASAN redzones. */
kasan_disable_current();
for (i = 0; i < numpages; i++) {
+ u8 tag = page_kasan_tag(page + i);
page_kasan_tag_reset(page + i);
clear_highpage(page + i);
+ page_kasan_tag_set(page + i, tag);
}
kasan_enable_current();
}
void *obj)
{
if (unlikely(slab_want_init_on_free(s)) && obj)
- memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+ memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+ 0, sizeof(void *));
}
/*
stat(s, ALLOC_FASTPATH);
}
- maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
+ maybe_wipe_obj_freeptr(s, object);
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(kasan_reset_tag(object), 0, s->object_size);
int j;
for (j = 0; j < i; j++)
- memset(p[j], 0, s->object_size);
+ memset(kasan_reset_tag(p[j]), 0, s->object_size);
}
/* memcg and kmem_cache debug support */
s->kobj.kset = kset;
err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
- if (err) {
- kobject_put(&s->kobj);
+ if (err)
goto out;
- }
err = sysfs_create_group(&s->kobj, &slab_attr_group);
if (err)
kattr->test.repeat)
return -EINVAL;
- if (ctx_size_in < prog->aux->max_ctx_offset)
+ if (ctx_size_in < prog->aux->max_ctx_offset ||
+ ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
return -EINVAL;
if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
int br_mrp_ring_port_open(struct net_device *dev, u8 loc);
int br_mrp_in_port_open(struct net_device *dev, u8 loc);
+/* MRP protocol data units */
+struct br_mrp_tlv_hdr {
+ __u8 type;
+ __u8 length;
+};
+
+struct br_mrp_common_hdr {
+ __be16 seq_id;
+ __u8 domain[MRP_DOMAIN_UUID_LENGTH];
+};
+
+struct br_mrp_ring_test_hdr {
+ __be16 prio;
+ __u8 sa[ETH_ALEN];
+ __be16 port_role;
+ __be16 state;
+ __be16 transitions;
+ __be32 timestamp;
+} __attribute__((__packed__));
+
+struct br_mrp_in_test_hdr {
+ __be16 id;
+ __u8 sa[ETH_ALEN];
+ __be16 port_role;
+ __be16 state;
+ __be16 transitions;
+ __be32 timestamp;
+} __attribute__((__packed__));
+
#endif /* _BR_PRIVATE_MRP_H */
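/*
 * Editorial sketch (not part of the patch): MRP frames carry a
 * sequence of TLVs; the first header can be read with a bounds check.
 * 'payload' and 'len' are hypothetical parameters.
 */
static inline const struct br_mrp_tlv_hdr *
example_first_tlv(const u8 *payload, size_t len)
{
	if (len < sizeof(struct br_mrp_tlv_hdr))
		return NULL;
	return (const struct br_mrp_tlv_hdr *)payload;
}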
return -ERANGE;
}
+static int decode_con_secret(void **p, void *end, u8 *con_secret,
+ int *con_secret_len)
+{
+ int len;
+
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_need(p, end, len, bad);
+
+ dout("%s len %d\n", __func__, len);
+ if (con_secret) {
+ if (len > CEPH_MAX_CON_SECRET_LEN) {
+ pr_err("connection secret too big %d\n", len);
+ goto bad_memzero;
+ }
+ memcpy(con_secret, *p, len);
+ *con_secret_len = len;
+ }
+ memzero_explicit(*p, len);
+ *p += len;
+ return 0;
+
+bad_memzero:
+ memzero_explicit(*p, len);
+bad:
+ pr_err("failed to decode connection secret\n");
+ return -EINVAL;
+}
+
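/*
 * Editorial note (not part of the patch): memzero_explicit() here,
 * and kfree_sensitive() further below, are used so the compiler
 * cannot dead-store-eliminate the scrub of key material, as it
 * legally may with a plain memset() on a buffer that is never read
 * again.
 */
static void example_scrub_secret(u8 *secret, size_t len)
{
	memzero_explicit(secret, len);	/* store is guaranteed to happen */
}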
static int handle_auth_session_key(struct ceph_auth_client *ac,
void **p, void *end,
u8 *session_key, int *session_key_len,
dout("%s decrypted %d bytes\n", __func__, ret);
dend = dp + ret;
- ceph_decode_32_safe(&dp, dend, len, e_inval);
- if (len > CEPH_MAX_CON_SECRET_LEN) {
- pr_err("connection secret too big %d\n", len);
- return -EINVAL;
- }
-
- dout("%s connection secret len %d\n", __func__, len);
- if (con_secret) {
- memcpy(con_secret, dp, len);
- *con_secret_len = len;
- }
+ ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+ if (ret)
+ return ret;
}
/* service tickets */
{
void *dp, *dend;
u8 struct_v;
- int len;
int ret;
dp = *p + ceph_x_encrypt_offset();
ceph_decode_64_safe(&dp, dend, *nonce_plus_one, e_inval);
dout("%s nonce_plus_one %llu\n", __func__, *nonce_plus_one);
if (struct_v >= 2) {
- ceph_decode_32_safe(&dp, dend, len, e_inval);
- if (len > CEPH_MAX_CON_SECRET_LEN) {
- pr_err("connection secret too big %d\n", len);
- return -EINVAL;
- }
-
- dout("%s connection secret len %d\n", __func__, len);
- if (con_secret) {
- memcpy(con_secret, dp, len);
- *con_secret_len = len;
- }
+ ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+ if (ret)
+ return ret;
}
return 0;
key->len = ceph_decode_16(p);
ceph_decode_need(p, end, key->len, bad);
ret = set_secret(key, *p);
+ memzero_explicit(*p, key->len);
*p += key->len;
return ret;
void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
if (key) {
- kfree(key->key);
+ kfree_sensitive(key->key);
key->key = NULL;
if (key->tfm) {
crypto_free_sync_skcipher(key->tfm);
if (ret < 0)
return ret;
- BUG_ON(!con->in_msg ^ skip);
+ BUG_ON((!con->in_msg) ^ skip);
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
}
static int setup_crypto(struct ceph_connection *con,
- u8 *session_key, int session_key_len,
- u8 *con_secret, int con_secret_len)
+ const u8 *session_key, int session_key_len,
+ const u8 *con_secret, int con_secret_len)
{
unsigned int noio_flag;
- void *p;
int ret;
dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
return ret;
}
- p = con_secret;
- WARN_ON((unsigned long)p & crypto_aead_alignmask(con->v2.gcm_tfm));
- ret = crypto_aead_setkey(con->v2.gcm_tfm, p, CEPH_GCM_KEY_LEN);
+ WARN_ON((unsigned long)con_secret &
+ crypto_aead_alignmask(con->v2.gcm_tfm));
+ ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
if (ret) {
pr_err("failed to set gcm key: %d\n", ret);
return ret;
}
- p += CEPH_GCM_KEY_LEN;
WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
if (ret) {
aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &con->v2.gcm_wait);
- memcpy(&con->v2.in_gcm_nonce, p, CEPH_GCM_IV_LEN);
- memcpy(&con->v2.out_gcm_nonce, p + CEPH_GCM_IV_LEN, CEPH_GCM_IV_LEN);
+ memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
+ CEPH_GCM_IV_LEN);
+ memcpy(&con->v2.out_gcm_nonce,
+ con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
+ CEPH_GCM_IV_LEN);
return 0; /* auth_x, secure mode */
}
desc->tfm = con->v2.hmac_tfm;
ret = crypto_shash_init(desc);
if (ret)
- return ret;
+ goto out;
for (i = 0; i < kvec_cnt; i++) {
WARN_ON((unsigned long)kvecs[i].iov_base &
ret = crypto_shash_update(desc, kvecs[i].iov_base,
kvecs[i].iov_len);
if (ret)
- return ret;
+ goto out;
}
ret = crypto_shash_final(desc, hmac);
- if (ret)
- return ret;
+out:
shash_desc_zero(desc);
- return 0; /* auth_x, both plain and secure modes */
+ return ret; /* auth_x, both plain and secure modes */
}
static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
if (con->state != CEPH_CON_S_V2_AUTH) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
if (ret)
- return ret;
+ goto out;
ret = setup_crypto(con, session_key, session_key_len, con_secret,
con_secret_len);
if (ret)
- return ret;
+ goto out;
reset_out_kvecs(con);
ret = prepare_auth_signature(con);
if (ret) {
pr_err("prepare_auth_signature failed: %d\n", ret);
- return ret;
+ goto out;
}
con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
- return 0;
+
+out:
+ memzero_explicit(session_key_buf, sizeof(session_key_buf));
+ memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
+ return ret;
bad:
pr_err("failed to decode auth_done\n");
}
con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
+ memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
+ memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
if (con->v2.hmac_tfm) {
crypto_free_shash(con->v2.hmac_tfm);
/*
* handle incoming message
*/
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mon_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mon_client *monc = con->private;
int type = le16_to_cpu(msg->hdr.type);
* will come from the messenger workqueue, which is drained prior to
* mon_client destruction.
*/
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mon_get_con(struct ceph_connection *con)
{
return con;
}
-static void con_put(struct ceph_connection *con)
+static void mon_put_con(struct ceph_connection *con)
{
}
static const struct ceph_connection_operations mon_con_ops = {
- .get = con_get,
- .put = con_put,
- .dispatch = dispatch,
- .fault = mon_fault,
+ .get = mon_get_con,
+ .put = mon_put_con,
.alloc_msg = mon_alloc_msg,
+ .dispatch = mon_dispatch,
+ .fault = mon_fault,
.get_auth_request = mon_get_auth_request,
.handle_auth_reply_more = mon_handle_auth_reply_more,
.handle_auth_done = mon_handle_auth_done,
/*
* handle incoming message
*/
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc = osd->o_osdc;
return m;
}
-static struct ceph_msg *alloc_msg(struct ceph_connection *con,
- struct ceph_msg_header *hdr,
- int *skip)
+static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
+ struct ceph_msg_header *hdr,
+ int *skip)
{
struct ceph_osd *osd = con->private;
int type = le16_to_cpu(hdr->type);
/*
* Wrappers to refcount containing ceph_osd struct
*/
-static struct ceph_connection *get_osd_con(struct ceph_connection *con)
+static struct ceph_connection *osd_get_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
if (get_osd(osd))
return NULL;
}
-static void put_osd_con(struct ceph_connection *con)
+static void osd_put_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
put_osd(osd);
* Note: returned pointer is the address of a structure that's
* managed separately. Caller must *not* attempt to free it.
*/
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
- int *proto, int force_new)
+static struct ceph_auth_handshake *
+osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
return auth;
}
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int osd_add_authorizer_challenge(struct ceph_connection *con,
void *challenge_buf, int challenge_buf_len)
{
struct ceph_osd *o = con->private;
challenge_buf, challenge_buf_len);
}
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int osd_verify_authorizer_reply(struct ceph_connection *con)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
NULL, NULL, NULL, NULL);
}
-static int invalidate_authorizer(struct ceph_connection *con)
+static int osd_invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
}
static const struct ceph_connection_operations osd_con_ops = {
- .get = get_osd_con,
- .put = put_osd_con,
- .dispatch = dispatch,
- .get_authorizer = get_authorizer,
- .add_authorizer_challenge = add_authorizer_challenge,
- .verify_authorizer_reply = verify_authorizer_reply,
- .invalidate_authorizer = invalidate_authorizer,
- .alloc_msg = alloc_msg,
+ .get = osd_get_con,
+ .put = osd_put_con,
+ .alloc_msg = osd_alloc_msg,
+ .dispatch = osd_dispatch,
+ .fault = osd_fault,
.reencode_message = osd_reencode_message,
+ .get_authorizer = osd_get_authorizer,
+ .add_authorizer_challenge = osd_add_authorizer_challenge,
+ .verify_authorizer_reply = osd_verify_authorizer_reply,
+ .invalidate_authorizer = osd_invalidate_authorizer,
.sign_message = osd_sign_message,
.check_message_signature = osd_check_message_signature,
- .fault = osd_fault,
.get_auth_request = osd_get_auth_request,
.handle_auth_reply_more = osd_handle_auth_reply_more,
.handle_auth_done = osd_handle_auth_done,
}
}
+ if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+ netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_RX;
+ }
+
return features;
}
static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink_param_item *param_item;
struct sk_buff *msg;
int err;
static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink_port *devlink_port = info->user_ptr[1];
return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
devlink_port->index,
u64 rate, brate;
est_fetch_counters(est, &b);
- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
- brate -= (est->avbps >> est->ewma_log);
+ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
- rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
- rate -= (est->avpps >> est->ewma_log);
+ rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
write_seqcount_begin(&est->seq);
est->avbps += brate;
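/*
 * Editorial sketch (not part of the patch): the running average is
 * avg += (sample - avg) / 2^ewma_log. Shifting each term separately,
 * as above, keeps every shift count non-negative; the old combined
 * shift of (10 - ewma_log - intvl_log) bits was undefined behaviour
 * once ewma_log + intvl_log exceeded 10.
 */
static u64 example_ewma_step(u64 avg, u64 sample, unsigned int ewma_log)
{
	return avg + ((sample >> ewma_log) - (avg >> ewma_log));
}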
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
+ if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+ return -EINVAL;
+
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (!est)
return -ENOBUFS;
old = neigh->nud_state;
err = -EPERM;
- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
- (old & (NUD_NOARP | NUD_PERMANENT)))
- goto out;
if (neigh->dead) {
NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
+ new = old;
goto out;
}
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ (old & (NUD_NOARP | NUD_PERMANENT)))
+ goto out;
ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
len += NET_SKB_PAD;
- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+ /* If requested length is either too small or too big,
+ * we use kmalloc() for skb->head allocation.
+ */
+ if (len <= SKB_WITH_OVERHEAD(1024) ||
+ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
fld.saddr = dnet_select_source(dev_out, 0,
RT_SCOPE_HOST);
if (!fld.daddr)
- goto out;
+ goto done;
}
fld.flowidn_oif = LOOPBACK_IFINDEX;
res.type = RTN_LOCAL;
u8 net_id; /* for PRP, it occupies most significant 3 bits
* of lan_id
*/
- unsigned char sup_multicast_addr[ETH_ALEN];
+ unsigned char sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
+ /* Align to u16 boundary to avoid unaligned access
+ * in ether_addr_equal
+ */
#ifdef CONFIG_DEBUG_FS
struct dentry *node_tbl_root;
#endif
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newicsk->icsk_probes_out = 0;
+ newicsk->icsk_probes_tstamp = 0;
/* Deinitialize accept_queue to trap illegal accesses. */
memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
}
dev->needed_headroom = t_hlen + hlen;
- mtu -= (dev->hard_header_len + t_hlen);
+ mtu -= t_hlen;
if (mtu < IPV4_MIN_MTU)
mtu = IPV4_MIN_MTU;
nt = netdev_priv(dev);
t_hlen = nt->hlen + sizeof(struct iphdr);
dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ dev->max_mtu = IP_MAX_MTU - t_hlen;
ip_tunnel_add(itn, nt);
return nt;
int mtu;
tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
- pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
+ pkt_size = skb->len - tunnel_hlen;
if (df)
- mtu = dst_mtu(&rt->dst) - dev->hard_header_len
- - sizeof(struct iphdr) - tunnel_hlen;
+ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
else
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ int max_mtu = IP_MAX_MTU - t_hlen;
if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
mtu = ip_tunnel_bind_dev(dev);
if (tb[IFLA_MTU]) {
- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
+ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
- (unsigned int)(max - sizeof(struct iphdr)));
+ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
}
err = dev_set_mtu(dev, mtu);
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
- flow.flowi4_tos = RT_TOS(iph->tos);
+ flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
icsk->icsk_backoff = 0;
icsk->icsk_probes_out = 0;
+ icsk->icsk_probes_tstamp = 0;
icsk->icsk_rto = TCP_TIMEOUT_INIT;
icsk->icsk_rto_min = TCP_RTO_MIN;
icsk->icsk_delack_max = TCP_DELACK_MAX;
} else if (tcp_is_rack(sk)) {
u32 prior_retrans = tp->retrans_out;
- tcp_rack_mark_lost(sk);
+ if (tcp_rack_mark_lost(sk))
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
if (prior_retrans > tp->retrans_out)
*ack_flag |= FLAG_LOST_RETRANS;
}
return;
if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
icsk->icsk_backoff = 0;
+ icsk->icsk_probes_tstamp = 0;
inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
* Socket must be woken up by subsequent tcp_data_snd_check().
* This function is not for arbitrary use!
} else {
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- when, TCP_RTO_MAX);
+ when = tcp_clamp_probe0_to_user_timeout(sk, when);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
}
}
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
- /* If needed, reset TLP/RTO timer; RACK may later override this. */
- if (flag & FLAG_SET_XMIT_TIMER)
- tcp_set_xmit_timer(sk);
if (tcp_ack_is_dubious(sk, flag)) {
if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
&rexmit);
}
+ /* If needed, reset TLP/RTO timer when RACK doesn't set. */
+ if (flag & FLAG_SET_XMIT_TIMER)
+ tcp_set_xmit_timer(sk);
+
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
sk_dst_confirm(sk);
* The receiver remembers and reflects via DSACKs. Leverage the
* DSACK state and change the txhash to re-route speculatively.
*/
- if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
- sk_rethink_txhash(sk);
+ if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
+ sk_rethink_txhash(sk))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
- }
}
static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
tcp_move_syn(newtp, req);
ireq->ireq_opt = NULL;
} else {
+ newinet->inet_opt = NULL;
+
if (!req_unhash && found_dup_sk) {
/* This code path should be executed in the
* syncookie case only
bh_unlock_sock(newsk);
sock_put(newsk);
newsk = NULL;
- } else {
- newinet->inet_opt = NULL;
}
}
return newsk;
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+ u32 tail_gso_size, tail_gso_segs;
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
unsigned int hdrlen;
bool fragstolen;
u32 gso_segs;
+ u32 gso_size;
int delta;
/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
*/
th = (const struct tcphdr *)skb->data;
hdrlen = th->doff * 4;
- shinfo = skb_shinfo(skb);
-
- if (!shinfo->gso_size)
- shinfo->gso_size = skb->len - hdrlen;
-
- if (!shinfo->gso_segs)
- shinfo->gso_segs = 1;
tail = sk->sk_backlog.tail;
if (!tail)
goto no_coalesce;
__skb_pull(skb, hdrlen);
+
+ shinfo = skb_shinfo(skb);
+ gso_size = shinfo->gso_size ?: skb->len;
+ gso_segs = shinfo->gso_segs ?: 1;
+
+ shinfo = skb_shinfo(tail);
+ tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
+ tail_gso_segs = shinfo->gso_segs ?: 1;
+
if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
}
/* Not as strict as GRO. We only need to carry the max mss value */
- skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
- skb_shinfo(tail)->gso_size);
-
- gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
- skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+ shinfo->gso_size = max(gso_size, tail_gso_size);
+ shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
sk->sk_backlog.len += delta;
__NET_INC_STATS(sock_net(sk),
/* Cancel probe timer, if it is not required. */
icsk->icsk_probes_out = 0;
icsk->icsk_backoff = 0;
+ icsk->icsk_probes_tstamp = 0;
return;
}
*/
timeout = TCP_RESOURCE_PROBE_INTERVAL;
}
+
+ timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
}
}
}
-void tcp_rack_mark_lost(struct sock *sk)
+bool tcp_rack_mark_lost(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 timeout;
if (!tp->rack.advanced)
- return;
+ return false;
/* Reset the advanced flag to avoid unnecessary queue scanning */
tp->rack.advanced = 0;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
+ return !!timeout;
}
/* Record the most recently (re)sent time among the (s)acked packets
return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 remaining;
+ s32 elapsed;
+
+ if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
+ return when;
+
+ elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
+ if (unlikely(elapsed < 0))
+ elapsed = 0;
+ remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
+ remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
+
+ return min_t(u32, remaining, when);
+}
+
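/*
 * Editorial worked example (not part of the patch): with
 * TCP_USER_TIMEOUT set to 5000 ms and icsk_probes_tstamp recorded
 * 3000 ms ago, the remaining budget is 2000 ms, so a nominal probe0
 * backoff of, say, 4000 ms is clamped:
 *
 *	when = tcp_clamp_probe0_to_user_timeout(sk, when);
 *	(min(4000 ms, 5000 ms - 3000 ms) == 2000 ms)
 *
 * which lets the connection be aborted on the user's schedule.
 */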
/**
* tcp_write_err() - close socket and save error info
* @sk: The socket the error has appeared on.
int retry_until;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
- if (icsk->icsk_retransmits) {
- dst_negative_advice(sk);
- } else {
- sk_rethink_txhash(sk);
- tp->timeout_rehash++;
- __NET_INC_STATS(sock_net(sk),
- LINUX_MIB_TCPTIMEOUTREHASH);
- }
+ if (icsk->icsk_retransmits)
+ __dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
expired = icsk->icsk_retransmits >= retry_until;
} else {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
- dst_negative_advice(sk);
- } else {
- sk_rethink_txhash(sk);
- tp->timeout_rehash++;
- __NET_INC_STATS(sock_net(sk),
- LINUX_MIB_TCPTIMEOUTREHASH);
+ __dst_negative_advice(sk);
}
retry_until = net->ipv4.sysctl_tcp_retries2;
return 1;
}
+ if (sk_rethink_txhash(sk)) {
+ tp->timeout_rehash++;
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
+ }
+
return 0;
}
if (tp->packets_out || !skb) {
icsk->icsk_probes_out = 0;
+ icsk->icsk_probes_tstamp = 0;
return;
}
* corresponding system limit. We also implement similar policy when
* we use RTO to probe window in tcp_retransmit_timer().
*/
- if (icsk->icsk_user_timeout) {
- u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
- tcp_probe0_base(sk));
-
- if (elapsed >= icsk->icsk_user_timeout)
- goto abort;
- }
+ if (!icsk->icsk_probes_tstamp)
+ icsk->icsk_probes_tstamp = tcp_jiffies32;
+ else if (icsk->icsk_user_timeout &&
+ (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
+ msecs_to_jiffies(icsk->icsk_user_timeout))
+ goto abort;
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
*/
if (!inet_sk(sk)->inet_daddr && in_dev)
return ip_mc_validate_source(skb, iph->daddr,
- iph->saddr, iph->tos,
+ iph->saddr,
+ iph->tos & IPTOS_RT_MASK,
skb->dev, in_dev, &itag);
}
return 0;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
+static void __udpv4_gso_segment_csum(struct sk_buff *seg,
+ __be32 *oldip, __be32 *newip,
+ __be16 *oldport, __be16 *newport)
+{
+ struct udphdr *uh;
+ struct iphdr *iph;
+
+ if (*oldip == *newip && *oldport == *newport)
+ return;
+
+ uh = udp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ if (uh->check) {
+ inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip,
+ true);
+ inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport,
+ false);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ *oldport = *newport;
+
+ csum_replace4(&iph->check, *oldip, *newip);
+ *oldip = *newip;
+}
+
+static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+ struct sk_buff *seg;
+ struct udphdr *uh, *uh2;
+ struct iphdr *iph, *iph2;
+
+ seg = segs;
+ uh = udp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) &&
+ (udp_hdr(seg)->source == udp_hdr(seg->next)->source) &&
+ (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
+ (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr))
+ return segs;
+
+ while ((seg = seg->next)) {
+ uh2 = udp_hdr(seg);
+ iph2 = ip_hdr(seg);
+
+ __udpv4_gso_segment_csum(seg,
+ &iph2->saddr, &iph->saddr,
+ &uh2->source, &uh->source);
+ __udpv4_gso_segment_csum(seg,
+ &iph2->daddr, &iph->daddr,
+ &uh2->dest, &uh->dest);
+ }
+
+ return segs;
+}
+
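/*
 * Editorial sketch (not part of the patch): the fixups above rely on
 * incremental checksum updates (RFC 1624); when a 32-bit header word
 * changes, the checksum is patched in place instead of re-summed.
 */
static void example_rewrite_saddr(struct iphdr *iph, __be32 new_saddr)
{
	csum_replace4(&iph->check, iph->saddr, new_saddr);
	iph->saddr = new_saddr;
}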
static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
- netdev_features_t features)
+ netdev_features_t features,
+ bool is_ipv6)
{
unsigned int mss = skb_shinfo(skb)->gso_size;
udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
- return skb;
+ return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
}
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
- netdev_features_t features)
+ netdev_features_t features, bool is_ipv6)
{
struct sock *sk = gso_skb->sk;
unsigned int sum_truesize = 0;
__be16 newlen;
if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
- return __udp_gso_segment_list(gso_skb, features);
+ return __udp_gso_segment_list(gso_skb, features, is_ipv6);
mss = skb_shinfo(gso_skb)->gso_size;
if (gso_skb->len <= sizeof(*uh) + mss)
goto out;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- return __udp_gso_segment(skb, features);
+ return __udp_gso_segment(skb, features, false);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
.fc_ifindex = dev->ifindex,
.fc_dst_len = 8,
.fc_flags = RTF_UP,
- .fc_type = RTN_UNICAST,
+ .fc_type = RTN_MULTICAST,
.fc_nlinfo.nl_net = dev_net(dev),
+ .fc_protocol = RTPROT_KERNEL,
};
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
goto out;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- return __udp_gso_segment(skb, features);
+ return __udp_gso_segment(skb, features, true);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
break;
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
if (!ealg->pfkey_supported)
continue;
- if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+ if (!(ealg_tmpl_set(t, ealg)))
continue;
for (k = 1; ; k++) {
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
}
timer_setup(&lapb->t1timer, NULL, 0);
timer_setup(&lapb->t2timer, NULL, 0);
+ lapb->t1timer_stop = true;
+ lapb->t2timer_stop = true;
lapb->t1 = LAPB_DEFAULT_T1;
lapb->t2 = LAPB_DEFAULT_T2;
lapb->mode = LAPB_DEFAULT_MODE;
lapb->window = LAPB_DEFAULT_WINDOW;
lapb->state = LAPB_STATE_0;
+
+ spin_lock_init(&lapb->lock);
refcount_set(&lapb->refcnt, 1);
out:
return lapb;
goto out;
lapb_put(lapb);
+ /* Wait for other refs to "lapb" to drop */
+ while (refcount_read(&lapb->refcnt) > 2)
+ usleep_range(1, 10);
+
+ spin_lock_bh(&lapb->lock);
+
lapb_stop_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb_clear_queues(lapb);
+ spin_unlock_bh(&lapb->lock);
+
+ /* Wait for running timers to stop */
+ del_timer_sync(&lapb->t1timer);
+ del_timer_sync(&lapb->t2timer);
+
__lapb_remove_cb(lapb);
lapb_put(lapb);
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
parms->t1 = lapb->t1 / HZ;
parms->t2 = lapb->t2 / HZ;
parms->n2 = lapb->n2;
else
parms->t2timer = (lapb->t2timer.expires - jiffies) / HZ;
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
rc = LAPB_OK;
out:
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
rc = LAPB_INVALUE;
if (parms->t1 < 1 || parms->t2 < 1 || parms->n2 < 1)
goto out_put;
rc = LAPB_OK;
out_put:
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
rc = LAPB_OK;
if (lapb->state == LAPB_STATE_1)
goto out_put;
rc = LAPB_OK;
out_put:
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
}
EXPORT_SYMBOL(lapb_connect_request);
-int lapb_disconnect_request(struct net_device *dev)
+static int __lapb_disconnect_request(struct lapb_cb *lapb)
{
- struct lapb_cb *lapb = lapb_devtostruct(dev);
- int rc = LAPB_BADTOKEN;
-
- if (!lapb)
- goto out;
-
switch (lapb->state) {
case LAPB_STATE_0:
- rc = LAPB_NOTCONNECTED;
- goto out_put;
+ return LAPB_NOTCONNECTED;
case LAPB_STATE_1:
lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
- rc = LAPB_NOTCONNECTED;
- goto out_put;
+ return LAPB_NOTCONNECTED;
case LAPB_STATE_2:
- rc = LAPB_OK;
- goto out_put;
+ return LAPB_OK;
}
lapb_clear_queues(lapb);
lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
- rc = LAPB_OK;
-out_put:
+ return LAPB_OK;
+}
+
+int lapb_disconnect_request(struct net_device *dev)
+{
+ struct lapb_cb *lapb = lapb_devtostruct(dev);
+ int rc = LAPB_BADTOKEN;
+
+ if (!lapb)
+ goto out;
+
+ spin_lock_bh(&lapb->lock);
+
+ rc = __lapb_disconnect_request(lapb);
+
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
if (!lapb)
goto out;
+ spin_lock_bh(&lapb->lock);
+
rc = LAPB_NOTCONNECTED;
if (lapb->state != LAPB_STATE_3 && lapb->state != LAPB_STATE_4)
goto out_put;
lapb_kick(lapb);
rc = LAPB_OK;
out_put:
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
out:
return rc;
int rc = LAPB_BADTOKEN;
if (lapb) {
+ spin_lock_bh(&lapb->lock);
lapb_data_input(lapb, skb);
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
rc = LAPB_OK;
}
if (!lapb)
return NOTIFY_DONE;
+ spin_lock_bh(&lapb->lock);
+
switch (event) {
case NETDEV_UP:
lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name);
break;
case NETDEV_GOING_DOWN:
if (netif_carrier_ok(dev))
- lapb_disconnect_request(dev);
+ __lapb_disconnect_request(lapb);
break;
case NETDEV_DOWN:
lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name);
break;
}
+ spin_unlock_bh(&lapb->lock);
lapb_put(lapb);
return NOTIFY_DONE;
}
skb = skb_dequeue(&lapb->write_queue);
do {
- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+ skbn = skb_copy(skb, GFP_ATOMIC);
+ if (!skbn) {
skb_queue_head(&lapb->write_queue, skb);
break;
}
lapb->t1timer.function = lapb_t1timer_expiry;
lapb->t1timer.expires = jiffies + lapb->t1;
+ lapb->t1timer_stop = false;
add_timer(&lapb->t1timer);
}
lapb->t2timer.function = lapb_t2timer_expiry;
lapb->t2timer.expires = jiffies + lapb->t2;
+ lapb->t2timer_stop = false;
add_timer(&lapb->t2timer);
}
void lapb_stop_t1timer(struct lapb_cb *lapb)
{
+ lapb->t1timer_stop = true;
del_timer(&lapb->t1timer);
}
void lapb_stop_t2timer(struct lapb_cb *lapb)
{
+ lapb->t2timer_stop = true;
del_timer(&lapb->t2timer);
}
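/*
 * Editorial note (not part of the patch): del_timer() does not wait
 * for an expiry handler already running on another CPU, so the
 * handlers below re-validate under lapb->lock before touching state;
 * a sketch of that guard:
 */
static bool example_t1_should_run(struct lapb_cb *lapb)
{
	if (timer_pending(&lapb->t1timer))	/* re-armed meanwhile */
		return false;
	if (lapb->t1timer_stop)			/* stopped meanwhile */
		return false;
	return true;
}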
{
struct lapb_cb *lapb = from_timer(lapb, t, t2timer);
+ spin_lock_bh(&lapb->lock);
+ if (timer_pending(&lapb->t2timer)) /* A new timer has been set up */
+ goto out;
+ if (lapb->t2timer_stop) /* The timer has been stopped */
+ goto out;
+
if (lapb->condition & LAPB_ACK_PENDING_CONDITION) {
lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
lapb_timeout_response(lapb);
}
+
+out:
+ spin_unlock_bh(&lapb->lock);
}
static void lapb_t1timer_expiry(struct timer_list *t)
{
struct lapb_cb *lapb = from_timer(lapb, t, t1timer);
+ spin_lock_bh(&lapb->lock);
+ if (timer_pending(&lapb->t1timer)) /* A new timer has been set up */
+ goto out;
+ if (lapb->t1timer_stop) /* The timer has been stopped */
+ goto out;
+
switch (lapb->state) {
/*
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
if (lapb->mode & LAPB_EXTENDED) {
lapb->state = LAPB_STATE_0;
lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
lapb_stop_t2timer(lapb);
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
lapb_requeue_frames(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
- return;
+ goto out;
} else {
lapb->n2count++;
lapb_transmit_frmr(lapb);
}
lapb_start_t1timer(lapb);
+
+out:
+ spin_unlock_bh(&lapb->lock);
}
{
struct ieee80211_local *local = file->private_data;
char buf[100];
- size_t len;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- len = strlen(buf);
- if (len > 0 && buf[len-1] == '\n')
- buf[len-1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
return count;
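/*
 * Editorial sketch (not part of the patch) of the corrected pattern
 * shared by the hunks above and below: reject writes that cannot be
 * NUL-terminated, terminate at 'count' instead of trusting strlen()
 * on a possibly-unterminated buffer, and strip one trailing newline.
 */
static ssize_t example_debugfs_write(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[16];

	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;
	if (count && buf[count - 1] == '\n')
		buf[count - 1] = '\0';
	else
		buf[count] = '\0';
	/* ... parse buf ... */
	return count;
}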
{
struct ieee80211_local *local = file->private_data;
char buf[16];
- size_t len;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = 0;
- len = strlen(buf);
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (kstrtou16(buf, 0, &local->airtime_flags))
return -EINVAL;
{
struct ieee80211_local *local = file->private_data;
char buf[100];
- size_t len;
u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
struct sta_info *sta;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = 0;
- len = strlen(buf);
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
return -EINVAL;
{
struct ieee80211_local *local = file->private_data;
char buf[3];
- size_t len;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- len = strlen(buf);
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (buf[0] == '0' && buf[1] == '\0')
local->force_tx_status = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
ret = drv_sta_add(local, sdata, &sta->sta);
- if (ret == 0)
+ if (ret == 0) {
sta->uploaded = true;
+ if (rcu_access_pointer(sta->sta.rates))
+ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
+ }
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
drv_sta_remove(local, sdata, &sta->sta);
IEEE80211_QUEUE_STOP_REASON_FLUSH,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
IEEE80211_QUEUE_STOP_REASONS,
};
if (ret)
return ret;
+ ieee80211_stop_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
+ synchronize_net();
+
ieee80211_do_stop(sdata, false);
ieee80211_teardown_sdata(sdata);
err = ieee80211_do_open(&sdata->wdev, false);
WARN(err, "type change: do_open returned %d", err);
+ ieee80211_wake_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
return ret;
}
if (old)
kfree_rcu(old, rcu_head);
- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+ if (sta->uploaded)
+ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
rcu_read_lock();
key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+ if (!key)
+ key = rcu_dereference(sdata->default_unicast_key);
if (key) {
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_TKIP:
}
if (wide_bw_chansw_ie) {
+ u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
struct ieee80211_vht_operation vht_oper = {
.chan_width =
wide_bw_chansw_ie->new_channel_width,
.center_freq_seg0_idx =
wide_bw_chansw_ie->new_center_freq_seg0,
- .center_freq_seg1_idx =
- wide_bw_chansw_ie->new_center_freq_seg1,
+ .center_freq_seg1_idx = new_seg1,
/* .basic_mcs_set doesn't matter */
};
- struct ieee80211_ht_operation ht_oper = {};
+ struct ieee80211_ht_operation ht_oper = {
+ .operation_mode =
+ cpu_to_le16(new_seg1 <<
+ IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
+ };
/* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
* to the previously parsed chandef
if (!skip_hw && tx->key &&
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
info->control.hw_key = &tx->key->conf;
- } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+ } else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
return TX_DROP;
}
* get immediately moved to the back of the list on the next
* call to ieee80211_next_txq().
*/
- if (txqi->txq.sta &&
+ if (txqi->txq.sta && local->airtime_flags &&
wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
list_add(&txqi->schedule_order,
struct ethhdr *ehdr = (struct ethhdr *)skb->data;
struct ieee80211_key *key;
struct sta_info *sta;
- bool offload = true;
if (unlikely(skb->len < ETH_HLEN)) {
kfree_skb(skb);
if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
- sdata->control_port_protocol == ehdr->h_proto))
- offload = false;
- else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
- (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
- key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
- offload = false;
-
- if (offload)
- ieee80211_8023_xmit(sdata, dev, sta, key, skb);
- else
- ieee80211_subif_start_xmit(skb, dev);
+ sdata->control_port_protocol == ehdr->h_proto))
+ goto skip_offload;
+
+ key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+ if (!key)
+ key = rcu_dereference(sdata->default_unicast_key);
+
+ if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+ key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+ goto skip_offload;
+
+ ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+ goto out;
+skip_offload:
+ ieee80211_subif_start_xmit(skb, dev);
out:
rcu_read_unlock();
kfree(elem);
}
-static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
- struct nft_set *set,
- struct nft_expr *expr_array[])
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_expr *expr_array[])
{
struct nft_expr *expr;
int err, i, k;
err = -EOPNOTSUPP;
goto err_expr_free;
}
+ } else if (set->num_exprs > 0) {
+ err = nft_set_elem_expr_clone(ctx, set, priv->expr_array);
+ if (err < 0)
+ return err;
+
+ priv->num_exprs = set->num_exprs;
}
nft_set_ext_prepare(&priv->tmpl);
nft_dynset_ext_add_expr(priv);
if (set->flags & NFT_SET_TIMEOUT) {
- if (timeout || set->timeout)
+ if (timeout || set->timeout) {
+ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
+ }
}
priv->timeout = timeout;
nf_jiffies64_to_msecs(priv->timeout),
NFTA_DYNSET_PAD))
goto nla_put_failure;
- if (priv->num_exprs == 1) {
- if (nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr_array[0]))
- goto nla_put_failure;
- } else if (priv->num_exprs > 1) {
- struct nlattr *nest;
-
- nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
- if (!nest)
- goto nla_put_failure;
-
- for (i = 0; i < priv->num_exprs; i++) {
- if (nft_expr_dump(skb, NFTA_LIST_ELEM,
- priv->expr_array[i]))
+ if (priv->set->num_exprs == 0) {
+ if (priv->num_exprs == 1) {
+ if (nft_expr_dump(skb, NFTA_DYNSET_EXPR,
+ priv->expr_array[0]))
goto nla_put_failure;
+ } else if (priv->num_exprs > 1) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+ for (i = 0; i < priv->num_exprs; i++) {
+ if (nft_expr_dump(skb, NFTA_LIST_ELEM,
+ priv->expr_array[i]))
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
}
- nla_nest_end(skb, nest);
}
if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
goto nla_put_failure;
};
unsigned long opt = 0;
- if (!(ndev->nci_ver & NCI_VER_2_MASK))
+ if (ndev->nci_ver & NCI_VER_2_MASK)
opt = (unsigned long)&nci_init_v2_cmd;
rc = __nci_request(ndev, nci_init_req, opt,
if (!dev->polling) {
device_unlock(&dev->dev);
+ nfc_put_device(dev);
return -EINVAL;
}
if (addr->target_idx > dev->target_next_idx - 1 ||
addr->target_idx < dev->target_next_idx - dev->n_targets) {
rc = -EINVAL;
- goto error;
+ goto put_dev;
}
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
if (args->nr_local == 0)
return -EINVAL;
+ if (args->nr_local > UIO_MAXIOV)
+ return -EMSGSIZE;
+
iov->iov = kcalloc(args->nr_local,
sizeof(struct rds_iovec),
GFP_KERNEL);
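
For illustration only, not part of the patch: a minimal user-space sketch of the bound check this RDS hunk introduces. The helper name alloc_iov and the demo values are ours; UIO_MAXIOV (1024) matches the kernel's iovec limit.

/* Illustrative sketch, not kernel code: clamp a user-controlled element
 * count before it is multiplied into an allocation size, as the fix
 * above does with args->nr_local.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define UIO_MAXIOV 1024	/* same limit the kernel applies */

struct iovec_like { void *base; size_t len; };

static void *alloc_iov(unsigned int nr_local)
{
	if (nr_local == 0 || nr_local > UIO_MAXIOV) {
		errno = nr_local ? EMSGSIZE : EINVAL;
		return NULL;
	}
	return calloc(nr_local, sizeof(struct iovec_like));
}

int main(void)
{
	printf("%p\n", alloc_iov(8));		/* accepted */
	printf("%p\n", alloc_iov(1u << 20));	/* rejected, EMSGSIZE */
	return 0;
}
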
goto error_security;
}
- ret = register_pernet_subsys(&rxrpc_net_ops);
+ ret = register_pernet_device(&rxrpc_net_ops);
if (ret)
goto error_pernet;
error_sock:
proto_unregister(&rxrpc_proto);
error_proto:
- unregister_pernet_subsys(&rxrpc_net_ops);
+ unregister_pernet_device(&rxrpc_net_ops);
error_pernet:
rxrpc_exit_security();
error_security:
unregister_key_type(&key_type_rxrpc);
sock_unregister(PF_RXRPC);
proto_unregister(&rxrpc_proto);
- unregister_pernet_subsys(&rxrpc_net_ops);
+ unregister_pernet_device(&rxrpc_net_ops);
ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
tail = b->peer_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_peer *peer = b->peer_backlog[tail];
+ rxrpc_put_local(peer->local);
kfree(peer);
tail = (tail + 1) & (size - 1);
}
nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+ if (!nla_ok(nla_opt_msk, msk_depth)) {
+ NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
+ return -EINVAL;
+ }
}
nla_for_each_attr(nla_opt_key, nla_enc_key,
NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
return -EINVAL;
}
-
- if (msk_depth)
- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
break;
case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
if (key->enc_opts.dst_opt_type) {
NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
return -EINVAL;
}
-
- if (msk_depth)
- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
break;
case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
if (key->enc_opts.dst_opt_type) {
NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
return -EINVAL;
}
-
- if (msk_depth)
- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
break;
default:
NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
return -EINVAL;
}
+
+ if (!msk_depth)
+ continue;
+
+ if (!nla_ok(nla_opt_msk, msk_depth)) {
+ NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
+ return -EINVAL;
+ }
+ nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
}
return 0;
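
For illustration (not part of the patch): a stand-alone user-space sketch of the nla_ok()/nla_next() invariant the flower fix converges on, namely validate the remaining length before touching an attribute, and only then advance. The struct layout and helpers mirror the kernel's netlink macros; the toy buffer in main() is an assumption of ours.

/* Illustrative sketch of bounds-checked netlink attribute walking. */
#include <stdint.h>
#include <stdio.h>

struct nlattr { uint16_t nla_len; uint16_t nla_type; };

#define NLA_ALIGN(len)	(((len) + 3) & ~3)
#define NLA_HDRLEN	((int)NLA_ALIGN(sizeof(struct nlattr)))

static int nla_ok(const struct nlattr *nla, int remaining)
{
	return remaining >= (int)sizeof(*nla) &&
	       nla->nla_len >= sizeof(*nla) &&
	       nla->nla_len <= remaining;
}

static struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
{
	unsigned int totlen = NLA_ALIGN(nla->nla_len);

	*remaining -= totlen;
	return (struct nlattr *)((char *)nla + totlen);
}

int main(void)
{
	unsigned char buf[16] = { 0 };
	struct nlattr *a;
	int rem = 12;

	((struct nlattr *)buf)->nla_len = NLA_HDRLEN + 4;	/* 4-byte payload */
	((struct nlattr *)(buf + 8))->nla_len = NLA_HDRLEN;	/* empty attr */

	for (a = (struct nlattr *)buf; nla_ok(a, rem); a = nla_next(a, &rem))
		printf("attr of len %u, %d bytes left\n", a->nla_len, rem);
	return 0;
}
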
if (tb[TCA_TCINDEX_MASK])
cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
- if (tb[TCA_TCINDEX_SHIFT])
+ if (tb[TCA_TCINDEX_SHIFT]) {
cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
+ if (cp->shift > 16) {
+ err = -EINVAL;
+ goto errout;
+ }
+ }
if (!cp->hash) {
/* Hash not specified, use perfect hash if the upper limit
* of the hashing index is below the threshold.
{
struct qdisc_rate_table *rtab;
- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+ if (tab == NULL || r->rate == 0 ||
+ r->cell_log == 0 || r->cell_log >= 32 ||
nla_len(tab) != TC_RTAB_SIZE) {
NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
return NULL;
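
A brief aside on why the new r->cell_log >= 32 bound matters: shifting a 32-bit value by its width or more is undefined behaviour in C, so the lookup must reject such values before ever computing a table index. A hypothetical user-space lookup with the same guard (names and values ours; TC_RTAB_SIZE corresponds to 256 slots):

/* Illustrative sketch, not the qdisc code itself. */
#include <stdint.h>
#include <stdio.h>

static int rate_table_index(uint32_t len, uint8_t cell_log)
{
	if (cell_log == 0 || cell_log >= 32)	/* same bounds as the fix */
		return -1;
	return (int)(len >> cell_log) & 0xff;	/* 256-slot table */
}

int main(void)
{
	printf("%d\n", rate_table_index(1500, 3));	/* valid: 187 */
	printf("%d\n", rate_table_index(1500, 32));	/* rejected: -1 */
	return 0;
}
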
#include <linux/uaccess.h>
#include <linux/hashtable.h>
+#include "auth_gss_internal.h"
#include "../netns.h"
#include <trace/events/rpcgss.h>
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, size_t len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static inline const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
-{
- const void *q;
- unsigned int len;
-
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- dest->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(dest->data == NULL))
- return ERR_PTR(-ENOMEM);
- dest->len = len;
- return q;
-}
-
static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
--- /dev/null
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * linux/net/sunrpc/auth_gss/auth_gss_internal.h
+ *
+ * Internal definitions for RPCSEC_GSS client authentication
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ */
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/sunrpc/xdr.h>
+
+static inline const void *
+simple_get_bytes(const void *p, const void *end, void *res, size_t len)
+{
+ const void *q = (const void *)((const char *)p + len);
+ if (unlikely(q > end || q < p))
+ return ERR_PTR(-EFAULT);
+ memcpy(res, p, len);
+ return q;
+}
+
+static inline const void *
+simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+{
+ const void *q;
+ unsigned int len;
+
+ p = simple_get_bytes(p, end, &len, sizeof(len));
+ if (IS_ERR(p))
+ return p;
+ q = (const void *)((const char *)p + len);
+ if (unlikely(q > end || q < p))
+ return ERR_PTR(-EFAULT);
+ if (len) {
+ dest->data = kmemdup(p, len, GFP_NOFS);
+ if (unlikely(dest->data == NULL))
+ return ERR_PTR(-ENOMEM);
+ } else
+ dest->data = NULL;
+ dest->len = len;
+ return q;
+}
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/gss_krb5_enctypes.h>
+#include "auth_gss_internal.h"
+
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
return NULL;
}
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, int len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
-{
- const void *q;
- unsigned int len;
-
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- res->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(res->data == NULL))
- return ERR_PTR(-ENOMEM);
- res->len = len;
- return q;
-}
-
static inline const void *
get_key(const void *p, const void *end,
struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
err = -EAGAIN;
if (len <= 0)
goto out_release;
+ trace_svc_xdr_recvfrom(&rqstp->rq_arg);
clear_bit(XPT_OLD, &xprt->xpt_flags);
if (serv->sv_stats)
serv->sv_stats->netcnt++;
- trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
return len;
out_release:
rqstp->rq_res.len = 0;
xb->len = xb->head[0].iov_len +
xb->page_len +
xb->tail[0].iov_len;
- trace_svc_xdr_sendto(rqstp, xb);
+ trace_svc_xdr_sendto(rqstp->rq_xid, xb);
trace_svc_stats_latency(rqstp);
len = xprt->xpt_ops->xpo_sendto(rqstp);
unsigned int offset, len, remaining;
struct bio_vec *bvec;
- bvec = xdr->bvec;
- offset = xdr->page_base;
+ bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
+ offset = offset_in_page(xdr->page_base);
remaining = xdr->page_len;
flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
while (remaining > 0) {
if (remaining <= PAGE_SIZE && tail->iov_len == 0)
flags = 0;
- len = min(remaining, bvec->bv_len);
+
+ len = min(remaining, bvec->bv_len - offset);
ret = kernel_sendpage(sock, bvec->bv_page,
bvec->bv_offset + offset,
len, flags);
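
For reference, the arithmetic this sendpage fix introduces, shown stand-alone: page_base is a byte offset across the bvec pages, so sending must start at bvec[page_base >> PAGE_SHIFT] with an in-page offset, and each chunk is capped at bv_len - offset. A 4 KiB page size is assumed in the sketch below; the numbers are invented.

/* Stand-alone sketch of the bvec indexing above; 4 KiB pages assumed. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long page_base = 5000;	/* example: begins inside page 1 */
	unsigned long idx = page_base >> PAGE_SHIFT;		/* 1 */
	unsigned long offset = page_base & (PAGE_SIZE - 1);	/* 904 */

	printf("start at bvec[%lu], in-page offset %lu\n", idx, offset);
	return 0;
}
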
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
if (check_cb(dev)) {
- /* This flag is only checked if the return value is success. */
- port_obj_info->handled = true;
- return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
- extack);
+ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+ extack);
+ if (err != -EOPNOTSUPP)
+ port_obj_info->handled = true;
+ return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the
int err = -EOPNOTSUPP;
if (check_cb(dev)) {
- /* This flag is only checked if the return value is success. */
- port_obj_info->handled = true;
- return del_cb(dev, port_obj_info->obj);
+ err = del_cb(dev, port_obj_info->obj);
+ if (err != -EOPNOTSUPP)
+ port_obj_info->handled = true;
+ return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the
int err = -EOPNOTSUPP;
if (check_cb(dev)) {
- port_attr_info->handled = true;
- return set_cb(dev, port_attr_info->attr,
- port_attr_info->trans);
+ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
+ if (err != -EOPNOTSUPP)
+ port_attr_info->handled = true;
+ return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
} else if (sock->type == SOCK_STREAM) {
- const struct vsock_transport *transport = vsk->transport;
+ const struct vsock_transport *transport;
+
lock_sock(sk);
+ transport = vsk->transport;
+
/* Listening sockets that have connections in their accept
* queue can be read.
*/
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
lock_sock(sk);
+ transport = vsk->transport;
+
err = vsock_auto_bind(vsk);
if (err)
goto out;
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
lock_sock(sk);
+ transport = vsk->transport;
+
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
COPY_IN(val);
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
total_written = 0;
err = 0;
lock_sock(sk);
+ transport = vsk->transport;
+
/* Callers should not provide a destination with stream sockets. */
if (msg->msg_namelen) {
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
sk = sock->sk;
vsk = vsock_sk(sk);
- transport = vsk->transport;
err = 0;
lock_sock(sk);
+ transport = vsk->transport;
+
if (!transport || sk->sk_state != TCP_ESTABLISHED) {
/* Recvmsg is supposed to return 0 if a peer performs an
* orderly shutdown. Differentiate between that case and when a
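
All of the vsock hunks above enforce one rule: vsk->transport may be reassigned concurrently, so it must be loaded only after lock_sock(). A minimal pthread analogue, with purely illustrative names, of snapshotting a mutable pointer under the lock that serializes its writers:

/* Illustrative user-space analogue, not the vsock code. */
#include <pthread.h>
#include <stdio.h>

struct sock_like {
	pthread_mutex_t lock;
	const char *transport;	/* may be swapped by a connect() path */
};

static void do_op(struct sock_like *sk)
{
	const char *transport;

	pthread_mutex_lock(&sk->lock);
	transport = sk->transport;	/* snapshot only under the lock */
	printf("using transport %s\n", transport ? transport : "(none)");
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct sock_like sk = { PTHREAD_MUTEX_INITIALIZER, "virtio" };

	do_op(&sk);
	return 0;
}
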
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
return rcu_dereference_rtnl(cfg80211_regdomain);
}
+/*
+ * Returns the regulatory domain associated with the wiphy.
+ *
+ * Requires either RTNL or RCU protection
+ */
const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
{
return rcu_dereference_rtnl(wiphy->regd);
if (IS_ERR(new_regd))
return;
+ rtnl_lock();
+
tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, new_regd);
rcu_free_regdom(tmp);
+
+ rtnl_unlock();
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
int call_commit_handler(struct net_device *dev)
{
#ifdef CONFIG_WIRELESS_EXT
- if ((netif_running(dev)) &&
- (dev->wireless_handlers->standard[0] != NULL))
+ if (netif_running(dev) &&
+ dev->wireless_handlers &&
+ dev->wireless_handlers->standard[0])
/* Call the commit handler on the driver */
return dev->wireless_handlers->standard[0](dev, NULL,
NULL, NULL);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
- if (queue_id < dev->real_num_rx_queues)
+ if (queue_id < dev->num_rx_queues)
dev->_rx[queue_id].pool = NULL;
- if (queue_id < dev->real_num_tx_queues)
+ if (queue_id < dev->num_tx_queues)
dev->_tx[queue_id].pool = NULL;
}
/* only the first xfrm gets the encap type */
encap_type = 0;
- if (async && x->repl->recheck(x, skb, seq)) {
+ if (x->repl->recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
const xfrm_address_t *b,
u8 prefixlen, u16 family)
{
+ u32 ma, mb, mask;
unsigned int pdw, pbi;
int delta = 0;
switch (family) {
case AF_INET:
- if (sizeof(long) == 4 && prefixlen == 0)
- return ntohl(a->a4) - ntohl(b->a4);
- return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
- (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
+ if (prefixlen == 0)
+ return 0;
+ mask = ~0U << (32 - prefixlen);
+ ma = ntohl(a->a4) & mask;
+ mb = ntohl(b->a4) & mask;
+ if (ma < mb)
+ delta = -1;
+ else if (ma > mb)
+ delta = 1;
+ break;
case AF_INET6:
pdw = prefixlen >> 5;
pbi = prefixlen & 0x1f;
return delta;
}
if (pbi) {
- u32 mask = ~0u << (32 - pbi);
-
- delta = (ntohl(a->a6[pdw]) & mask) -
- (ntohl(b->a6[pdw]) & mask);
+ mask = ~0U << (32 - pbi);
+ ma = ntohl(a->a6[pdw]) & mask;
+ mb = ntohl(b->a6[pdw]) & mask;
+ if (ma < mb)
+ delta = -1;
+ else if (ma > mb)
+ delta = 1;
}
break;
default:
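
For illustration, a stand-alone demonstration of why the subtraction-based compare above was replaced: the difference of two masked 32-bit values does not fit in an int, so ordering must come from explicit comparisons. The demo values in main() are ours.

/* Illustrative sketch of the overflow-safe prefix comparison. */
#include <stdint.h>
#include <stdio.h>

static int prefix_cmp(uint32_t a, uint32_t b, unsigned int prefixlen)
{
	uint32_t mask, ma, mb;

	if (prefixlen == 0)
		return 0;
	mask = ~0U << (32 - prefixlen);
	ma = a & mask;
	mb = b & mask;
	if (ma < mb)
		return -1;
	return ma > mb ? 1 : 0;
}

int main(void)
{
	/* (int)(0x00000001u - 0xffffffffu) == 2, i.e. positive, so the old
	 * subtraction would claim a > b even though a < b.
	 */
	printf("%d\n", prefix_cmp(0x00000001, 0xffffffff, 32));	/* -1 */
	return 0;
}
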
xflo.flags = flags;
/* To accelerate a bit... */
- if ((dst_orig->flags & DST_NOXFRM) ||
- !net->xfrm.policy_count[XFRM_POLICY_OUT])
+ if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
+ !net->xfrm.policy_count[XFRM_POLICY_OUT]))
goto nopol;
xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
{
int size, ret;
kuid_t kroot;
+ u32 nsmagic, magic;
uid_t root, mappedroot;
char *tmpbuf = NULL;
struct vfs_cap_data *cap;
- struct vfs_ns_cap_data *nscap;
+ struct vfs_ns_cap_data *nscap = NULL;
struct dentry *dentry;
struct user_namespace *fs_ns;
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
if (is_v2header((size_t) ret, cap)) {
- /* If this is sizeof(vfs_cap_data) then we're ok with the
- * on-disk value, so return that. */
- if (alloc)
- *buffer = tmpbuf;
- else
- kfree(tmpbuf);
- return ret;
- } else if (!is_v3header((size_t) ret, cap)) {
- kfree(tmpbuf);
- return -EINVAL;
+ root = 0;
+ } else if (is_v3header((size_t) ret, cap)) {
+ nscap = (struct vfs_ns_cap_data *) tmpbuf;
+ root = le32_to_cpu(nscap->rootid);
+ } else {
+ size = -EINVAL;
+ goto out_free;
}
- nscap = (struct vfs_ns_cap_data *) tmpbuf;
- root = le32_to_cpu(nscap->rootid);
kroot = make_kuid(fs_ns, root);
/* If the root kuid maps to a valid uid in current ns, then return
* this as a nscap. */
mappedroot = from_kuid(current_user_ns(), kroot);
if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+ size = sizeof(struct vfs_ns_cap_data);
if (alloc) {
- *buffer = tmpbuf;
+ if (!nscap) {
+ /* v2 -> v3 conversion */
+ nscap = kzalloc(size, GFP_ATOMIC);
+ if (!nscap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
+ nsmagic = VFS_CAP_REVISION_3;
+ magic = le32_to_cpu(cap->magic_etc);
+ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+ nscap->magic_etc = cpu_to_le32(nsmagic);
+ } else {
+ /* use allocated v3 buffer */
+ tmpbuf = NULL;
+ }
nscap->rootid = cpu_to_le32(mappedroot);
- } else
- kfree(tmpbuf);
- return size;
+ *buffer = nscap;
+ }
+ goto out_free;
}
if (!rootid_owns_currentns(kroot)) {
- kfree(tmpbuf);
- return -EOPNOTSUPP;
+ size = -EOVERFLOW;
+ goto out_free;
}
/* This comes from a parent namespace. Return as a v2 capability */
size = sizeof(struct vfs_cap_data);
if (alloc) {
- *buffer = kmalloc(size, GFP_ATOMIC);
- if (*buffer) {
- struct vfs_cap_data *cap = *buffer;
- __le32 nsmagic, magic;
+ if (nscap) {
+ /* v3 -> v2 conversion */
+ cap = kzalloc(size, GFP_ATOMIC);
+ if (!cap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
magic = VFS_CAP_REVISION_2;
nsmagic = le32_to_cpu(nscap->magic_etc);
if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
cap->magic_etc = cpu_to_le32(magic);
} else {
- size = -ENOMEM;
+ /* use unconverted v2 */
+ tmpbuf = NULL;
}
+ *buffer = cap;
}
+out_free:
kfree(tmpbuf);
return size;
}
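
The v2<->v3 conversions above rewrite magic_etc so that the revision changes while the EFFECTIVE flag is carried over. A stand-alone sketch using the UAPI constants from linux/capability.h (convert_magic is our name, not a kernel helper):

/* Illustrative sketch of the magic_etc rewriting, not the VFS code. */
#include <stdint.h>
#include <stdio.h>

#define VFS_CAP_REVISION_2	0x02000000
#define VFS_CAP_REVISION_3	0x03000000
#define VFS_CAP_FLAGS_EFFECTIVE	0x000001

static uint32_t convert_magic(uint32_t magic, uint32_t new_rev)
{
	uint32_t out = new_rev;

	if (magic & VFS_CAP_FLAGS_EFFECTIVE)
		out |= VFS_CAP_FLAGS_EFFECTIVE;
	return out;
}

int main(void)
{
	uint32_t v2 = VFS_CAP_REVISION_2 | VFS_CAP_FLAGS_EFFECTIVE;

	printf("0x%08x\n", convert_magic(v2, VFS_CAP_REVISION_3));
	return 0;
}
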
continue;
/*
- * The 'deps' array includes maximum three dependencies
- * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
+ * The 'deps' array includes a maximum of four dependencies
+ * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fifth
 * member of this array is a sentinel and should be
 * a negative value.
*
if (info->is_midi) {
struct midi_info minf;
- snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
+ if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
+ return -ENXIO;
inf->synth_type = SYNTH_TYPE_MIDI;
inf->synth_subtype = 0;
inf->nr_voices = 16;
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0xa0c8,
},
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x43c8,
+ },
#endif
/* Elkhart Lake */
snd_hdac_leave_pm(&codec->core);
}
-static int hda_codec_suspend(struct device *dev)
+static int hda_codec_runtime_suspend(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
unsigned int state;
return 0;
}
-static int hda_codec_resume(struct device *dev)
+static int hda_codec_runtime_resume(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
return 0;
}
-static int hda_codec_runtime_suspend(struct device *dev)
-{
- return hda_codec_suspend(dev);
-}
-
-static int hda_codec_runtime_resume(struct device *dev)
-{
- return hda_codec_resume(dev);
-}
-
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int hda_codec_pm_suspend(struct device *dev)
{
dev->power.power_state = PMSG_SUSPEND;
- return hda_codec_suspend(dev);
+ return pm_runtime_force_suspend(dev);
}
static int hda_codec_pm_resume(struct device *dev)
{
dev->power.power_state = PMSG_RESUME;
- return hda_codec_resume(dev);
+ return pm_runtime_force_resume(dev);
}
static int hda_codec_pm_freeze(struct device *dev)
{
dev->power.power_state = PMSG_FREEZE;
- return hda_codec_suspend(dev);
+ return pm_runtime_force_suspend(dev);
}
static int hda_codec_pm_thaw(struct device *dev)
{
dev->power.power_state = PMSG_THAW;
- return hda_codec_resume(dev);
+ return pm_runtime_force_resume(dev);
}
static int hda_codec_pm_restore(struct device *dev)
{
dev->power.power_state = PMSG_RESTORE;
- return hda_codec_resume(dev);
+ return pm_runtime_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
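
The hunks above route every system-sleep callback through the runtime-PM force helpers. The HDA code keeps its own wrappers so it can record dev->power.power_state; for a driver without that need, the usual shape is roughly the following (an illustrative, untested sketch, not the HDA driver):

/* Illustrative sketch of the common idiom: delegate system sleep to the
 * runtime-PM helpers so an already runtime-suspended device is not
 * powered down twice.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int demo_runtime_suspend(struct device *dev)
{
	/* device-specific power-down would go here */
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	/* device-specific power-up would go here */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};
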
/* CometLake-S */
{ PCI_DEVICE(0x8086, 0xa3f0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* CometLake-R */
+ { PCI_DEVICE(0x8086, 0xf0c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Alderlake-S */
{ PCI_DEVICE(0x8086, 0x7ad0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Alderlake-P */
+ { PCI_DEVICE(0x8086, 0x51c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
ALC256_FIXUP_HP_HEADSET_MIC,
ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+ ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
};
static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE
},
+ [ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x1025, 0x1094, "Acer Aspire E5-575T", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
spec->codec_type = VT1708S;
spec->gen.indep_hp = 1;
spec->gen.keep_eapd_on = 1;
+ spec->gen.dac_min_mute = 1;
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
codec->power_save_node = 1;
static const struct snd_pci_quirk vt2002p_fixups[] = {
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
- SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
+ SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
{}
};
static const struct dmi_system_id rn_acp_quirk_table[] = {
{
- /* Lenovo IdeaPad Flex 5 14ARE05, IdeaPad 5 15ARE05 */
+ /* Lenovo IdeaPad S340-14API */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81NB"),
+ }
+ },
+ {
+ /* Lenovo IdeaPad Flex 5 14ARE05 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81X2"),
+ }
+ },
+ {
+ /* Lenovo IdeaPad 5 15ARE05 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81YQ"),
}
},
{
.ops = &ak4458_dai_ops,
};
-static void ak4458_power_off(struct ak4458_priv *ak4458)
+static void ak4458_reset(struct ak4458_priv *ak4458, bool active)
{
if (ak4458->reset_gpiod) {
- gpiod_set_value_cansleep(ak4458->reset_gpiod, 0);
- usleep_range(1000, 2000);
- }
-}
-
-static void ak4458_power_on(struct ak4458_priv *ak4458)
-{
- if (ak4458->reset_gpiod) {
- gpiod_set_value_cansleep(ak4458->reset_gpiod, 1);
+ gpiod_set_value_cansleep(ak4458->reset_gpiod, active);
usleep_range(1000, 2000);
}
}
if (ak4458->mute_gpiod)
gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
- ak4458_power_on(ak4458);
+ ak4458_reset(ak4458, false);
ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
0x80, 0x80); /* ACKS bit = 1; 10000000 */
{
struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
- ak4458_power_off(ak4458);
+ ak4458_reset(ak4458, true);
}
#ifdef CONFIG_PM
regcache_cache_only(ak4458->regmap, true);
- ak4458_power_off(ak4458);
+ ak4458_reset(ak4458, true);
if (ak4458->mute_gpiod)
gpiod_set_value_cansleep(ak4458->mute_gpiod, 0);
if (ak4458->mute_gpiod)
gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
- ak4458_power_off(ak4458);
- ak4458_power_on(ak4458);
+ ak4458_reset(ak4458, true);
+ ak4458_reset(ak4458, false);
regcache_cache_only(ak4458->regmap, false);
regcache_mark_dirty(ak4458->regmap);
void *data)
{
struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
- int ret = -EOPNOTSUPP;
+ int ret = -ENOTSUPP;
if (hcp->hcd.ops->hook_plugged_cb) {
hcp->jack = jack;
unsigned int alg)
{
struct wm_coeff_ctl *pos, *rslt = NULL;
+ const char *fw_txt = wm_adsp_fw_text[dsp->fw];
list_for_each_entry(pos, &dsp->ctl_list, list) {
if (!pos->subname)
continue;
if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+ strncmp(pos->fw_name, fw_txt,
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0 &&
pos->alg_region.alg == alg &&
pos->alg_region.type == type) {
rslt = pos;
}
ret = snd_soc_component_set_jack(component, &data->hdmi_jack, NULL);
- if (ret && ret != -EOPNOTSUPP) {
+ if (ret && ret != -ENOTSUPP) {
dev_err(card->dev, "Can't set HDMI Jack %d\n", ret);
return ret;
}
.driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
SOF_RT715_DAI_ID_FIX),
},
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
+ },
+ .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+ SOF_RT715_DAI_ID_FIX |
+ SOF_SDW_FOUR_SPK),
+ },
{
.callback = sof_sdw_quirk_cb,
.matches = {
list_for_each_entry(dobj, &component->dobj_list, list) {
struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
- struct soc_enum *se =
- (struct soc_enum *)kcontrol->private_value;
- char **texts = dobj->control.dtexts;
+ struct soc_enum *se;
+ char **texts;
char chan_text[4];
- if (dobj->type != SND_SOC_DOBJ_ENUM ||
- dobj->control.kcontrol->put !=
- skl_tplg_multi_config_set_dmic)
+ if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
+ kcontrol->put != skl_tplg_multi_config_set_dmic)
continue;
+
+ se = (struct soc_enum *)kcontrol->private_value;
+ texts = dobj->control.dtexts;
sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
for (i = 0; i < se->items; i++) {
- struct snd_ctl_elem_value val;
+ struct snd_ctl_elem_value val = {};
if (strstr(texts[i], chan_text)) {
val.value.enumerated.item[0] = i;
.dpcm_playback = 1,
.ignore_suspend = 1,
.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+ .ignore = 1,
.init = mt8183_da7219_max98357_hdmi_init,
SND_SOC_DAILINK_REG(tdm),
},
}
}
- if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+ if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
dai_link->codecs->of_node = hdmi_codec;
+ dai_link->ignore = 0;
+ }
if (!dai_link->platforms->name)
dai_link->platforms->of_node = platform_node;
.ignore_suspend = 1,
.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
.ops = &mt8183_mt6358_tdm_ops,
+ .ignore = 1,
.init = mt8183_mt6358_ts3a227_max98357_hdmi_init,
SND_SOC_DAILINK_REG(tdm),
},
SND_SOC_DAIFMT_CBM_CFM;
}
- if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+ if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
dai_link->codecs->of_node = hdmi_codec;
+ dai_link->ignore = 0;
+ }
if (!dai_link->platforms->name)
dai_link->platforms->of_node = platform_node;
.startup = mt8192_mt6359_rt1015_rt5682_cap1_startup,
};
+static int
+mt8192_mt6359_rt5682_startup(struct snd_pcm_substream *substream)
+{
+ static const unsigned int channels[] = {
+ 1, 2
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+ };
+ static const unsigned int rates[] = {
+ 48000
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list channels failed\n");
+ return ret;
+ }
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8192_mt6359_rt5682_ops = {
+ .startup = mt8192_mt6359_rt5682_startup,
+};
+
/* FE */
SND_SOC_DAILINK_DEFS(playback1,
DAILINK_COMP_ARRAY(COMP_CPU("DL1")),
SND_SOC_DPCM_TRIGGER_PRE},
.dynamic = 1,
.dpcm_playback = 1,
+ .ops = &mt8192_mt6359_rt5682_ops,
SND_SOC_DAILINK_REG(playback3),
},
{
SND_SOC_DPCM_TRIGGER_PRE},
.dynamic = 1,
.dpcm_capture = 1,
+ .ops = &mt8192_mt6359_rt5682_ops,
SND_SOC_DAILINK_REG(capture2),
},
{
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_probe);
+static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct lpass_variant *variant = drvdata->variant;
+ int id = args->args[0];
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < variant->num_dai; i++) {
+ if (variant->dai_driver[i].id == id) {
+ *dai_name = variant->dai_driver[i].name;
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
.name = "lpass-cpu",
+ .of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
};
static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
.micmode = REG_FIELD_ID(0x0010, 4, 7, 5, 0x4),
.micmono = REG_FIELD_ID(0x0010, 3, 3, 5, 0x4),
.wssrc = REG_FIELD_ID(0x0010, 2, 2, 5, 0x4),
- .bitwidth = REG_FIELD_ID(0x0010, 0, 0, 5, 0x4),
+ .bitwidth = REG_FIELD_ID(0x0010, 0, 1, 5, 0x4),
.rdma_dyncclk = REG_FIELD_ID(0x6000, 12, 12, 4, 0x1000),
.rdma_bursten = REG_FIELD_ID(0x6000, 11, 11, 4, 0x1000),
#define LPAIF_WRDMAPERCNT_REG(v, chan) LPAIF_WRDMA_REG_ADDR(v, 0x14, (chan))
#define LPAIF_INTFDMA_REG(v, chan, reg, dai_id) \
- ((v->dai_driver[dai_id].id == LPASS_DP_RX) ? \
+ ((dai_id == LPASS_DP_RX) ? \
LPAIF_HDMI_RDMA##reg##_REG(v, chan) : \
LPAIF_RDMA##reg##_REG(v, chan))
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
ret = regmap_fields_write(dmactl->intf, id,
LPAIF_DMACTL_AUDINTF(dma_port));
if (ret) {
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
val_irqclr = LPAIF_IRQ_ALL(ch);
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
val_mask = LPAIF_IRQ_ALL(ch);
val_irqen = 0;
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
+ case MI2S_TERTIARY:
+ case MI2S_QUATERNARY:
+ case MI2S_QUINARY:
map = drvdata->lpaif_map;
reg = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
val = 0;
#include "lpass.h"
static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
- [MI2S_PRIMARY] = {
+ {
.id = MI2S_PRIMARY,
.name = "Primary MI2S",
.playback = {
},
.probe = &asoc_qcom_lpass_cpu_dai_probe,
.ops = &asoc_qcom_lpass_cpu_dai_ops,
- },
-
- [MI2S_SECONDARY] = {
+ }, {
.id = MI2S_SECONDARY,
.name = "Secondary MI2S",
.playback = {
},
.probe = &asoc_qcom_lpass_cpu_dai_probe,
.ops = &asoc_qcom_lpass_cpu_dai_ops,
- },
- [LPASS_DP_RX] = {
+ }, {
.id = LPASS_DP_RX,
.name = "Hdmi",
.playback = {
.rdma_channels = 5,
.hdmi_rdma_reg_base = 0x64000,
.hdmi_rdma_reg_stride = 0x1000,
- .hdmi_rdma_channels = 4,
+ .hdmi_rdma_channels = 3,
.dmactl_audif_start = 1,
.wrdma_reg_base = 0x18000,
.wrdma_reg_stride = 0x1000,
#include <linux/compiler.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <dt-bindings/sound/sc7180-lpass.h>
+#include <dt-bindings/sound/qcom,lpass.h>
#include "lpass-hdmi.h"
#define LPASS_AHBIX_CLOCK_FREQUENCY 131072000
{
struct snd_soc_dai_driver *dai_drv =
container_of(dobj, struct snd_soc_dai_driver, dobj);
- struct snd_soc_dai *dai;
+ struct snd_soc_dai *dai, *_dai;
if (pass != SOC_TPLG_PASS_PCM_DAI)
return;
if (dobj->ops && dobj->ops->dai_unload)
dobj->ops->dai_unload(comp, dobj);
- for_each_component_dais(comp, dai)
+ for_each_component_dais_safe(comp, dai, _dai)
if (dai->driver == dai_drv)
- dai->driver = NULL;
+ snd_soc_unregister_dai(dai);
list_del(&dobj->list);
}
return -EINVAL;
se->dobj.control.dvalues = devm_kcalloc(tplg->dev, le32_to_cpu(ec->items),
- sizeof(u32),
+ sizeof(*se->dobj.control.dvalues),
GFP_KERNEL);
if (!se->dobj.control.dvalues)
return -ENOMEM;
list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
/* register the DAI to the component */
- dai = devm_snd_soc_register_dai(tplg->dev, tplg->comp, dai_drv, false);
+ dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
if (!dai)
return -ENOMEM;
ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
if (ret != 0) {
dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
+ snd_soc_unregister_dai(dai);
return ret;
}
config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK
bool "SOF support for SoundWire"
- depends on SOUNDWIRE && ACPI
+ depends on ACPI
help
This adds support for SoundWire with Sound Open Firmware
for Intel(R) platforms.
config SND_SOC_SOF_INTEL_SOUNDWIRE
tristate
+ select SOUNDWIRE
select SOUNDWIRE_INTEL
help
This option is not user-selectable but automagically handled by
}
/* enable controller wake up event for all codecs with jack connectors */
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable)
{
struct hda_bus *hbus = sof_to_hbus(sdev);
struct hdac_bus *bus = sof_to_bus(sdev);
struct hda_codec *codec;
unsigned int mask = 0;
- list_for_each_codec(codec, hbus)
- if (codec->jacktbl.used)
- mask |= BIT(codec->core.addr);
+ if (enable) {
+ list_for_each_codec(codec, hbus)
+ if (codec->jacktbl.used)
+ mask |= BIT(codec->core.addr);
+ }
snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
}
void hda_codec_jack_check(struct snd_sof_dev *sdev)
{
struct hda_bus *hbus = sof_to_hbus(sdev);
- struct hdac_bus *bus = sof_to_bus(sdev);
struct hda_codec *codec;
- /* disable controller Wake Up event*/
- snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
-
list_for_each_codec(codec, hbus)
/*
 * Wake up all jack-detecting codecs regardless of whether an event
* has been recorded in STATESTS
*/
if (codec->jacktbl.used)
- schedule_delayed_work(&codec->jackpoll_work,
- codec->jackpoll_interval);
+ pm_request_resume(&codec->core.dev);
}
#else
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable) {}
void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
if (!hdev->bus->audio_component) {
dev_dbg(sdev->dev,
"iDisp hw present but no driver\n");
- goto error;
+ ret = -ENOENT;
+ goto out;
}
hda_priv->need_display_power = true;
}
* other return codes without modification
*/
if (ret == 0)
- goto error;
+ ret = -ENOENT;
}
- return ret;
-
-error:
- snd_hdac_ext_bus_device_exit(hdev);
- return -ENOENT;
-
+out:
+ if (ret < 0) {
+ snd_hdac_device_unregister(hdev);
+ put_device(&hdev->dev);
+ }
#else
hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev)
return -ENOMEM;
ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, HDA_DEV_ASOC);
+#endif
return ret;
-#endif
}
/* Codec initialization */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
if (runtime_suspend)
- hda_codec_jack_wake_enable(sdev);
+ hda_codec_jack_wake_enable(sdev, true);
/* power down all hda link */
snd_hdac_ext_bus_link_power_down_all(bus);
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
/* check jack status */
- if (runtime_resume)
- hda_codec_jack_check(sdev);
+ if (runtime_resume) {
+ hda_codec_jack_wake_enable(sdev, false);
+ if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+ hda_codec_jack_check(sdev);
+ }
/* turn off the links that were off before suspend */
list_for_each_entry(hlink, &bus->hlink_list, list) {
*/
void hda_codec_probe_bus(struct snd_sof_dev *sdev,
bool hda_codec_use_common_hdmi);
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable);
void hda_codec_jack_check(struct snd_sof_dev *sdev);
#endif /* CONFIG_SND_SOC_SOF_HDA */
if (!id)
return -ENODEV;
- ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
- if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
- dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
- return -ENODEV;
+ if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+ ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
}
-
dev_dbg(dev, "ACPI DSP detected");
sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
const struct snd_sof_dsp_ops *ops;
int ret;
- ret = snd_intel_dsp_driver_probe(pci);
- if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
- dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
- return -ENODEV;
+ if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+ ret = snd_intel_dsp_driver_probe(pci);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
}
dev_dbg(&pci->dev, "PCI DSP detected");
const struct audioformat *fmt, int rate)
{
struct usb_device *dev = chip->dev;
- struct usb_host_interface *alts;
- unsigned int ep;
unsigned char data[3];
int err, crate;
- alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting);
- if (!alts)
- return -EINVAL;
- if (get_iface_desc(alts)->bNumEndpoints < 1)
- return -EINVAL;
- ep = get_endpoint(alts, 0)->bEndpointAddress;
-
/* if endpoint doesn't have sampling rate control, bail out */
if (!(fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE))
return 0;
data[2] = rate >> 16;
err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
- UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
- data, sizeof(data));
+ UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+ fmt->endpoint, data, sizeof(data));
if (err < 0) {
dev_err(&dev->dev, "%d:%d: cannot set freq %d to ep %#x\n",
- fmt->iface, fmt->altsetting, rate, ep);
+ fmt->iface, fmt->altsetting, rate, fmt->endpoint);
return err;
}
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
- UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
- data, sizeof(data));
+ UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+ fmt->endpoint, data, sizeof(data));
if (err < 0) {
dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n",
- fmt->iface, fmt->altsetting, ep);
+ fmt->iface, fmt->altsetting, fmt->endpoint);
chip->sample_rate_read_error++;
return 0; /* some devices don't support reading */
}
/* If the interface has been already set up, just set EP parameters */
if (!ep->iface_ref->need_setup) {
+ /* sample rate setup of UAC1 is per endpoint, and we need
+ * to update it at each EP configuration
+ */
+ if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
+ err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
+ ep->cur_rate);
+ if (err < 0)
+ goto unlock;
+ }
err = snd_usb_endpoint_set_params(chip, ep);
if (err < 0)
goto unlock;
unsigned int nr_rates;
int i, err;
+ /* performing the rate verification may lead to unexpected USB bus
+ * behavior afterwards for some unknown reason. Do this only for
+ * known devices.
+ */
+ switch (USB_ID_VENDOR(chip->usb_id)) {
+ case 0x07fd: /* MOTU */
+ break;
+ default:
+ return 0; /* don't perform the validation as default */
+ }
+
table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ifnum, alts);
}
-/* Pioneer devices: playback and capture streams sharing the same iface/altset
+/* Playback and capture EPs on Pioneer devices share the same iface/altset,
+ * but they don't seem to work well with the implicit fb mode, hence we
+ * just return as if the sync were already set up.
*/
-static int add_pioneer_implicit_fb(struct snd_usb_audio *chip,
- struct audioformat *fmt,
- struct usb_host_interface *alts)
+static int skip_pioneer_sync_ep(struct snd_usb_audio *chip,
+ struct audioformat *fmt,
+ struct usb_host_interface *alts)
{
struct usb_endpoint_descriptor *epd;
(epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
USB_ENDPOINT_USAGE_IMPLICIT_FB))
return 0;
- return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 1,
- alts->desc.bInterfaceNumber, alts);
+ return 1; /* don't handle with the implicit fb, just skip sync EP */
}
static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
return 1;
}
- /* Pioneer devices implicit feedback with vendor spec class */
+ /* Pioneer devices with vendor spec class */
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
alts->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
USB_ID_VENDOR(chip->usb_id) == 0x2b73 /* Pioneer */) {
- if (add_pioneer_implicit_fb(chip, fmt, alts))
+ if (skip_pioneer_sync_ep(chip, fmt, alts))
return 1;
}
check_fmts.bits[1] = (u32)(fp->formats >> 32);
snd_mask_intersect(&check_fmts, fmts);
if (snd_mask_empty(&check_fmts)) {
- hwc_debug(" > check: no supported format %d\n", fp->format);
+ hwc_debug(" > check: no supported format 0x%llx\n", fp->formats);
return 0;
}
/* check the channels */
return apply_hw_params_minmax(it, rmin, rmax);
}
-static int hw_rule_format(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
+static int apply_hw_params_format_bits(struct snd_mask *fmt, u64 fbits)
{
- struct snd_usb_substream *subs = rule->private;
- const struct audioformat *fp;
- struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- u64 fbits;
u32 oldbits[2];
int changed;
- hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
- fbits = 0;
- list_for_each_entry(fp, &subs->fmt_list, list) {
- if (!hw_check_valid_format(subs, params, fp))
- continue;
- fbits |= fp->formats;
- }
-
oldbits[0] = fmt->bits[0];
oldbits[1] = fmt->bits[1];
fmt->bits[0] &= (u32)fbits;
return changed;
}
+static int hw_rule_format(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct audioformat *fp;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ u64 fbits;
+
+ hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
+ fbits = 0;
+ list_for_each_entry(fp, &subs->fmt_list, list) {
+ if (!hw_check_valid_format(subs, params, fp))
+ continue;
+ fbits |= fp->formats;
+ }
+ return apply_hw_params_format_bits(fmt, fbits);
+}
+
static int hw_rule_period_time(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
return apply_hw_params_minmax(it, pmin, UINT_MAX);
}
-/* apply PCM hw constraints from the concurrent sync EP */
-static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
- struct snd_usb_substream *subs)
+/* get the EP or the sync EP for implicit fb when it's already set up */
+static const struct snd_usb_endpoint *
+get_sync_ep_from_substream(struct snd_usb_substream *subs)
{
struct snd_usb_audio *chip = subs->stream->chip;
- struct snd_usb_endpoint *ep;
const struct audioformat *fp;
- int err;
+ const struct snd_usb_endpoint *ep;
list_for_each_entry(fp, &subs->fmt_list, list) {
ep = snd_usb_get_endpoint(chip, fp->endpoint);
if (ep && ep->cur_rate)
- goto found;
+ return ep;
if (!fp->implicit_fb)
continue;
/* for the implicit fb, check the sync ep as well */
ep = snd_usb_get_endpoint(chip, fp->sync_ep);
if (ep && ep->cur_rate)
- goto found;
+ return ep;
}
- return 0;
+ return NULL;
+}
- found:
- if (!find_format(&subs->fmt_list, ep->cur_format, ep->cur_rate,
- ep->cur_channels, false, NULL)) {
- usb_audio_dbg(chip, "EP 0x%x being used, but not applicable\n",
- ep->ep_num);
+/* additional hw constraints for implicit feedback mode */
+static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
return 0;
- }
- usb_audio_dbg(chip, "EP 0x%x being used, using fixed params:\n",
- ep->ep_num);
- usb_audio_dbg(chip, "rate=%d, period_size=%d, periods=%d\n",
- ep->cur_rate, ep->cur_period_frames,
- ep->cur_buffer_periods);
+ hwc_debug("applying %s\n", __func__);
+ return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
+}
- runtime->hw.formats = subs->formats;
- runtime->hw.rate_min = runtime->hw.rate_max = ep->cur_rate;
- runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
- runtime->hw.periods_min = runtime->hw.periods_max =
- ep->cur_buffer_periods;
+static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_interval *it;
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_channels, subs,
- SNDRV_PCM_HW_PARAM_FORMAT,
- SNDRV_PCM_HW_PARAM_RATE,
- -1);
- if (err < 0)
- return err;
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
+ return 0;
- err = snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- ep->cur_period_frames,
- ep->cur_period_frames);
- if (err < 0)
- return err;
+ hwc_debug("applying %s\n", __func__);
+ it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
+}
- return 1; /* notify the finding */
+static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_interval *it;
+
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
+ return 0;
+
+ hwc_debug("applying %s\n", __func__);
+ it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ return apply_hw_params_minmax(it, ep->cur_period_frames,
+ ep->cur_period_frames);
+}
+
+static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_interval *it;
+
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
+ return 0;
+
+ hwc_debug("applying %s\n", __func__);
+ it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
+ return apply_hw_params_minmax(it, ep->cur_buffer_periods,
+ ep->cur_buffer_periods);
}
/*
static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs)
{
- struct snd_usb_audio *chip = subs->stream->chip;
const struct audioformat *fp;
unsigned int pt, ptmin;
int param_period_time_if_needed = -1;
int err;
- mutex_lock(&chip->mutex);
- err = apply_hw_constraint_from_sync(runtime, subs);
- mutex_unlock(&chip->mutex);
- if (err < 0)
- return err;
- if (err > 0) /* found the matching? */
- goto add_extra_rules;
-
runtime->hw.formats = subs->formats;
runtime->hw.rate_min = 0x7fffffff;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
hw_rule_rate, subs,
+ SNDRV_PCM_HW_PARAM_RATE,
SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_CHANNELS,
param_period_time_if_needed,
if (err < 0)
return err;
-add_extra_rules:
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
hw_rule_channels, subs,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_RATE,
param_period_time_if_needed,
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
hw_rule_format, subs,
+ SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_RATE,
SNDRV_PCM_HW_PARAM_CHANNELS,
param_period_time_if_needed,
return err;
}
+ /* additional hw constraints for implicit fb */
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_format_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ hw_rule_rate_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ hw_rule_period_size_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
+ hw_rule_periods_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_PERIODS, -1);
+ if (err < 0)
+ return err;
+
return 0;
}
subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
}
-
-/*
- * Pioneer DJ DJM-900NXS2
- * Device needs to know the sample rate each time substream is started
- */
-static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs)
-{
- unsigned int cur_rate = subs->data_endpoint->cur_rate;
- /* Convert sample rate value to little endian */
- u8 sr[3];
-
- sr[0] = cur_rate & 0xff;
- sr[1] = (cur_rate >> 8) & 0xff;
- sr[2] = (cur_rate >> 16) & 0xff;
-
- /* Configure device */
- usb_set_interface(subs->dev, 0, 1);
- snd_usb_ctl_msg(subs->stream->chip->dev,
- usb_rcvctrlpipe(subs->stream->chip->dev, 0),
- 0x01, 0x22, 0x0100, 0x0082, &sr, 0x0003);
-
- return 0;
-}
-
void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
const struct audioformat *fmt)
{
case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
set_format_emu_quirk(subs, fmt);
break;
- case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
- case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
- pioneer_djm_set_format_quirk(subs);
- break;
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
subs->stream_offset_adj = 2;
break;
ret = -EIO;
break;
}
- fprintf(stdout, "GPIO EVENT at %llu on line %d (%d|%d) ",
- event.timestamp_ns, event.offset, event.line_seqno,
+ fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
+ (uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
event.seqno);
switch (event.id) {
case GPIO_V2_LINE_EVENT_RISING_EDGE:
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <linux/gpio.h>
#include <poll.h>
#include <stdbool.h>
return EXIT_FAILURE;
}
- printf("line %u: %s at %llu\n",
- chg.info.offset, event, chg.timestamp_ns);
+ printf("line %u: %s at %" PRIu64 "\n",
+ chg.info.offset, event, (uint64_t)chg.timestamp_ns);
}
}
}
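
The two tool fixes above address the same portability trap: the kernel ABI field is a __u64, which maps to unsigned long on some 64-bit libcs and unsigned long long on others, so %llu is not universally correct. As a stand-alone reminder (the value is invented):

/* Illustrative sketch: portable printing of a 64-bit value. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t timestamp_ns = 1234567890123ULL;

	printf("GPIO EVENT at %" PRIu64 "\n", timestamp_ns);
	return 0;
}
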
meta_left = btf->raw_size - sizeof(*hdr);
- if (!meta_left) {
- pr_debug("BTF has no data\n");
- return -EINVAL;
- }
-
if (meta_left < hdr->str_off + hdr->str_len) {
pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
return -EINVAL;
return map;
}
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
- struct perf_evsel *evsel, int idx, int cpu,
- int thread)
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
struct perf_sample_id *sid = SID(evsel, cpu, thread);
sid->idx = idx;
- if (evlist->cpus && cpu >= 0)
- sid->cpu = evlist->cpus->map[cpu];
- else
- sid->cpu = -1;
- if (!evsel->system_wide && evlist->threads && thread >= 0)
- sid->tid = perf_thread_map__pid(evlist->threads, thread);
- else
- sid->tid = -1;
+ sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+ sid->tid = perf_thread_map__pid(evsel->threads, thread);
}
static struct perf_mmap*
if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
fd) < 0)
return -1;
- perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
- thread);
+ perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
}
}
warnings += ret;
out:
- if (ret < 0) {
- /*
- * Fatal error. The binary is corrupt or otherwise broken in
- * some way, or objtool itself is broken. Fail the kernel
- * build.
- */
- return ret;
- }
-
+ /*
+ * For now, don't fail the kernel build on fatal warnings. These
+ * errors are still fairly common due to the growing matrix of
+ * supported toolchains and their recent pace of change.
+ */
return 0;
}
symtab = find_section_by_name(elf, ".symtab");
if (!symtab) {
- WARN("missing symbol table");
- return -1;
+ /*
+ * A missing symbol table is actually possible if it's an empty
+ * .o file. This can happen for thunk_64.o.
+ */
+ return 0;
}
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
list_add(&sym->list, entry);
elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+ /*
+ * Don't store empty STT_NOTYPE symbols in the rbtree. They
+ * can exist within a function, confusing the sorting.
+ */
+ if (!sym->len)
+ rb_erase(&sym->node, &sym->sec->symbol_tree);
}
if (stats)
enum {
OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
+ OUTPUT_TYPE_OTHER,
OUTPUT_TYPE_MAX
};
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
+
+ [OUTPUT_TYPE_OTHER] = {
+ .user_set = false,
+
+ .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+ PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+ PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+ PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+ PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+ },
};
struct evsel_script {
case PERF_TYPE_SYNTH:
return OUTPUT_TYPE_SYNTH;
default:
- return type;
+ if (type < PERF_TYPE_MAX)
+ return type;
}
+
+ return OUTPUT_TYPE_OTHER;
}
static inline unsigned int attr_type(unsigned int type)
return false;
}
+static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+{
+ if (!ev1->pmu_name || !ev2->pmu_name)
+ return false;
+
+ return !strcmp(ev1->pmu_name, ev2->pmu_name);
+}
+
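+
For context, the helper above exists because strcmp() on a NULL pmu_name is undefined behaviour. The same guard pattern, stand-alone (same_name is our name):

/* Illustrative sketch of a NULL-safe name comparison. */
#include <stdio.h>
#include <string.h>

static int same_name(const char *a, const char *b)
{
	if (!a || !b)
		return 0;	/* a missing name never matches */
	return strcmp(a, b) == 0;
}

int main(void)
{
	printf("%d %d\n", same_name("cpu", "cpu"), same_name(NULL, "cpu"));
	return 0;
}
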
/**
* Find a group of events in perf_evlist that correspond to those from a parsed
* metric expression. Note, as find_evsel_group is called in the same order as
*/
if (!has_constraint &&
ev->leader != metric_events[i]->leader &&
- !strcmp(ev->leader->pmu_name,
- metric_events[i]->leader->pmu_name))
+ evsel_same_pmu(ev->leader, metric_events[i]->leader))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
set_bit(ev->idx, evlist_used);
struct metricgroup_add_iter_data {
struct list_head *metric_list;
const char *metric;
- struct metric **m;
struct expr_ids *ids;
int *ret;
bool *has_match;
void *data)
{
struct metricgroup_add_iter_data *d = data;
+ struct metric *m = NULL;
int ret;
if (!match_pe_metric(pe, d->metric))
return 0;
- ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
+ ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
if (ret)
return ret;
.metric_list = &list,
.metric = metric,
.metric_no_group = metric_no_group,
- .m = &m,
.ids = &ids,
.has_match = &has_match,
.ret = &ret,
isst_ctdp_display_information_end(outf);
}
+static void adjust_scaling_max_from_base_freq(int cpu);
+
static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
void *arg4)
{
int pkg_id = get_physical_package_id(cpu);
int die_id = get_physical_die_id(cpu);
+ /* Wait for updated base frequencies */
+ usleep(2000);
+
fprintf(stderr, "Option is set to online/offline\n");
ctdp_level.core_cpumask_size =
alloc_cpu_set(&ctdp_level.core_cpumask);
if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
fprintf(stderr, "online cpu %d\n", i);
set_cpu_online_offline(i, 1);
+ adjust_scaling_max_from_base_freq(i);
} else {
fprintf(stderr, "offline cpu %d\n", i);
set_cpu_online_offline(i, 0);
return 0;
}
+static int no_turbo(void)
+{
+ return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
+}
+
+static void adjust_scaling_max_from_base_freq(int cpu)
+{
+ int base_freq, scaling_max_freq;
+
+ scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
+ base_freq = get_cpufreq_base_freq(cpu);
+ if (scaling_max_freq < base_freq || no_turbo())
+ set_cpufreq_scaling_min_max(cpu, 1, base_freq);
+}
+
+static void adjust_scaling_min_from_base_freq(int cpu)
+{
+ int base_freq, scaling_min_freq;
+
+ scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
+ base_freq = get_cpufreq_base_freq(cpu);
+ if (scaling_min_freq < base_freq)
+ set_cpufreq_scaling_min_max(cpu, 0, base_freq);
+}
+
static int set_clx_pbf_cpufreq_scaling_min_max(int cpu)
{
struct isst_pkg_ctdp_level_info *ctdp_level;
continue;
set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
+ adjust_scaling_min_from_base_freq(i);
}
}
BUILD_FAILURE = auto()
TEST_FAILURE = auto()
-def get_kernel_root_path():
- parts = sys.argv[0] if not __file__ else __file__
- parts = os.path.realpath(parts).split('tools/testing/kunit')
+def get_kernel_root_path() -> str:
+ path = sys.argv[0] if not __file__ else __file__
+ parts = os.path.realpath(path).split('tools/testing/kunit')
if len(parts) != 2:
sys.exit(1)
return parts[0]
exec_result.elapsed_time))
return parse_result
-def add_common_opts(parser):
+def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
'directory.',
help='Run all KUnit tests through allyesconfig',
action='store_true')
-def add_build_opts(parser):
+def add_build_opts(parser) -> None:
parser.add_argument('--jobs',
help='As in the make command, "Specifies the number of '
'jobs (commands) to run simultaneously."',
type=int, default=8, metavar='jobs')
-def add_exec_opts(parser):
+def add_exec_opts(parser) -> None:
parser.add_argument('--timeout',
help='maximum number of seconds to allow for all tests '
'to run. This does not include time taken to build the '
default=300,
metavar='timeout')
-def add_parse_opts(parser):
+def add_parse_opts(parser) -> None:
parser.add_argument('--raw_output', help='don\'t format output from kernel',
action='store_true')
parser.add_argument('--json',
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
request = KunitRequest(cli_args.raw_output,
cli_args.timeout,
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
request = KunitConfigRequest(cli_args.build_dir,
cli_args.make_options)
sys.exit(1)
elif cli_args.subcommand == 'build':
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
request = KunitBuildRequest(cli_args.jobs,
cli_args.build_dir,
sys.exit(1)
elif cli_args.subcommand == 'exec':
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
exec_request = KunitExecRequest(cli_args.timeout,
cli_args.build_dir,
import collections
import re
+from typing import List, Set
CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
class Kconfig(object):
"""Represents defconfig or .config specified using the Kconfig language."""
- def __init__(self):
- self._entries = []
+ def __init__(self) -> None:
+ self._entries = [] # type: List[KconfigEntry]
- def entries(self):
+ def entries(self) -> Set[KconfigEntry]:
return set(self._entries)
def add_entry(self, entry: KconfigEntry) -> None:
from kunit_parser import TestStatus
-def get_json_result(test_result, def_config, build_dir, json_path):
+def get_json_result(test_result, def_config, build_dir, json_path) -> str:
sub_groups = []
# Each test suite is mapped to a KernelCI sub_group
import os
import shutil
import signal
+from typing import Iterator
from contextlib import ExitStack
class LinuxSourceTreeOperations(object):
"""An abstraction over command line operations performed on a source tree."""
- def make_mrproper(self):
+ def make_mrproper(self) -> None:
try:
subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
except OSError as e:
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_olddefconfig(self, build_dir, make_options):
+ def make_olddefconfig(self, build_dir, make_options) -> None:
command = ['make', 'ARCH=um', 'olddefconfig']
if make_options:
command.extend(make_options)
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_allyesconfig(self, build_dir, make_options):
+ def make_allyesconfig(self, build_dir, make_options) -> None:
kunit_parser.print_with_timestamp(
'Enabling all CONFIGs for UML...')
command = ['make', 'ARCH=um', 'allyesconfig']
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
- def make(self, jobs, build_dir, make_options):
+ def make(self, jobs, build_dir, make_options) -> None:
command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
if make_options:
command.extend(make_options)
if stderr: # likely only due to build warnings
print(stderr.decode())
- def linux_bin(self, params, timeout, build_dir):
+ def linux_bin(self, params, timeout, build_dir) -> None:
"""Runs the Linux UML binary. Must be named 'linux'."""
linux_bin = get_file_path(build_dir, 'linux')
outfile = get_outfile_path(build_dir)
stderr=subprocess.STDOUT)
process.wait(timeout)
-def get_kconfig_path(build_dir):
+def get_kconfig_path(build_dir) -> str:
return get_file_path(build_dir, KCONFIG_PATH)
-def get_kunitconfig_path(build_dir):
+def get_kunitconfig_path(build_dir) -> str:
return get_file_path(build_dir, KUNITCONFIG_PATH)
-def get_outfile_path(build_dir):
+def get_outfile_path(build_dir) -> str:
return get_file_path(build_dir, OUTFILE_PATH)
class LinuxSourceTree(object):
"""Represents a Linux kernel source tree with KUnit tests."""
- def __init__(self):
- self._ops = LinuxSourceTreeOperations()
+ def __init__(self, build_dir: str, load_config=True, defconfig=DEFAULT_KUNITCONFIG_PATH) -> None:
signal.signal(signal.SIGINT, self.signal_handler)
- def clean(self):
- try:
- self._ops.make_mrproper()
- except ConfigError as e:
- logging.error(e)
- return False
- return True
+ self._ops = LinuxSourceTreeOperations()
+
+ if not load_config:
+ return
- def create_kunitconfig(self, build_dir, defconfig=DEFAULT_KUNITCONFIG_PATH):
kunitconfig_path = get_kunitconfig_path(build_dir)
if not os.path.exists(kunitconfig_path):
shutil.copyfile(defconfig, kunitconfig_path)
- def read_kunitconfig(self, build_dir):
- kunitconfig_path = get_kunitconfig_path(build_dir)
self._kconfig = kunit_config.Kconfig()
self._kconfig.read_from_file(kunitconfig_path)
- def validate_config(self, build_dir):
+ def clean(self) -> bool:
+ try:
+ self._ops.make_mrproper()
+ except ConfigError as e:
+ logging.error(e)
+ return False
+ return True
+
+ def validate_config(self, build_dir) -> bool:
kconfig_path = get_kconfig_path(build_dir)
validated_kconfig = kunit_config.Kconfig()
validated_kconfig.read_from_file(kconfig_path)
return False
return True
- def build_config(self, build_dir, make_options):
+ def build_config(self, build_dir, make_options) -> bool:
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
os.mkdir(build_dir)
return False
return self.validate_config(build_dir)
- def build_reconfig(self, build_dir, make_options):
+ def build_reconfig(self, build_dir, make_options) -> bool:
"""Creates a new .config if it is not a subset of the .kunitconfig."""
kconfig_path = get_kconfig_path(build_dir)
if os.path.exists(kconfig_path):
print('Generating .config ...')
return self.build_config(build_dir, make_options)
- def build_um_kernel(self, alltests, jobs, build_dir, make_options):
+ def build_um_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
try:
if alltests:
self._ops.make_allyesconfig(build_dir, make_options)
return False
return self.validate_config(build_dir)
-	def run_kernel(self, args=[], build_dir='', timeout=None):
+	def run_kernel(self, args=None, build_dir='', timeout=None) -> Iterator[str]:
+		if args is None:
+			args = []
 		args.extend(['mem=1G', 'console=tty'])
self._ops.linux_bin(args, timeout, build_dir)
outfile = get_outfile_path(build_dir)
for line in file:
yield line
- def signal_handler(self, sig, frame):
+ def signal_handler(self, sig, frame) -> None:
logging.error('Build interruption occurred. Cleaning console.')
subprocess.call(['stty', 'sane'])
from datetime import datetime
from enum import Enum, auto
from functools import reduce
-from typing import List, Optional, Tuple
+from typing import Iterable, Iterator, List, Optional, Tuple
TestResult = namedtuple('TestResult', ['status','suites','log'])
class TestSuite(object):
- def __init__(self):
- self.status = None
- self.name = None
- self.cases = []
+ def __init__(self) -> None:
+ self.status = TestStatus.SUCCESS
+ self.name = ''
+ self.cases = [] # type: List[TestCase]
- def __str__(self):
- return 'TestSuite(' + self.status + ',' + self.name + ',' + str(self.cases) + ')'
+ def __str__(self) -> str:
+ return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
- def __repr__(self):
+ def __repr__(self) -> str:
return str(self)
class TestCase(object):
- def __init__(self):
- self.status = None
+ def __init__(self) -> None:
+ self.status = TestStatus.SUCCESS
self.name = ''
- self.log = []
+ self.log = [] # type: List[str]
- def __str__(self):
- return 'TestCase(' + self.status + ',' + self.name + ',' + str(self.log) + ')'
+ def __str__(self) -> str:
+ return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
- def __repr__(self):
+ def __repr__(self) -> str:
return str(self)
class TestStatus(Enum):
kunit_end_re = re.compile('(List of all partitions:|'
'Kernel panic - not syncing: VFS:)')
-def isolate_kunit_output(kernel_output):
+def isolate_kunit_output(kernel_output) -> Iterator[str]:
started = False
for line in kernel_output:
line = line.rstrip() # line always has a trailing \n
elif started:
yield line[prefix_len:] if prefix_len > 0 else line
-def raw_output(kernel_output):
+def raw_output(kernel_output) -> None:
for line in kernel_output:
print(line.rstrip())
RESET = '\033[0;0m'
-def red(text):
+def red(text) -> str:
return '\033[1;31m' + text + RESET
-def yellow(text):
+def yellow(text) -> str:
return '\033[1;33m' + text + RESET
-def green(text):
+def green(text) -> str:
return '\033[1;32m' + text + RESET
-def print_with_timestamp(message):
+def print_with_timestamp(message) -> None:
print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-def format_suite_divider(message):
+def format_suite_divider(message) -> str:
return '======== ' + message + ' ========'
-def print_suite_divider(message):
+def print_suite_divider(message) -> None:
print_with_timestamp(DIVIDER)
print_with_timestamp(format_suite_divider(message))
-def print_log(log):
+def print_log(log) -> None:
for m in log:
print_with_timestamp(m)
TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
-def consume_non_diagnositic(lines: List[str]) -> None:
+def consume_non_diagnostic(lines: List[str]) -> None:
while lines and not TAP_ENTRIES.match(lines[0]):
lines.pop(0)
-def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None:
+def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
while lines and not TAP_ENTRIES.match(lines[0]):
test_case.log.append(lines[0])
lines.pop(0)
OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
- save_non_diagnositic(lines, test_case)
+ save_non_diagnostic(lines, test_case)
if not lines:
test_case.status = TestStatus.TEST_CRASHED
return True
DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
- save_non_diagnositic(lines, test_case)
+ save_non_diagnostic(lines, test_case)
if not lines:
return False
line = lines[0]
def parse_test_case(lines: List[str]) -> Optional[TestCase]:
test_case = TestCase()
- save_non_diagnositic(lines, test_case)
+ save_non_diagnostic(lines, test_case)
while parse_diagnostic(lines, test_case):
pass
if parse_ok_not_ok_test_case(lines, test_case):
SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
def parse_subtest_header(lines: List[str]) -> Optional[str]:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if not lines:
return None
match = SUBTEST_HEADER.match(lines[0])
SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
def parse_subtest_plan(lines: List[str]) -> Optional[int]:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
match = SUBTEST_PLAN.match(lines[0])
if match:
lines.pop(0)
def parse_ok_not_ok_test_suite(lines: List[str],
test_suite: TestSuite,
expected_suite_index: int) -> bool:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if not lines:
test_suite.status = TestStatus.TEST_CRASHED
return False
else:
return False
-def bubble_up_errors(to_status, status_container_list) -> TestStatus:
- status_list = map(to_status, status_container_list)
- return reduce(max_status, status_list, TestStatus.SUCCESS)
+def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
+ return reduce(max_status, statuses, TestStatus.SUCCESS)
def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
+ max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
return max_status(max_test_case_status, test_suite.status)
def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]:
if not lines:
return None
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
test_suite = TestSuite()
test_suite.status = TestStatus.SUCCESS
name = parse_subtest_header(lines)
TAP_HEADER = re.compile(r'^TAP version 14$')
def parse_tap_header(lines: List[str]) -> bool:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if TAP_HEADER.match(lines[0]):
lines.pop(0)
return True
TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
def parse_test_plan(lines: List[str]) -> Optional[int]:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
match = TEST_PLAN.match(lines[0])
if match:
lines.pop(0)
else:
return None
-def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
- return bubble_up_errors(lambda x: x.status, test_suite_list)
+def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
+ return bubble_up_errors(x.status for x in test_suites)
def parse_test_result(lines: List[str]) -> TestResult:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if not lines or not parse_tap_header(lines):
return TestResult(TestStatus.NO_TESTS, [], lines)
expected_test_suite_num = parse_test_plan(lines)
struct bpf_spin_lock lock;
};
-/* Copies an rm binary to a temp file. dest is a mkstemp template */
-static int copy_rm(char *dest)
-{
- int fd_in, fd_out = -1, ret = 0;
- struct stat stat;
- char *buf = NULL;
-
- fd_in = open("/bin/rm", O_RDONLY);
- if (fd_in < 0)
- return -errno;
-
- fd_out = mkstemp(dest);
- if (fd_out < 0) {
- ret = -errno;
- goto out;
- }
-
- ret = fstat(fd_in, &stat);
- if (ret == -1) {
- ret = -errno;
- goto out;
- }
-
- buf = malloc(stat.st_blksize);
- if (!buf) {
- ret = -errno;
- goto out;
- }
-
- while (ret = read(fd_in, buf, stat.st_blksize), ret > 0) {
- ret = write(fd_out, buf, ret);
- if (ret < 0) {
- ret = -errno;
- goto out;
-
- }
- }
- if (ret < 0) {
- ret = -errno;
- goto out;
-
- }
-
- /* Set executable permission on the copied file */
- ret = chmod(dest, 0100);
- if (ret == -1)
- ret = -errno;
-
-out:
- free(buf);
- close(fd_in);
- close(fd_out);
- return ret;
-}
-
/* Fork and exec the provided rm binary and return the exit code of the
* forked process and its pid.
*/
void test_test_local_storage(void)
{
- char tmp_exec_path[PATH_MAX] = "/tmp/copy_of_rmXXXXXX";
+ char tmp_dir_path[64] = "/tmp/local_storageXXXXXX";
int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
struct local_storage *skel = NULL;
+ char tmp_exec_path[64];
+ char cmd[256];
skel = local_storage__open_and_load();
if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
task_fd))
goto close_prog;
- err = copy_rm(tmp_exec_path);
- if (CHECK(err < 0, "copy_rm", "err %d errno %d\n", err, errno))
+ if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp",
+ "unable to create tmpdir: %d\n", errno))
goto close_prog;
+ snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
+ tmp_dir_path);
+ snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
+ if (CHECK_FAIL(system(cmd)))
+ goto close_prog_rmdir;
+
rm_fd = open(tmp_exec_path, O_RDONLY);
if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d",
tmp_exec_path, rm_fd, errno))
- goto close_prog;
+ goto close_prog_rmdir;
if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
rm_fd))
- goto close_prog;
+ goto close_prog_rmdir;
	/* Sets skel->bss->monitored_pid to the pid of the forked child,
	 * forks a child process that executes tmp_exec_path and tries to
	 * unlink its executable. This operation should be denied by the
	 * loaded LSM program.
	 */
err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path);
if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err))
- goto close_prog_unlink;
+ goto close_prog_rmdir;
/* Set the process being monitored to be the current process */
skel->bss->monitored_pid = getpid();
- /* Remove the temporary created executable */
- err = unlink(tmp_exec_path);
- if (CHECK(err != 0, "unlink", "unable to unlink %s: %d", tmp_exec_path,
- errno))
- goto close_prog_unlink;
+ /* Move copy_of_rm to a new location so that it triggers the
+ * inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
+ */
+ snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
+ tmp_dir_path, tmp_dir_path);
+ if (CHECK_FAIL(system(cmd)))
+ goto close_prog_rmdir;
CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
"inode_local_storage not set\n");
serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
- goto close_prog;
+ goto close_prog_rmdir;
CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
"sk_local_storage not set\n");
if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
serv_sk))
- goto close_prog;
+ goto close_prog_rmdir;
-close_prog_unlink:
- unlink(tmp_exec_path);
+close_prog_rmdir:
+ snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
+ system(cmd);
close_prog:
close(serv_sk);
close(rm_fd);
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
bool is_self_unlink;
- int err;
if (pid != monitored_pid)
return 0;
return -EPERM;
}
- storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
+ return 0;
+}
+
+SEC("lsm/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct local_storage *storage;
+ int err;
+
+ /* new_dentry->d_inode can be NULL when the inode is renamed to a file
+ * that did not exist before. The helper should be able to handle this
+ * NULL pointer.
+ */
+ bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+ 0, 0);
if (!storage)
return 0;
inode_storage_result = -1;
bpf_spin_unlock(&storage->lock);
- err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+ err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
if (!err)
inode_storage_result = err;
return 0;
}
-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
-{
- __u32 pid = bpf_get_current_pid_tgid() >> 32;
- struct local_storage *storage;
-
- if (pid != monitored_pid)
- return 0;
-
- if (!file->f_inode)
- return 0;
-
- storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (!storage)
- return 0;
-
- bpf_spin_lock(&storage->lock);
- storage->value = DUMMY_STORAGE_VALUE;
- bpf_spin_unlock(&storage->lock);
- return 0;
-}
-
/* This uses the local storage to remember the inode of the binary that a
* process was originally executing.
*/
SEC("lsm/bprm_committed_creds")
void BPF_PROG(exec, struct linux_binprm *bprm)
{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
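+	/* ignore execs from processes other than the monitored one */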
+ if (pid != monitored_pid)
+ return;
+
storage = bpf_task_storage_get(&task_storage_map,
bpf_get_current_task_btf(), 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
storage->exec_inode = bprm->file->f_inode;
bpf_spin_unlock(&storage->lock);
}
+
+ storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+ 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return;
+
+ bpf_spin_lock(&storage->lock);
+ storage->value = DUMMY_STORAGE_VALUE;
+ bpf_spin_unlock(&storage->lock);
}
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 20
+#define MAX_NR_MAPS 21
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
int fixup_sk_storage_map[MAX_FIXUPS];
int fixup_map_event_output[MAX_FIXUPS];
int fixup_map_reuseport_array[MAX_FIXUPS];
+ int fixup_map_ringbuf[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t insn_processed;
int *fixup_sk_storage_map = test->fixup_sk_storage_map;
int *fixup_map_event_output = test->fixup_map_event_output;
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
+ int *fixup_map_ringbuf = test->fixup_map_ringbuf;
if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
fixup_map_reuseport_array++;
} while (*fixup_map_reuseport_array);
}
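+	/* patch BPF_LD_MAP_FD at each listed insn index with a real ringbuf map fd */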
+ if (*fixup_map_ringbuf) {
+ map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
+ 0, 4096);
+ do {
+ prog[*fixup_map_ringbuf].imm = map_fds[20];
+ fixup_map_ringbuf++;
+ } while (*fixup_map_ringbuf);
+ }
}
struct libcap {
.result = ACCEPT,
.result_unpriv = ACCEPT,
},
+{
+ "check valid spill/fill, ptr to mem",
+ .insns = {
+ /* reserve 8 byte ringbuf memory */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+ /* store a pointer to the reserved memory in R6 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* check whether the reservation was successful */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* spill R6(mem) into the stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ /* fill it back in R7 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
+ /* should be able to access *(R7) = 0 */
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
+ /* submit the reserved ringbuf memory */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_ringbuf = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+},
{
"check corrupted spill/fill",
.insns = {
check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
log_test $? 0 "Set metric with peer route on local side"
- log_test $? 0 "User specified metric on local address"
check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
log_test $? 0 "Set metric with peer route on peer side"
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
# 1: iptables -m policy rule count != 0
rval=$1
ip=$2
- lret=0
+ local lret=0
ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null
return 0
}
+# insert non-overlapping policies in a random order and check that
+# all of them can be fetched using the traffic selectors.
+check_random_order()
+{
+ local ns=$1
+ local log=$2
+
+ for i in $(seq 100); do
+ ip -net $ns xfrm policy flush
+ for j in $(seq 0 16 255 | sort -R); do
+ ip -net $ns xfrm policy add dst $j.0.0.0/24 dir out priority 10 action allow
+ done
+ for j in $(seq 0 16 255); do
+ if ! ip -net $ns xfrm policy get dst $j.0.0.0/24 dir out > /dev/null; then
+ echo "FAIL: $log" 1>&2
+ return 1
+ fi
+ done
+ done
+
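+	# same check again with IPv6 destination prefixes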
+ for i in $(seq 100); do
+ ip -net $ns xfrm policy flush
+ for j in $(seq 0 16 255 | sort -R); do
+ local addr=$(printf "e000:0000:%02x00::/56" $j)
+ ip -net $ns xfrm policy add dst $addr dir out priority 10 action allow
+ done
+ for j in $(seq 0 16 255); do
+ local addr=$(printf "e000:0000:%02x00::/56" $j)
+ if ! ip -net $ns xfrm policy get dst $addr dir out > /dev/null; then
+ echo "FAIL: $log" 1>&2
+ return 1
+ fi
+ done
+ done
+
+ ip -net $ns xfrm policy flush
+
+ echo "PASS: $log"
+ return 0
+}
+
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
check_hthresh_repeat "policies with repeated hthresh change"
+check_random_order ns3 "policies inserted in random order"
+
for i in 1 2 3 4;do ip netns del ns$i;done
exit $ret
LOAD_DFORM_TEST(ldu);
LOAD_XFORM_TEST(ldx);
LOAD_XFORM_TEST(ldux);
- LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stb);
STORE_XFORM_TEST(stbx);
STORE_DFORM_TEST(stbu);
STORE_XFORM_TEST(stdx);
STORE_DFORM_TEST(stdu);
STORE_XFORM_TEST(stdux);
+
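+	/* lmw/stmw are only supported in big-endian mode */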
+#ifdef __BIG_ENDIAN__
+ LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stmw);
+#endif
return rc;
}
int main(void)
{
- test_harness(test, "pkey_exec_prot");
+ return test_harness(test, "pkey_exec_prot");
}
int main(void)
{
- test_harness(test, "pkey_siginfo");
+ return test_harness(test, "pkey_siginfo");
}
return -EINVAL;
/* We can read the guest memory with __xxx_user() later on. */
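+	/* also reject addresses with tag bits set (e.g. arm64 tagged pointers) */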
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+ (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;