- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
+ - '__ata_qc_for_each'
+ - 'ata_qc_for_each'
+ - 'ata_qc_for_each_raw'
+ - 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- 'bio_for_each_integrity_vec'
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
+ - 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
+ - 'drm_atomic_for_each_plane_damage'
+ - 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
+ - 'for_each_card_components'
+ - 'for_each_card_links'
+ - 'for_each_card_links_safe'
+ - 'for_each_card_prelinks'
+ - 'for_each_card_rtds'
+ - 'for_each_card_rtds_safe'
+ - 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
+ - 'for_each_component_dais'
+ - 'for_each_component_dais_safe'
+ - 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dma_cap_mask'
+ - 'for_each_dpcm_be'
+ - 'for_each_dpcm_be_rollback'
+ - 'for_each_dpcm_be_safe'
+ - 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
+ - 'for_each_link_codecs'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_mem_range_rev'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
+ - 'for_each_msi_entry_safe'
- 'for_each_net'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_node_with_property'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
+ - 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_plane_in_state'
+ - 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_property_of_node'
+ - 'for_each_registered_fb'
- 'for_each_reserved_mem_region'
- - 'for_each_resv_unavail_range'
+ - 'for_each_rtd_codec_dai'
+ - 'for_each_rtd_codec_dai_rollback'
- 'for_each_rtdcom'
- 'for_each_rtdcom_safe'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_sg'
- 'for_each_sg_page'
+ - 'for_each_sibling_event'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_zone'
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
+ - 'i3c_bus_for_each_i2cdev'
+ - 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
+ - 'list_for_each_codec'
+ - 'list_for_each_codec_safe'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
+ - 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
+ - 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
+ - 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
+ - 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
+ - 'virtio_device_for_each_vq'
+ - 'xa_for_each'
+ - 'xas_for_each'
+ - 'xas_for_each_conflict'
+ - 'xas_for_each_marked'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
E: sparse@chrisli.org
D: Sparse maintainer 2009 - 2018
+N: Shaohua Li
+D: Worked on many parts of the kernel, from core x86, ACPI, PCI, KVM, MM,
+D: and much more. He was the maintainer of MD from 2016 to 2018. Shaohua
+D: passed away late 2018, he will be greatly missed.
+W: https://www.spinics.net/lists/raid/msg61993.html
+
N: Stephan Linz
E: linz@mazet.de
E: Stephan.Linz@gmx.de
size in 512B sectors of the zones of the device, with
the eventual exception of the last zone of the device
which may be smaller.
+
+What: /sys/block/<disk>/queue/io_timeout
+Date: November 2018
+Contact: Weiping Zhang <zhangweiping@didiglobal.com>
+Description:
+ io_timeout is the request timeout in milliseconds. If a request
+ does not complete in this time then the block driver timeout
+ handler is invoked. That timeout handler can decide to retry
+ the request, to fail it or to start a device recovery strategy.
statistics (bd_count, bd_reads, bd_writes) in a format
similar to block layer statistics file format.
+What: /sys/block/zram<id>/writeback_limit_enable
+Date: November 2018
+Contact: Minchan Kim <minchan@kernel.org>
+Description:
+	The writeback_limit_enable file is read-write and specifies
+	whether the writeback_limit feature is enabled. "1" means the
+	feature is enabled; "0", the initial state, means no limit.
+
What: /sys/block/zram<id>/writeback_limit
Date: November 2018
Contact: Minchan Kim <minchan@kernel.org>
Description:
The writeback_limit file is read-write and specifies the maximum
amount of writeback ZRAM can do. The limit could be changed
- in run time and "0" means disable the limit.
- No limit is the initial state.
+ in run time.
a bug in kernel.org bugzilla and send email to
linux-kernel@vger.kernel.org, referencing the bugzilla URL. (For more
information on the linux-kernel mailing list see
-http://www.tux.org/lkml/).
+http://vger.kernel.org/lkml/).
Tips for reporting bugs
than maximum throughput. In these cases, consider setting the
strict_guarantees parameter.
+slice_idle_us
+-------------
+
+Controls the same tuning parameter as slice_idle, but in microseconds.
+Either tunable can be used to set idling behavior. Afterwards, the
+other tunable will reflect the newly set value in sysfs.
+
strict_guarantees
-----------------
zoned=[0/1]: Default: 0
0: Block device is exposed as a random-access block device.
- 1: Block device is exposed as a host-managed zoned block device.
+ 1: Block device is exposed as a host-managed zoned block device. Requires
+ CONFIG_BLK_DEV_ZONED.
zone_size=[MB]: Default: 256
Per zone size when exposed as a zoned block device. Must be a power of two.
IO to sleep for this amount of microseconds before entering classic
polling.
+io_timeout (RW)
+---------------
+io_timeout is the request timeout in milliseconds. If a request does not
+complete in this time then the block driver timeout handler is invoked.
+That timeout handler can decide to retry the request, to fail it or to start
+a device recovery strategy.
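+For example, writing 30000 to this file would allow requests up to 30
+seconds to complete before the timeout handler is invoked (the value here
+is purely illustrative).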
+
iostats (RW)
-------------
This file is used to control (on/off) the iostats accounting of the
A brief description of exported device attributes. For more details please
read Documentation/ABI/testing/sysfs-block-zram.
-Name              access            description
-----              ------            -----------
-disksize          RW                show and set the device's disk size
-initstate         RO                shows the initialization state of the device
-reset             WO                trigger device reset
-mem_used_max      WO                reset the `mem_used_max' counter (see later)
-mem_limit         WO                specifies the maximum amount of memory ZRAM can use
-                                    to store the compressed data
-writeback_limit   WO                specifies the maximum amount of write IO zram can
-                                    write out to backing device as 4KB unit
-max_comp_streams  RW                the number of possible concurrent compress operations
-comp_algorithm    RW                show and change the compression algorithm
-compact           WO                trigger memory compaction
-debug_stat        RO                this file is used for zram debugging purposes
-backing_dev       RW                set up backend storage for zram to write out
-idle              WO                mark allocated slot as idle
+Name                    access            description
+----                    ------            -----------
+disksize                RW                show and set the device's disk size
+initstate               RO                shows the initialization state of the device
+reset                   WO                trigger device reset
+mem_used_max            WO                reset the `mem_used_max' counter (see later)
+mem_limit               WO                specifies the maximum amount of memory ZRAM can use
+                                          to store the compressed data
+writeback_limit         WO                specifies the maximum amount of write IO zram can
+                                          write out to backing device as 4KB unit
+writeback_limit_enable  RW                show and set writeback_limit feature
+max_comp_streams        RW                the number of possible concurrent compress operations
+comp_algorithm          RW                show and change the compression algorithm
+compact                 WO                trigger memory compaction
+debug_stat              RO                this file is used for zram debugging purposes
+backing_dev             RW                set up backend storage for zram to write out
+idle                    WO                mark allocated slot as idle
User space is advised to use the following files to read the device statistics.
If there are lots of write IO with flash device, potentially, it has
flash wearout problem so that admin needs to design write limitation
to guarantee storage health for entire product life.
-To overcome the concern, zram supports "writeback_limit".
-The "writeback_limit"'s default value is 0 so that it doesn't limit
-any writeback. If admin want to measure writeback count in a certain
-period, he could know it via /sys/block/zram0/bd_stat's 3rd column.
+
+To overcome the concern, zram supports "writeback_limit" feature.
+The default value of "writeback_limit_enable" is 0, so it doesn't limit
+any writeback. IOW, if the admin wants to apply a writeback budget, he
+should enable writeback_limit_enable via
+
+ $ echo 1 > /sys/block/zramX/writeback_limit_enable
+
+Once writeback_limit_enable is set, zram doesn't allow any writeback
+until the admin sets the budget via /sys/block/zramX/writeback_limit.
+
+(If the admin doesn't enable writeback_limit_enable, the writeback_limit
+value assigned via /sys/block/zramX/writeback_limit is meaningless.)
If admin want to limit writeback as per-day 400M, he could do it
like below.
- MB_SHIFT=20
- 4K_SHIFT=12
- echo $((400<<MB_SHIFT>>4K_SHIFT)) > \
- /sys/block/zram0/writeback_limit.
+	$ MB_SHIFT=20
+	$ PAGE_SHIFT=12
+	$ echo $((400<<MB_SHIFT>>PAGE_SHIFT)) > \
+		/sys/block/zram0/writeback_limit
+	$ echo 1 > /sys/block/zram0/writeback_limit_enable
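+
+(As a sanity check of the arithmetic: 400 << 20 >> 12 = 102400, i.e. 400MB
+expressed as a number of 4KB pages, matching the 4KB unit that the
+writeback_limit file expects.)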
-If admin want to allow further write again, he could do it like below
+If the admin wants to allow further writes again once the budget is
+exhausted, he could do it like below
- echo 0 > /sys/block/zram0/writeback_limit
+	$ echo $((400<<MB_SHIFT>>PAGE_SHIFT)) > \
+		/sys/block/zram0/writeback_limit
If admin want to see remaining writeback budget since he set,
- cat /sys/block/zram0/writeback_limit
+ $ cat /sys/block/zramX/writeback_limit
+
+If admin want to disable writeback limit, he could do
+
+ $ echo 0 > /sys/block/zramX/writeback_limit_enable
The writeback_limit count will reset whenever you reset zram(e.g.,
system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of
writeback happened until you reset the zram to allocate extra writeback
budget in next setting is user's job.
+If the admin wants to measure the writeback count in a certain period, he
+could find it via /sys/block/zram0/bd_stat's 3rd column.
+
= memory tracking
With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the
------------------------------
A: YES. BPF instructions, arguments to BPF programs, set of helper
functions and their arguments, recognized return codes are all part
-of ABI. However when tracing programs are using bpf_probe_read() helper
-to walk kernel internal datastructures and compile with kernel
-internal headers these accesses can and will break with newer
-kernels. The union bpf_attr -> kern_version is checked at load time
-to prevent accidentally loading kprobe-based bpf programs written
-for a different kernel. Networking programs don't do kern_version check.
+of ABI. However there is one specific exception for tracing programs
+that use helpers like bpf_probe_read() to walk kernel internal
+data structures and are compiled against kernel internal headers. Both of
+these kernel internals are subject to change and can break with newer
+kernels such that the program needs to be adapted accordingly.
Q: How much stack space a BPF program uses?
-------------------------------------------
.. kernel-doc:: block/blk-lib.c
:export:
-.. kernel-doc:: block/blk-tag.c
- :export:
-
-.. kernel-doc:: block/blk-tag.c
- :internal:
-
.. kernel-doc:: block/blk-integrity.c
:export:
Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
will not need to allocate memory. The :c:func:`xa_reserve` function
-will store a reserved entry at the indicated index. Users of the normal
-API will see this entry as containing ``NULL``. If you do not need to
-use the reserved entry, you can call :c:func:`xa_release` to remove the
-unused entry. If another user has stored to the entry in the meantime,
-:c:func:`xa_release` will do nothing; if instead you want the entry to
-become ``NULL``, you should use :c:func:`xa_erase`.
+will store a reserved entry at the indicated index. Users of the
+normal API will see this entry as containing ``NULL``. If you do
+not need to use the reserved entry, you can call :c:func:`xa_release`
+to remove the unused entry. If another user has stored to the entry
+in the meantime, :c:func:`xa_release` will do nothing; if instead you
+want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
+Using :c:func:`xa_insert` on a reserved entry will fail.
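+
+A minimal sketch of the reserve/release pattern (the array and function
+names below are only illustrative)::
+
+    #include <linux/xarray.h>
+
+    DEFINE_XARRAY(cache);
+
+    int cache_set(unsigned long index, void *object)
+    {
+        /* Preallocate so a later store at this index cannot fail. */
+        int err = xa_reserve(&cache, index, GFP_KERNEL);
+
+        if (err)
+            return err;
+        if (!object) {
+            /* Nothing to store after all; drop the reservation. */
+            xa_release(&cache, index);
+            return 0;
+        }
+        /* Normal-API readers saw NULL here while the entry was reserved. */
+        xa_store(&cache, index, object, GFP_KERNEL);
+        return 0;
+    }
+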
If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
will return ``true``.
* :c:func:`xa_store_bh`
* :c:func:`xa_store_irq`
* :c:func:`xa_insert`
+ * :c:func:`xa_insert_bh`
+ * :c:func:`xa_insert_irq`
* :c:func:`xa_erase`
* :c:func:`xa_erase_bh`
* :c:func:`xa_erase_irq`
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
[2] ARM Linux Kernel documentation - PSCI bindings
Documentation/devicetree/bindings/arm/psci.txt
Required properties:
- compatible: standard compatible string for a Primecell peripheral,
- see Documentation/devicetree/bindings/arm/primecell.txt
+ see Documentation/devicetree/bindings/arm/primecell.yaml
for more details
should be: "arm,sp810", "arm,primecell"
===============================================================================
[1] ARM Linux kernel documentation
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.
-All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>.
+All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
* ARM PrimeCell Color LCD Controller PL110/PL111
-See also Documentation/devicetree/bindings/arm/primecell.txt
+See also Documentation/devicetree/bindings/arm/primecell.yaml
Required properties:
"atmel,24c256",
"atmel,24c512",
"atmel,24c1024",
+ "atmel,24c2048",
If <manufacturer> is not "atmel", then a fallback must be used
with the same <model> and "atmel" as manufacturer.
"marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
SoCs (either from AP or CP), see
- Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
- and
Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
for specific details about the offset property.
--- /dev/null
+STM32 Hardware Spinlock Device Binding
+-------------------------------------
+
+Required properties :
+- compatible : should be "st,stm32-hwspinlock".
+- reg : the register address of hwspinlock.
+- #hwlock-cells : hwlock users only use the hwlock id to represent a specific
+ hwlock, so the number of cells should be <1> here.
+- clock-names : Must contain "hsem".
+- clocks : Must contain a phandle entry for the clock in clock-names, see the
+ common clock bindings.
+
+Please look at the generic hwlock binding for usage information for consumers,
+"Documentation/devicetree/bindings/hwlock/hwlock.txt"
+
+Example of hwlock provider:
+ hwspinlock@4c000000 {
+ compatible = "st,stm32-hwspinlock";
+ #hwlock-cells = <1>;
+ reg = <0x4c000000 0x400>;
+ clocks = <&rcc HSEM>;
+ clock-names = "hsem";
+ };
clock-frequency = <400000>;
24c512@50 {
- compatible = "24c512";
+ compatible = "atmel,24c512";
reg = <0x50>;
pagesize = <128>;
}
reg = <0>;
eeprom@50 {
- compatible = "at,24c02";
+ compatible = "atmel,24c02";
reg = <0x50>;
};
};
reg = <1>;
eeprom@50 {
- compatible = "at,24c02";
+ compatible = "atmel,24c02";
reg = <0x50>;
};
};
reg = <2>;
eeprom@54 {
- compatible = "at,24c08";
+ compatible = "atmel,24c08";
reg = <0x54>;
};
};
Required properties:
-- compatible : Should be "actions,s900-i2c".
+- compatible : Should be one of the following:
+ - "actions,s700-i2c" for S700 SoC
+ - "actions,s900-i2c" for S900 SoC
- reg : Offset and length of the register set for the device.
- #address-cells : Should be 1.
- #size-cells : Should be 0.
"renesas,i2c-r8a7745" if the device is a part of a R8A7745 SoC.
"renesas,i2c-r8a77470" if the device is a part of a R8A77470 SoC.
"renesas,i2c-r8a774a1" if the device is a part of a R8A774A1 SoC.
+ "renesas,i2c-r8a774c0" if the device is a part of a R8A774C0 SoC.
"renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC.
"renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC.
"renesas,i2c-r8a7790" if the device is a part of a R8A7790 SoC.
- "renesas,iic-r8a7744" (RZ/G1N)
- "renesas,iic-r8a7745" (RZ/G1E)
- "renesas,iic-r8a774a1" (RZ/G2M)
+ - "renesas,iic-r8a774c0" (RZ/G2E)
- "renesas,iic-r8a7790" (R-Car H2)
- "renesas,iic-r8a7791" (R-Car M2-W)
- "renesas,iic-r8a7792" (R-Car V2H)
- "renesas,iic-r8a7795" (R-Car H3)
- "renesas,iic-r8a7796" (R-Car M3-W)
- "renesas,iic-r8a77965" (R-Car M3-N)
+ - "renesas,iic-r8a77990" (R-Car E3)
- "renesas,iic-sh73a0" (SH-Mobile AG5)
- "renesas,rcar-gen2-iic" (generic R-Car Gen2 or RZ/G1
compatible device)
the platform first followed by the generic R-Car
version.
- renesas,rmobile-iic must always follow.
+ When compatible with "renesas,rmobile-iic" it should
+ be the last compatibility string listed.
+
+ The r8a77990 (R-Car E3) and r8a774c0 (RZ/G2E)
+ controllers are not considered compatible with
+ "renesas,rcar-gen3-iic" or "renesas,rmobile-iic"
+ due to the absence of automatic transmission registers.
- reg : address start and address range size of device
- interrupts : interrupt of device
- i2c-scl-falling-time-ns : Only for STM32F7, I2C SCL Falling time for the board
(default: 10)
I2C Timings are derived from these 2 values
+- st,syscfg-fmp: Only for STM32F7, used to set the Fast Mode Plus bit within
+  SYSCFG when Fast Mode Plus speed is selected by the slave.
+ 1st cell : phandle to syscfg
+ 2nd cell : register offset within SYSCFG
+ 3rd cell : register bitmask for FMP bit
Example :
clocks = <&rcc 1 CLK_I2C1>;
pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>;
pinctrl-names = "default";
+ st,syscfg-fmp = <&syscfg 0x4 0x1>;
};
#size-cells = <0>;
eeprom@54 {
- compatible = "at,24c08";
+ compatible = "atmel,24c08";
reg = <0x54>;
};
};
PPI affinity can be expressed as a single "ppi-partitions" node,
containing a set of sub-nodes, each with the following property:
- affinity: Should be a list of phandles to CPU nodes (as described in
-Documentation/devicetree/bindings/arm/cpus.txt).
+ Documentation/devicetree/bindings/arm/cpus.yaml).
GICv3 has one or more Interrupt Translation Services (ITS) that are
used to route Message Signalled Interrupts (MSI) to the CPUs.
--- /dev/null
+Amlogic Meson AXG DWC PCIE SoC controller
+
+Amlogic Meson PCIe host controller is based on the Synopsys DesignWare PCI core.
+It shares common functions with the PCIe DesignWare core driver and
+inherits common properties defined in
+Documentation/devicetree/bindings/pci/designware-pcie.txt.
+
+Additional properties are described here:
+
+Required properties:
+- compatible:
+ should contain "amlogic,axg-pcie" to identify the core.
+- reg:
+ should contain the configuration address space.
+- reg-names: Must be
+ - "elbi" External local bus interface registers
+ - "cfg" Meson specific registers
+ - "phy" Meson PCIE PHY registers
+ - "config" PCIe configuration space
+- reset-gpios: The GPIO to generate PCIe PERST# assert and deassert signal.
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Must include the following entries:
+ - "pclk" PCIe GEN 100M PLL clock
+ - "port" PCIe_x(A or B) RC clock gate
+ - "general" PCIe Phy clock
+ - "mipi" PCIe_x(A or B) 100M ref clock gate
+- resets: phandle to the reset lines.
+- reset-names: must contain "phy" "port" and "apb"
+ - "phy" Share PHY reset
+ - "port" Port A or B reset
+ - "apb" Share APB reset
+- device_type:
+	should be "pci", as specified in designware-pcie.txt.
+
+
+Example configuration:
+
+ pcie: pcie@f9800000 {
+ compatible = "amlogic,axg-pcie", "snps,dw-pcie";
+ reg = <0x0 0xf9800000 0x0 0x400000
+ 0x0 0xff646000 0x0 0x2000
+ 0x0 0xff644000 0x0 0x2000
+ 0x0 0xf9f00000 0x0 0x100000>;
+ reg-names = "elbi", "cfg", "phy", "config";
+ reset-gpios = <&gpio GPIOX_19 GPIO_ACTIVE_HIGH>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 179 IRQ_TYPE_EDGE_RISING>;
+ bus-range = <0x0 0xff>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x82000000 0 0 0x0 0xf9c00000 0 0x00300000>;
+
+ clocks = <&clkc CLKID_USB
+ &clkc CLKID_MIPI_ENABLE
+ &clkc CLKID_PCIE_A
+ &clkc CLKID_PCIE_CML_EN0>;
+ clock-names = "general",
+ "mipi",
+ "pclk",
+ "port";
+ resets = <&reset RESET_PCIE_PHY>,
+ <&reset RESET_PCIE_A>,
+ <&reset RESET_PCIE_APB>;
+ reset-names = "phy",
+ "port",
+ "apb";
+ };
Additional required properties for imx6sx-pcie:
- clock names: Must include the following additional entries:
- "pcie_inbound_axi"
-- power-domains: Must be set to a phandle pointing to the PCIE_PHY power domain
+- power-domains: Must be set to phandles pointing to the DISPLAY and
+ PCIE_PHY power domains
+- power-domain-names: Must be "pcie", "pcie_phy"
Additional required properties for imx7d-pcie:
- power-domains: Must be set to a phandle pointing to PCIE_PHY power domain
explanation.
- ranges: Sub-ranges distributed from the PCIe controller node. An empty
property is sufficient.
-- num-lanes: Number of lanes to use for this port.
Examples for MT7623:
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
ranges;
- num-lanes = <1>;
};
pcie@1,0 {
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
ranges;
- num-lanes = <1>;
};
pcie@2,0 {
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
ranges;
- num-lanes = <1>;
};
};
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>,
<0 0 0 2 &pcie_intc0 1>,
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>,
<0 0 0 2 &pcie_intc1 1>,
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>,
<0 0 0 2 &pcie_intc0 1>,
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>,
<0 0 0 2 &pcie_intc1 1>,
--- /dev/null
+Socionext UniPhier PCIe host controller bindings
+
+This describes the devicetree bindings for PCIe host controller implemented
+on Socionext UniPhier SoCs.
+
+UniPhier PCIe host controller is based on the Synopsys DesignWare PCI core.
+It shares common functions with the PCIe DesignWare core driver and inherits
+common properties defined in
+Documentation/devicetree/bindings/pci/designware-pcie.txt.
+
+Required properties:
+- compatible: Should be "socionext,uniphier-pcie".
+- reg: Specifies offset and length of the register set for the device.
+ According to the reg-names, appropriate register sets are required.
+- reg-names: Must include the following entries:
+ "dbi" - controller configuration registers
+ "link" - SoC-specific glue layer registers
+ "config" - PCIe configuration space
+- clocks: A phandle to the clock gate for PCIe glue layer including
+ the host controller.
+- resets: A phandle to the reset line for PCIe glue layer including
+ the host controller.
+- interrupts: A list of interrupt specifiers. According to the
+ interrupt-names, appropriate interrupts are required.
+- interrupt-names: Must include the following entries:
+ "dma" - DMA interrupt
+ "msi" - MSI interrupt
+
+Optional properties:
+- phys: A phandle to generic PCIe PHY. According to the phy-names, appropriate
+ phys are required.
+- phy-names: Must be "pcie-phy".
+
+Required sub-node:
+- legacy-interrupt-controller: Specifies interrupt controller for legacy PCI
+ interrupts.
+
+Required properties for legacy-interrupt-controller:
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- interrupt-parent: Phandle to the parent interrupt controller.
+- interrupts: An interrupt specifier for legacy interrupt.
+
+Example:
+
+ pcie: pcie@66000000 {
+ compatible = "socionext,uniphier-pcie", "snps,dw-pcie";
+ status = "disabled";
+ reg-names = "dbi", "link", "config";
+ reg = <0x66000000 0x1000>, <0x66010000 0x10000>,
+ <0x2fff0000 0x10000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ clocks = <&sys_clk 24>;
+ resets = <&sys_rst 24>;
+ num-lanes = <1>;
+ num-viewport = <1>;
+ bus-range = <0x0 0xff>;
+ device_type = "pci";
+ ranges =
+ /* downstream I/O */
+ <0x81000000 0 0x00000000 0x2ffe0000 0 0x00010000
+ /* non-prefetchable memory */
+ 0x82000000 0 0x00000000 0x20000000 0 0x0ffe0000>;
+ #interrupt-cells = <1>;
+ interrupt-names = "dma", "msi";
+ interrupts = <0 224 4>, <0 225 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>, /* INTA */
+ <0 0 0 2 &pcie_intc 1>, /* INTB */
+ <0 0 0 3 &pcie_intc 2>, /* INTC */
+ <0 0 0 4 &pcie_intc 3>; /* INTD */
+
+ pcie_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 226 4>;
+ };
+ };
Altera SOCFPGA Reset Manager
Required properties:
-- compatible : "altr,rst-mgr"
+- compatible : "altr,rst-mgr" for (Cyclone5/Arria5/Arria10)
+ "altr,stratix10-rst-mgr","altr,rst-mgr" for Stratix10 ARM64 SoC
- reg : Should contain 1 register ranges(address and length)
- altr,modrst-offset : Should contain the offset of the first modrst register.
- #reset-cells: 1
};
-USB3 core reset
----------------
+Peripheral core reset in glue layer
+-----------------------------------
-USB3 core reset belongs to USB3 glue layer. Before using the core reset,
-it is necessary to control the clocks and resets to enable this layer.
-These clocks and resets should be described in each property.
+Some peripheral core resets belong to their own glue layer. Before using
+such a core reset, it is necessary to control the clocks and resets to
+enable this layer. These clocks and resets should be described in each property.
Required properties:
- compatible: Should be
- "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC
- "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC
- "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC
- "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC
+ "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC USB3
+ "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC USB3
+ "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC USB3
+ "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC USB3
+ "socionext,uniphier-pro4-ahci-reset" - for Pro4 SoC AHCI
+ "socionext,uniphier-pxs2-ahci-reset" - for PXs2 SoC AHCI
+ "socionext,uniphier-pxs3-ahci-reset" - for PXs3 SoC AHCI
- #reset-cells: Should be 1.
- reg: Specifies offset and length of the register set for the device.
-- clocks: A list of phandles to the clock gate for USB3 glue layer.
+- clocks: A list of phandles to the clock gate for the glue layer.
According to the clock-names, appropriate clocks are required.
- clock-names: Should contain
"gio", "link" - for Pro4 SoC
"link" - for others
-- resets: A list of phandles to the reset control for USB3 glue layer.
+- resets: A list of phandles to the reset control for the glue layer.
According to the reset-names, appropriate resets are required.
- reset-names: Should contain
"gio", "link" - for Pro4 SoC
= EXAMPLE
The following example represents the GLINK RPM node on a MSM8996 device, with
the function for the "rpm_request" channel defined, which is used for
-regualtors and root clocks.
+regulators and root clocks.
apcs_glb: mailbox@9820000 {
compatible = "qcom,msm8996-apcs-hmss-global";
- qcom,local-pid:
Usage: required
Value type: <u32>
- Definition: specifies the identfier of the local endpoint of this edge
+ Definition: specifies the identifier of the local endpoint of this edge
- qcom,remote-pid:
Usage: required
Value type: <u32>
- Definition: specifies the identfier of the remote endpoint of this edge
+ Definition: specifies the identifier of the remote endpoint of this edge
= SUBNODES
Each SMP2P pair contain a set of inbound and outbound entries, these are
in that it exposes any CMB (Controller Memory Buffer) as a P2P memory
resource (provider), it accepts P2P memory pages as buffers in requests
to be used directly (client) and it can also make use of the CMB as
- submission queue entries (orchastrator).
+ submission queue entries (orchestrator).
* The RDMA driver is a client in this arrangement so that an RNIC
can DMA directly to the memory exposed by the NVMe device.
* The NVMe Target driver (nvmet) can orchestrate the data from the RNIC
If more than one provider is supported, the one nearest to all the clients will
be chosen first. If more than one provider is an equal distance away, the
one returned will be chosen at random (it is not an arbitrary but
-truely random). This function returns the PCI device to use for the provider
+truly random). This function returns the PCI device to use for the provider
with a reference taken and therefore when it's no longer needed it should be
returned with pci_dev_put().
ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
};
-Bus drivers can export attributes using the BUS_ATTR macro that works
-similarly to the DEVICE_ATTR macro for devices. For example, a definition
-like this:
+Bus drivers can export attributes using the BUS_ATTR_RW macro that works
+similarly to the DEVICE_ATTR_RW macro for devices. For example, a
+definition like this:
-static BUS_ATTR(debug,0644,show_debug,store_debug);
+static BUS_ATTR_RW(debug);
is equivalent to declaring:
dmaenginem_async_device_register()
dmam_alloc_coherent()
dmam_alloc_attrs()
- dmam_declare_coherent_memory()
dmam_free_coherent()
dmam_pool_create()
dmam_pool_destroy()
be preserved until there actually is some text is output to the console.
This option causes fbcon to bind immediately to the fbdev device.
+7. fbcon=logo-pos:<location>
+
+ The only possible 'location' is 'center' (without quotes), and when
+ given, the bootup logo is moved from the default top-left corner
+ location to the center of the framebuffer. If more than one logo is
+ displayed due to multiple CPUs, the collected line of logos is moved
+ as a whole.
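+	As a usage example, booting with "fbcon=logo-pos:center" on the
+	kernel command line moves the logo to the center rather than the
+	top-left corner.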
+
C. Attaching, Detaching and Unloading
Before going on to how to attach, detach and unload the framebuffer console, an
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | ok |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | ok |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | ok |
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | ok |
| hexagon: | ok |
| ia64: | TODO |
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
| arm: | ok |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | TODO |
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | .. |
| arm64: | ok |
| c6x: | .. |
+ | csky: | .. |
| h8300: | .. |
| hexagon: | .. |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | ok |
+ | csky: | ok |
| h8300: | ok |
| hexagon: | ok |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | .. |
| arm: | TODO |
| arm64: | ok |
| c6x: | ok |
+ | csky: | ok |
| h8300: | ok |
| hexagon: | ok |
| ia64: | ok |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
| arm: | ok |
| arm64: | ok |
| c6x: | .. |
+ | csky: | .. |
| h8300: | .. |
| hexagon: | .. |
| ia64: | TODO |
| arm: | TODO |
| arm64: | TODO |
| c6x: | .. |
+ | csky: | TODO |
| h8300: | .. |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| arm: | .. |
| arm64: | ok |
| c6x: | .. |
+ | csky: | .. |
| h8300: | .. |
| hexagon: | .. |
| ia64: | ok |
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
These filesystems may be used for inspiration:
- ext2: see Documentation/filesystems/ext2.txt
-- ext4: see Documentation/filesystems/ext4/ext4.rst
+- ext4: see Documentation/filesystems/ext4/
- xfs: see Documentation/filesystems/xfs.txt
the time of the crash, then there is no guarantee of consistency for
the blocks in that transaction so they are discarded (which means any
filesystem changes they represent are also lost).
-Check Documentation/filesystems/ext4/ext4.rst if you want to read more about
+Check Documentation/filesystems/ext4/ if you want to read more about
ext4 and journaling.
References
Per-file keys
-------------
-Master keys are not used to encrypt file contents or names directly.
-Instead, a unique key is derived for each encrypted file, including
-each regular file, directory, and symbolic link. This has several
-advantages:
-
-- In cryptosystems, the same key material should never be used for
- different purposes. Using the master key as both an XTS key for
- contents encryption and as a CTS-CBC key for filenames encryption
- would violate this rule.
-- Per-file keys simplify the choice of IVs (Initialization Vectors)
- for contents encryption. Without per-file keys, to ensure IV
- uniqueness both the inode and logical block number would need to be
- encoded in the IVs. This would make it impossible to renumber
- inodes, which e.g. ``resize2fs`` can do when resizing an ext4
- filesystem. With per-file keys, it is sufficient to encode just the
- logical block number in the IVs.
-- Per-file keys strengthen the encryption of filenames, where IVs are
- reused out of necessity. With a unique key per directory, IV reuse
- is limited to within a single directory.
-- Per-file keys allow individual files to be securely erased simply by
- securely erasing their keys. (Not yet implemented.)
-
-A KDF (Key Derivation Function) is used to derive per-file keys from
-the master key. This is done instead of wrapping a randomly-generated
-key for each file because it reduces the size of the encryption xattr,
-which for some filesystems makes the xattr more likely to fit in-line
-in the filesystem's inode table. With a KDF, only a 16-byte nonce is
-required --- long enough to make key reuse extremely unlikely. A
-wrapped key, on the other hand, would need to be up to 64 bytes ---
-the length of an AES-256-XTS key. Furthermore, currently there is no
-requirement to support unlocking a file with multiple alternative
-master keys or to support rotating master keys. Instead, the master
-keys may be wrapped in userspace, e.g. as done by the `fscrypt
-<https://github.com/google/fscrypt>`_ tool.
-
-The current KDF encrypts the master key using the 16-byte nonce as an
-AES-128-ECB key. The output is used as the derived key. If the
-output is longer than needed, then it is truncated to the needed
-length. Truncation is the norm for directories and symlinks, since
-those use the CTS-CBC encryption mode which requires a key half as
-long as that required by the XTS encryption mode.
+Since each master key can protect many files, it is necessary to
+"tweak" the encryption of each file so that the same plaintext in two
+files doesn't map to the same ciphertext, or vice versa. In most
+cases, fscrypt does this by deriving per-file keys. When a new
+encrypted inode (regular file, directory, or symlink) is created,
+fscrypt randomly generates a 16-byte nonce and stores it in the
+inode's encryption xattr. Then, it uses a KDF (Key Derivation
+Function) to derive the file's key from the master key and nonce.
+
+The Adiantum encryption mode (see `Encryption modes and usage`_) is
+special, since it accepts longer IVs and is suitable for both contents
+and filenames encryption. For it, a "direct key" option is offered
+where the file's nonce is included in the IVs and the master key is
+used for encryption directly. This improves performance; however,
+users must not use the same master key for any other encryption mode.
+
+Below, the KDF and design considerations are described in more detail.
+
+The current KDF works by encrypting the master key with AES-128-ECB,
+using the file's nonce as the AES key. The output is used as the
+derived key. If the output is longer than needed, then it is
+truncated to the needed length.
Note: this KDF meets the primary security requirement, which is to
produce unique derived keys that preserve the entropy of the master
reversible, so it is generally considered to be a mistake! It may be
replaced with HKDF or another more standard KDF in the future.
+Key derivation was chosen over key wrapping because wrapped keys would
+require larger xattrs which would be less likely to fit in-line in the
+filesystem's inode table, and there didn't appear to be any
+significant advantages to key wrapping. In particular, currently
+there is no requirement to support unlocking a file with multiple
+alternative master keys or to support rotating master keys. Instead,
+the master keys may be wrapped in userspace, e.g. as is done by the
+`fscrypt <https://github.com/google/fscrypt>`_ tool.
+
+Including the inode number in the IVs was considered. However, it was
+rejected as it would have prevented ext4 filesystems from being
+resized, and by itself still wouldn't have been sufficient to prevent
+the same key from being directly reused for both XTS and CTS-CBC.
+
Encryption modes and usage
==========================
- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
+- Adiantum for both contents and filenames
+
+If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
-It is strongly recommended to use AES-256-XTS for contents encryption.
AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS.
+Adiantum is a (primarily) stream cipher-based mode that is fast even
+on CPUs without dedicated crypto instructions. It's also a true
+wide-block mode, unlike XTS. It can also eliminate the need to derive
+per-file keys. However, it depends on the security of two primitives,
+XChaCha12 and AES-256, rather than just one. See the paper
+"Adiantum: length-preserving encryption for entry-level processors"
+(https://eprint.iacr.org/2018/720.pdf) for more details. To use
+Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast
+implementations of ChaCha and NHPoly1305 should be enabled, e.g.
+CONFIG_CRYPTO_CHACHA20_NEON and CONFIG_CRYPTO_NHPOLY1305_NEON for ARM.
+
New encryption modes can be added relatively easily, without changes
to individual filesystems. However, authenticated encryption (AE)
modes are not currently supported because of the difficulty of dealing
with ciphertext expansion.
+Contents encryption
+-------------------
+
For file contents, each filesystem block is encrypted independently.
Currently, only the case where the filesystem block size is equal to
-the system's page size (usually 4096 bytes) is supported. With the
-XTS mode of operation (recommended), the logical block number within
-the file is used as the IV. With the CBC mode of operation (not
-recommended), ESSIV is used; specifically, the IV for CBC is the
-logical block number encrypted with AES-256, where the AES-256 key is
-the SHA-256 hash of the inode's data encryption key.
-
-For filenames, the full filename is encrypted at once. Because of the
-requirements to retain support for efficient directory lookups and
-filenames of up to 255 bytes, a constant initialization vector (IV) is
-used. However, each encrypted directory uses a unique key, which
-limits IV reuse to within a single directory. Note that IV reuse in
-the context of CTS-CBC encryption means that when the original
-filenames share a common prefix at least as long as the cipher block
-size (16 bytes for AES), the corresponding encrypted filenames will
-also share a common prefix. This is undesirable; it may be fixed in
-the future by switching to an encryption mode that is a strong
-pseudorandom permutation on arbitrary-length messages, e.g. the HEH
-(Hash-Encrypt-Hash) mode.
-
-Since filenames are encrypted with the CTS-CBC mode of operation, the
-plaintext and ciphertext filenames need not be multiples of the AES
-block size, i.e. 16 bytes. However, the minimum size that can be
-encrypted is 16 bytes, so shorter filenames are NUL-padded to 16 bytes
-before being encrypted. In addition, to reduce leakage of filename
-lengths via their ciphertexts, all filenames are NUL-padded to the
-next 4, 8, 16, or 32-byte boundary (configurable). 32 is recommended
-since this provides the best confidentiality, at the cost of making
-directory entries consume slightly more space. Note that since NUL
-(``\0``) is not otherwise a valid character in filenames, the padding
-will never produce duplicate plaintexts.
+the system's page size (usually 4096 bytes) is supported.
+
+Each block's IV is set to the logical block number within the file as
+a little endian number, except that:
+
+- With CBC mode encryption, ESSIV is also used. Specifically, each IV
+ is encrypted with AES-256 where the AES-256 key is the SHA-256 hash
+ of the file's data encryption key.
+
+- In the "direct key" configuration (FS_POLICY_FLAG_DIRECT_KEY set in
+ the fscrypt_policy), the file's nonce is also appended to the IV.
+ Currently this is only allowed with the Adiantum encryption mode.
+
+Filenames encryption
+--------------------
+
+For filenames, each full filename is encrypted at once. Because of
+the requirements to retain support for efficient directory lookups and
+filenames of up to 255 bytes, the same IV is used for every filename
+in a directory.
+
+However, each encrypted directory still uses a unique key; or
+alternatively (for the "direct key" configuration) has the file's
+nonce included in the IVs. Thus, IV reuse is limited to within a
+single directory.
+
+With CTS-CBC, the IV reuse means that when the plaintext filenames
+share a common prefix at least as long as the cipher block size (16
+bytes for AES), the corresponding encrypted filenames will also share
+a common prefix. This is undesirable. Adiantum does not have this
+weakness, as it is a wide-block encryption mode.
+
+All supported filenames encryption modes accept any plaintext length
+>= 16 bytes; cipher block alignment is not required. However,
+filenames shorter than 16 bytes are NUL-padded to 16 bytes before
+being encrypted. In addition, to reduce leakage of filename lengths
+via their ciphertexts, all filenames are NUL-padded to the next 4, 8,
+16, or 32-byte boundary (configurable). 32 is recommended since this
+provides the best confidentiality, at the cost of making directory
+entries consume slightly more space. Note that since NUL (``\0``) is
+not otherwise a valid character in filenames, the padding will never
+produce duplicate plaintexts.
Symbolic link targets are considered a type of filename and are
-encrypted in the same way as filenames in directory entries. Each
-symlink also uses a unique key; hence, the hardcoded IV is not a
-problem for symlinks.
+encrypted in the same way as filenames in directory entries, except
+that IV reuse is not a problem as each symlink has its own inode.
User API
========
and FS_ENCRYPTION_MODE_AES_256_CTS (4) for
``filenames_encryption_mode``.
-- ``flags`` must be set to a value from ``<linux/fs.h>`` which
+- ``flags`` must contain a value from ``<linux/fs.h>`` which
identifies the amount of NUL-padding to use when encrypting
filenames. If unsure, use FS_POLICY_FLAGS_PAD_32 (0x3).
+ In addition, if the chosen encryption modes are both
+ FS_ENCRYPTION_MODE_ADIANTUM, this can contain
+ FS_POLICY_FLAG_DIRECT_KEY to specify that the master key should be
+ used directly, without key derivation.
- ``master_key_descriptor`` specifies how to find the master key in
the keyring; see `Adding keys`_. It is up to userspace to choose a
Declaring:
-BUS_ATTR(_name, _mode, _show, _store)
+static BUS_ATTR_RW(name);
+static BUS_ATTR_RO(name);
+static BUS_ATTR_WO(name);
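+
+For example (a sketch only; the attribute name and the variable backing it
+are hypothetical), BUS_ATTR_RW(name) expects name_show()/name_store()
+helpers with the bus_attribute prototypes:
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+static int foo_debug;
+
+static ssize_t debug_show(struct bus_type *bus, char *buf)
+{
+	return sprintf(buf, "%d\n", foo_debug);
+}
+
+static ssize_t debug_store(struct bus_type *bus, const char *buf,
+			   size_t count)
+{
+	if (kstrtoint(buf, 0, &foo_debug))
+		return -EINVAL;
+	return count;
+}
+static BUS_ATTR_RW(debug);
+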
Creation/Removal:
--- 7.4 mandatory-y
- mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
- to define the minimum set of headers that must be exported in
- include/asm.
+ mandatory-y is essentially used by include/(uapi/)asm-generic/Kbuild.asm
+ to define the minimum set of ASM headers that all architectures must have.
+
+ This works like optional generic-y. If a mandatory header is missing
+ in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate
+ a wrapper of the asm-generic one.
The convention is to list one subdir per line and
preferably in alphabetic order.
batman-adv
can
can_ucan_protocol
- dpaa2/index
- e100
- e1000
- e1000e
- fm10k
- igb
- igbvf
- ixgb
- ixgbe
- ixgbevf
- i40e
- iavf
- ice
+ device_drivers/freescale/dpaa2/index
+ device_drivers/intel/e100
+ device_drivers/intel/e1000
+ device_drivers/intel/e1000e
+ device_drivers/intel/fm10k
+ device_drivers/intel/igb
+ device_drivers/intel/igbvf
+ device_drivers/intel/ixgb
+ device_drivers/intel/ixgbe
+ device_drivers/intel/ixgbevf
+ device_drivers/intel/i40e
+ device_drivers/intel/iavf
+ device_drivers/intel/ice
kapi
z8530book
msg_zerocopy
size should be set when the call is begun. tx_total_len may not be less
than zero.
- (*) Check to see the completion state of a call so that the caller can assess
- whether it needs to be retried.
-
- enum rxrpc_call_completion {
- RXRPC_CALL_SUCCEEDED,
- RXRPC_CALL_REMOTELY_ABORTED,
- RXRPC_CALL_LOCALLY_ABORTED,
- RXRPC_CALL_LOCAL_ERROR,
- RXRPC_CALL_NETWORK_ERROR,
- };
-
- int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
- enum rxrpc_call_completion *_compl,
- u32 *_abort_code);
-
- On return, -EINPROGRESS will be returned if the call is still ongoing; if
- it is finished, *_compl will be set to indicate the manner of completion,
- *_abort_code will be set to any abort code that occurred. 0 will be
- returned on a successful completion, -ECONNABORTED will be returned if the
- client failed due to a remote abort and anything else will return an
- appropriate error code.
-
- The caller should look at this information to decide if it's worth
- retrying the call.
-
- (*) Retry a client call.
-
- int rxrpc_kernel_retry_call(struct socket *sock,
- struct rxrpc_call *call,
- struct sockaddr_rxrpc *srx,
- struct key *key);
-
- This attempts to partially reinitialise a call and submit it again while
- reusing the original call's Tx queue to avoid the need to repackage and
- re-encrypt the data to be sent. call indicates the call to retry, srx the
- new address to send it to and key the encryption key to use for signing or
- encrypting the packets.
-
- For this to work, the first Tx data packet must still be in the transmit
- queue, and currently this is only permitted for local and network errors
- and the call must not have been aborted. Any partially constructed Tx
- packet is left as is and can continue being filled afterwards.
-
- It returns 0 if the call was requeued and an error otherwise.
-
(*) Get call RTT.
u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);
to the accept queue.
-TCP Fast Open
+* TcpEstabResets
+Defined in `RFC1213 tcpEstabResets`_.
+
+.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpAttemptFails
+Defined in `RFC1213 tcpAttemptFails`_.
+
+.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpOutRsts
+Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
+the 'segments sent containing the RST flag', but in the Linux kernel, this
+counter indicates the segments the kernel tried to send. The sending
+process might fail due to some errors (e.g. memory allocation failure).
+
+.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
+
+
+TCP Fast Path
============
When kernel receives a TCP packet, it has two paths to handler the
packet, one is fast path, another is slow path. The comment in kernel
TCP abort
========
-
-
* TcpExtTCPAbortOnData
It means TCP layer has data in flight, but need to close the
connection. So TCP layer sends a RST to the other side, indicate the
stack of kernel will increase TcpExtTCPSACKReorder for both of the
above scenarios.
-
DSACK
=====
The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
DSACK to the sender.
* TcpExtTCPDSACKRecv
-The TCP stack receives a DSACK, which indicate an acknowledged
+The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
* TcpExtTCPDSACKOfoRecv
The TCP stack receives a DSACK, which indicate an out of order
duplicate packet is received.
+invalid SACK and DSACK
+======================
+When a SACK (or DSACK) block is invalid, a corresponding counter would
+be updated. The validation method is based on the start/end sequence
+number of the SACK block. For more details, please refer to the comment
+of the function tcp_is_sackblock_valid in the kernel source code. A
+SACK option could have up to 4 blocks; they are checked
+individually. E.g., if 3 blocks of a SACK are invalid, the
+corresponding counter would be updated 3 times. The comment of the
+`Add counters for discarded SACK blocks`_ patch has additional
+explanation:
+
+.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
+
+* TcpExtTCPSACKDiscard
+This counter indicates how many SACK blocks are invalid. If the invalid
+SACK block is caused by ACK recording, the TCP stack will only ignore
+it and won't update this counter.
+
+* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+When a DSACK block is invalid, one of these two counters would be
+updated. Which counter will be updated depends on the undo_marker flag
+of the TCP socket. If the undo_marker is not set, the TCP stack isn't
+likely to have retransmitted any packets; if we still receive an invalid
+DSACK block, the reason might be that the packet was duplicated in the
+middle of the network. In such a scenario, TcpExtTCPDSACKIgnoredNoUndo
+will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
+will be updated. As implied in its name, it might be an old packet.
+
+SACK shift
+==========
+The Linux networking stack stores data in the sk_buff struct (skb for
+short). If a SACK block crosses multiple skbs, the TCP stack will try
+to re-arrange data in these skbs. E.g. if a SACK block acknowledges seq
+10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
+15 in skb2 would be moved to skb1. This operation is 'shift'. If a
+SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
+seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
+discarded; this operation is 'merge'.
+
+* TcpExtTCPSackShifted
+A skb is shifted
+
+* TcpExtTCPSackMerged
+A skb is merged
+
+* TcpExtTCPSackShiftFallback
+A skb should be shifted or merged, but the TCP stack doesn't do it for
+some reason.
+
TCP out of order
===============
* TcpExtTCPOFOQueue
.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
+TCP receive window
+==================
+* TcpExtTCPWantZeroWindowAdv
+Depending on current memory usage, the TCP stack tries to set the receive
+window to zero. But the receive window might still be a non-zero
+value. For example, if the previous window size is 10, and the TCP
+stack receives 3 bytes, the current window size would be 7 even if the
+window size calculated by the memory usage is zero.
+
+* TcpExtTCPToZeroWindowAdv
+The TCP receive window is set to zero from a non-zero value.
+
+* TcpExtTCPFromZeroWindowAdv
+The TCP receive window is set to a non-zero value from zero.
+
+
+Delayed ACK
+===========
+The TCP Delayed ACK is a technique used for reducing the packet count
+in the network. For more details, please refer to the
+`Delayed ACK wiki`_.
+
+.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
+
+* TcpExtDelayedACKs
+A delayed ACK timer expires. The TCP stack will send a pure ACK packet
+and exit the delayed ACK mode.
+
+* TcpExtDelayedACKLocked
+A delayed ACK timer expires, but the TCP stack can't send an ACK
+immediately because the socket is locked by a userspace program. The
+TCP stack will send a pure ACK later (after the userspace program
+unlocks the socket). When the TCP stack sends the pure ACK later, the
+TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK
+mode.
+
+* TcpExtDelayedACKLost
+It will be updated when the TCP stack receives a packet which has
+already been ACKed. A delayed ACK loss might cause this, but it could
+also be triggered by other reasons, such as a packet being duplicated
+in the network.
+
+Tail Loss Probe (TLP)
+=====================
+TLP is an algorithm used to detect TCP packet loss. For more details,
+please refer to the `TLP paper`_.
+
+.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
+
+* TcpExtTCPLossProbes
+A TLP probe packet is sent.
+
+* TcpExtTCPLossProbeRecovery
+A packet loss is detected and recovered by TLP.
examples
========
Hardware time stamping must also be initialized for each device driver
that is expected to do hardware time stamping. The parameter is defined in
-/include/linux/net_tstamp.h as:
+include/uapi/linux/net_tstamp.h as:
struct hwtstamp_config {
int flags; /* no flags defined right now, must be zero */
HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
/* for the complete list of values, please check
- * the include file /include/linux/net_tstamp.h
+ * the include file include/uapi/linux/net_tstamp.h
*/
};
The same can also be done from an application program.
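
For example, a minimal user-space sketch (an editorial illustration,
not taken from the kernel sources; the interface name "eth0" and the
chosen filter are arbitrary) could enable hardware time stamping with
the SIOCSHWTSTAMP ioctl:

  /*
   * Editorial sketch: request hardware TX time stamping and PTPv1
   * RX filtering on "eth0" via the SIOCSHWTSTAMP ioctl, using the
   * struct hwtstamp_config shown above.
   */
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <linux/net_tstamp.h>
  #include <linux/sockios.h>

  int main(void)
  {
          struct hwtstamp_config cfg;
          struct ifreq ifr;
          int fd = socket(AF_INET, SOCK_DGRAM, 0);

          if (fd < 0)
                  return 1;

          memset(&cfg, 0, sizeof(cfg));
          cfg.tx_type = HWTSTAMP_TX_ON;
          cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;

          memset(&ifr, 0, sizeof(ifr));
          strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
          ifr.ifr_data = (char *)&cfg;

          if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                  perror("SIOCSHWTSTAMP");

          close(fd);
          return 0;
  }
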
Disable specific CPU's specific idle state from cpuidle sysfs (see
-Documentation/cpuidle/sysfs.txt):
+Documentation/admin-guide/pm/cpuidle.rst):
# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
==========
.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf
-.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf
.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34)
.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf
Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
-This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo
flag bits:
RDT (Resource Director Technology) Allocation - "rdt_a"
CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
targets += $(timeconst-file)
-define filechk_gentimeconst
- (echo $(CONFIG_HZ) | bc -q $< )
-endef
+filechk_gentimeconst = echo $(CONFIG_HZ) | bc -q $<
$(timeconst-file): kernel/time/timeconst.bc FORCE
$(call filechk,gentimeconst)
F: drivers/i2c/busses/i2c-thunderx*
CAVIUM LIQUIDIO NETWORK DRIVER
-M: Derek Chickles <derek.chickles@caviumnetworks.com>
-M: Satanand Burla <satananda.burla@caviumnetworks.com>
-M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
-M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
+M: Derek Chickles <dchickles@marvell.com>
+M: Satanand Burla <sburla@marvell.com>
+M: Felix Manlunas <fmanlunas@marvell.com>
L: netdev@vger.kernel.org
W: http://www.cavium.com
S: Supported
CHROME HARDWARE PLATFORM SUPPORT
M: Benson Leung <bleung@chromium.org>
-M: Olof Johansson <olof@lixom.net>
+M: Enric Balletbo i Serra <enric.balletbo@collabora.com>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform.git
F: drivers/platform/chrome/
+CHROMEOS EC SUBDRIVERS
+M: Benson Leung <bleung@chromium.org>
+M: Enric Balletbo i Serra <enric.balletbo@collabora.com>
+R: Guenter Roeck <groeck@chromium.org>
+S: Maintained
+N: cros_ec
+N: cros-ec
+F: drivers/power/supply/cros_usbpd-charger.c
+
CIRRUS LOGIC AUDIO CODEC DRIVERS
M: Brian Austin <brian.austin@cirrus.com>
M: Paul Handrigan <Paul.Handrigan@cirrus.com>
S: Maintained
F: drivers/net/ethernet/ti/cpmac.c
-CPU FREQUENCY DRIVERS
+CPU FREQUENCY SCALING FRAMEWORK
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Viresh Kumar <viresh.kumar@linaro.org>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
B: https://bugzilla.kernel.org
+F: Documentation/admin-guide/pm/cpufreq.rst
+F: Documentation/admin-guide/pm/intel_pstate.rst
F: Documentation/cpu-freq/
F: Documentation/devicetree/bindings/cpufreq/
F: drivers/cpufreq/
F: drivers/cpuidle/cpuidle-exynos.c
F: arch/arm/mach-exynos/pm.c
-CPUIDLE DRIVERS
+CPU IDLE TIME MANAGEMENT FRAMEWORK
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
B: https://bugzilla.kernel.org
+F: Documentation/admin-guide/pm/cpuidle.rst
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
Q: http://patchwork.ozlabs.org/project/linux-ext4/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git
S: Maintained
-F: Documentation/filesystems/ext4/ext4.rst
+F: Documentation/filesystems/ext4/
F: fs/ext4/
Extended Verification Module (EVM)
F: lib/pci*
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
+F: arch/x86/kernel/early-quirks.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
S: Supported
F: drivers/pci/controller/
+PCIE DRIVER FOR AMLOGIC MESON
+M: Yue Wang <yue.wang@Amlogic.com>
+L: linux-pci@vger.kernel.org
+L: linux-amlogic@lists.infradead.org
+S: Maintained
+F: drivers/pci/controller/dwc/pci-meson.c
+
PCIE DRIVER FOR AXIS ARTPEC
M: Jesper Nilsson <jesper.nilsson@axis.com>
L: linux-arm-kernel@axis.com
F: drivers/pci/controller/dwc/pcie-kirin.c
PCIE DRIVER FOR HISILICON STB
-M: Jianguo Sun <sunjianguo1@huawei.com>
M: Shawn Guo <shawn.guo@linaro.org>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
F: drivers/pci/controller/pci-v3-semi.c
+PCIE DRIVER FOR SOCIONEXT UNIPHIER
+M: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/uniphier-pcie.txt
+F: drivers/pci/controller/dwc/pcie-uniphier.c
+
PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com>
L: linux-pci@vger.kernel.org
SIFIVE DRIVERS
M: Palmer Dabbelt <palmer@sifive.com>
+M: Paul Walmsley <paul.walmsley@sifive.com>
L: linux-riscv@lists.infradead.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
+T: git git://github.com/sifive/riscv-linux.git
S: Supported
K: sifive
N: sifive
S: Odd Fixes
F: drivers/staging/rtl8712/
+STAGING - REALTEK RTL8188EU DRIVERS
+M: Larry Finger <Larry.Finger@lwfinger.net>
+S: Odd Fixes
+F: drivers/staging/rtl8188eu/
+
STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
L: linux-usb@vger.kernel.org
L: usb-storage@lists.one-eyed-alien.net
S: Maintained
-W: http://www.one-eyed-alien.net/~mdharm/linux-usb/
F: drivers/usb/storage/
USB MIDI DRIVER
# SPDX-License-Identifier: GPL-2.0
-VERSION = 4
-PATCHLEVEL = 20
+VERSION = 5
+PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc3
NAME = Shy Crocodile
# *DOCUMENTATION*
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
- CC_HAVE_ASM_GOTO := 1
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
endif
endif
+PHONY += prepare0
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
$(vmlinux-dirs): prepare
$(Q)$(MAKE) $(build)=$@ need-builtin=1
-define filechk_kernel.release
+filechk_kernel.release = \
echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
-endef
# Store (new) KERNELRELEASE string in include/config/kernel.release
include/config/kernel.release: $(srctree)/Makefile FORCE
# archprepare is used in arch Makefiles and when processed asm symlink,
# version.h and scripts_basic is processed / created.
-# Listed in dependency order
-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \
exit 1; \
fi; \
- (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\";)
+ echo \#define UTS_RELEASE \"$(KERNELRELEASE)\"
endef
define filechk_version.h
- (echo \#define LINUX_VERSION_CODE $(shell \
+ echo \#define LINUX_VERSION_CODE $(shell \
expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
- echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
+ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
endef
$(version_h): FORCE
# If we do an all arch process set dst to include/arch-$(SRCARCH)
hdr-dst = $(if $(KBUILD_HEADERS), dst=include/arch-$(SRCARCH), dst=include)
-PHONY += archheaders
-archheaders:
-
-PHONY += archscripts
-archscripts:
+PHONY += archheaders archscripts
PHONY += __headers
__headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
mrproper-dirs := $(addprefix _mrproper_,scripts)
-PHONY += $(mrproper-dirs) mrproper archmrproper
+PHONY += $(mrproper-dirs) mrproper
$(mrproper-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
-mrproper: clean archmrproper $(mrproper-dirs)
+mrproper: clean $(mrproper-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)
config JUMP_LABEL
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
+ depends on CC_HAS_ASM_GOTO
help
This option enables a transparent branch optimization that
makes certain almost-always-true or almost-always-false branch
* Address valid if:
* - "addr" doesn't have any high-bits set
* - AND "size" doesn't have any high-bits set
- * - AND "addr+size" doesn't have any high-bits set
+ * - AND "addr+size-(size != 0)" doesn't have any high-bits set
* - OR we are in kernel mode.
*/
-#define __access_ok(addr, size) \
- ((get_fs().seg & (addr | size | (addr+size))) == 0)
+#define __access_ok(addr, size) ({ \
+ unsigned long __ao_a = (addr), __ao_b = (size); \
+ unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
+ (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })
#define access_ok(addr, size) \
({ \
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
-generic-y += bpf_perf_event.h
-generic-y += ipcbuf.h
-generic-y += msgbuf.h
-generic-y += poll.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
generic-y += compat.h
generic-y += device.h
generic-y += div64.h
-generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += extable.h
-generic-y += fb.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
-generic-y += kmap_types.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += parport.h
-generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += topology.h
#endif
};
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:21, min:1, num:2, ver:8;
+#else
+ unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
#include <soc/arc/timers.h>
struct bcr_bpu_arcompact {
};
struct cpuinfo_arc_bpu {
- unsigned int ver, full, num_cache, num_pred;
+ unsigned int ver, full, num_cache, num_pred, ret_stk;
};
struct cpuinfo_arc_ccm {
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
- debug:1, ap:1, smart:1, rtt:1, pad3:4,
+ ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct bcr_mpy extn_mpy;
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
- int n;
+ unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
/* counts condition */
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+ /* All jump instructions that are taken */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN 9
+
+enum arc_pmu_attr_groups {
+ ARCPMU_ATTR_GR_EVENTS,
+ ARCPMU_ATTR_GR_FORMATS,
+ ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+ char name[ARCPMU_EVENT_NAME_LEN];
+};
+
struct arc_pmu {
struct pmu pmu;
unsigned int irq;
int n_counters;
+ int n_events;
u64 max_period;
int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+ struct arc_pmu_raw_event_entry *raw_entry;
+ struct attribute **attrs;
+ struct perf_pmu_events_attr *attr;
+ const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
};
struct arc_pmu_cpu {
{
struct arc_callchain_trace *ctrl = data;
struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
perf_callchain_store(entry, addr);
if (ctrl->depth++ < 3)
return -1;
}
-void
-perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
{
struct arc_callchain_trace ctrl = {
.depth = 0,
arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}
-void
-perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
{
/*
* User stack can't be unwound trivially with kernel dwarf unwinder
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
/* read counter #idx; note that counter# != event# on ARC! */
-static uint64_t arc_pmu_read_counter(int idx)
+static u64 arc_pmu_read_counter(int idx)
{
- uint32_t tmp;
- uint64_t result;
+ u32 tmp;
+ u64 result;
/*
* ARC supports making 'snapshots' of the counters, so we don't
write_aux_reg(ARC_REG_PCT_INDEX, idx);
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
- result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+ result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
result |= read_aux_reg(ARC_REG_PCT_SNAPL);
return result;
static void arc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
- uint64_t prev_raw_count = local64_read(&hwc->prev_count);
- uint64_t new_raw_count = arc_pmu_read_counter(idx);
- int64_t delta = new_raw_count - prev_raw_count;
+ u64 prev_raw_count = local64_read(&hwc->prev_count);
+ u64 new_raw_count = arc_pmu_read_counter(idx);
+ s64 delta = new_raw_count - prev_raw_count;
/*
* We aren't afraid of hwc->prev_count changing beneath our feet
int ret;
if (!is_sampling_event(event)) {
- hwc->sample_period = arc_pmu->max_period;
+ hwc->sample_period = arc_pmu->max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
pr_debug("init cache event with h/w %08x \'%s\'\n",
(int)hwc->config, arc_pmu_ev_hw_map[ret]);
return 0;
+
+ case PERF_TYPE_RAW:
+ if (event->attr.config >= arc_pmu->n_events)
+ return -ENOENT;
+
+ hwc->config |= event->attr.config;
+ pr_debug("init raw event with idx %lld \'%s\'\n",
+ event->attr.config,
+ arc_pmu->raw_entry[event->attr.config].name);
+
+ return 0;
+
default:
return -ENOENT;
}
/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
- uint32_t tmp;
+ u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}
/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
- uint32_t tmp;
+ u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
- } else if (unlikely(left <= 0)) {
+ } else if (unlikely(left <= 0)) {
/* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
write_aux_reg(ARC_REG_PCT_INDEX, idx);
/* Write value */
- write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
- write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+ write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+ write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
perf_event_update_userpage(event);
/* Enable interrupt for this counter */
if (is_sampling_event(event))
write_aux_reg(ARC_REG_PCT_INT_CTRL,
- read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
/* enable ARC pmu here */
write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */
* Reset interrupt flag by writing of 1. This is required
* to make sure pending interrupt was not left.
*/
- write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+ write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
write_aux_reg(ARC_REG_PCT_INT_CTRL,
- read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
}
if (!(event->hw.state & PERF_HES_STOPPED)) {
if (is_sampling_event(event)) {
/* Mimic full counter overflow as other arches do */
- write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+ write_aux_reg(ARC_REG_PCT_INT_CNTL,
+ lower_32_bits(arc_pmu->max_period));
write_aux_reg(ARC_REG_PCT_INT_CNTH,
- (arc_pmu->max_period >> 32));
+ upper_32_bits(arc_pmu->max_period));
}
write_aux_reg(ARC_REG_PCT_CONFIG, 0);
idx = __ffs(active_ints);
/* Reset interrupt flag by writing of 1 */
- write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+ write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
/*
* On reset of "interrupt active" bit corresponding
* Now we need to re-enable interrupt for the counter.
*/
write_aux_reg(ARC_REG_PCT_INT_CTRL,
- read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
event = pmu_cpu->act_counter[idx];
hwc = &event->hw;
arc_pmu_stop(event, 0);
}
- active_ints &= ~(1U << idx);
+ active_ints &= ~BIT(idx);
} while (active_ints);
done:
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+ .name = "format",
+ .attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have pre-defined list of perf events.
+ * We will generate and add attrs dynamically in probe() after we read HW
+ * configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+ .name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+ memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+ arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+ arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+ arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+ arc_pmu->attr[j].id = j;
+ arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+ arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+ sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->attr)
+ return -ENOMEM;
+
+ arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+ sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->attrs)
+ return -ENOMEM;
+
+ arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+ sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->raw_entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+ if (!arc_pmu_ev_hw_map[i])
+ return false;
+
+ if (!strlen(arc_pmu_ev_hw_map[i]))
+ return false;
+
+ if (strcmp(arc_pmu_ev_hw_map[i], name))
+ return false;
+
+ return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+ int i;
+
+ /* See if HW condition has been mapped to a perf event_id */
+ for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+ if (event_in_hw_event_map(i, str)) {
+ pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+ i, str, j);
+ arc_pmu->ev_hw_idx[i] = j;
+ }
+ }
+}
+
static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
- int i, j, has_interrupts;
+ int i, has_interrupts;
int counter_size; /* in bits */
union cc_name {
struct {
- uint32_t word0, word1;
+ u32 word0, word1;
char sentinel;
} indiv;
- char str[9];
+ char str[ARCPMU_EVENT_NAME_LEN];
} cc_name;
return -ENODEV;
}
BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
- BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+ if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+ return -EINVAL;
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
- BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+ if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+ return -EINVAL;
arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
return -ENOMEM;
+ arc_pmu->n_events = cc_bcr.c;
+
+ if (arc_pmu_raw_alloc(&pdev->dev))
+ return -ENOMEM;
+
has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
arc_pmu->n_counters = pct_bcr.c;
pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
arc_pmu->n_counters, counter_size, cc_bcr.c,
- has_interrupts ? ", [overflow IRQ support]":"");
+ has_interrupts ? ", [overflow IRQ support]" : "");
- cc_name.str[8] = 0;
+ cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
arc_pmu->ev_hw_idx[i] = -1;
/* loop thru all available h/w condition indexes */
- for (j = 0; j < cc_bcr.c; j++) {
- write_aux_reg(ARC_REG_CC_INDEX, j);
+ for (i = 0; i < cc_bcr.c; i++) {
+ write_aux_reg(ARC_REG_CC_INDEX, i);
cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
- /* See if it has been mapped to a perf event_id */
- for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
- if (arc_pmu_ev_hw_map[i] &&
- !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
- strlen(arc_pmu_ev_hw_map[i])) {
- pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
- i, cc_name.str, j);
- arc_pmu->ev_hw_idx[i] = j;
- }
- }
+ arc_pmu_map_hw_event(i, cc_name.str);
+ arc_pmu_add_raw_event_attr(i, cc_name.str);
}
+ arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+ arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+ arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
arc_pmu->pmu = (struct pmu) {
.pmu_enable = arc_pmu_enable,
.pmu_disable = arc_pmu_disable,
.start = arc_pmu_start,
.stop = arc_pmu_stop,
.read = arc_pmu_read,
+ .attr_groups = arc_pmu->attr_groups,
};
if (has_interrupts) {
} else
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
- return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
+ /*
+ * perf parser doesn't really like '-' symbol in events name, so let's
+ * use '_' in arc pct name as it goes to kernel PMU event prefix.
+ */
+ return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
}
-#ifdef CONFIG_OF
static const struct of_device_id arc_pmu_match[] = {
{ .compatible = "snps,arc700-pct" },
{ .compatible = "snps,archs-pct" },
{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif
static struct platform_driver arc_pmu_driver = {
.driver = {
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
const struct id_to_str *tbl;
struct bcr_isa_arcv2 isa;
+ struct bcr_actionpoint ap;
FIX_PTR(cpu);
cpu->bpu.full = bpu.ft;
cpu->bpu.num_cache = 256 << bpu.bce;
cpu->bpu.num_pred = 2048 << bpu.pte;
+ cpu->bpu.ret_stk = 4 << bpu.rse;
if (cpu->core.family >= 0x54) {
unsigned int exec_ctrl;
}
}
- READ_BCR(ARC_REG_AP_BCR, bcr);
- cpu->extn.ap = bcr.ver ? 1 : 0;
+ READ_BCR(ARC_REG_AP_BCR, ap);
+ if (ap.ver) {
+ cpu->extn.ap_num = 2 << ap.num;
+ cpu->extn.ap_full = !!ap.min;
+ }
READ_BCR(ARC_REG_SMART_BCR, bcr);
cpu->extn.smart = bcr.ver ? 1 : 0;
READ_BCR(ARC_REG_RTT_BCR, bcr);
cpu->extn.rtt = bcr.ver ? 1 : 0;
- cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-
READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
/* some hacks for lack of feature BCR info in old ARC700 cores */
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
- "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"),
- cpu->bpu.num_cache, cpu->bpu.num_pred);
+ cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
if (is_isa_arcv2()) {
struct bcr_lpb lpb;
IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
- if (cpu->extn.debug)
- n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
- IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
+ if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
+ n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
IS_AVAIL1(cpu->extn.smart, "smaRT "),
IS_AVAIL1(cpu->extn.rtt, "RTT "));
+ if (cpu->extn.ap_num) {
+ n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
+ cpu->extn.ap_num,
+ cpu->extn.ap_full ? "full":"min");
+ }
+ n += scnprintf(buf + n, len - n, "\n");
+ }
if (cpu->dccm.sz || cpu->iccm.sz)
n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
#include <asm/arcregs.h>
#include <asm/irqflags.h>
+#define ARC_PATH_MAX 256
+
/*
* Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
* -Prints 3 regs per line and a CR.
print_reg_file(&(cregs->r13), 13);
}
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
{
char *path_nm = NULL;
struct mm_struct *mm;
struct file *exe_file;
+ char buf[ARC_PATH_MAX];
mm = get_task_mm(tsk);
if (!mm)
mmput(mm);
if (exe_file) {
- path_nm = file_path(exe_file, buf, 255);
+ path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
fput(exe_file);
}
pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
}
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
{
struct vm_area_struct *vma;
- char *nm = buf;
struct mm_struct *active_mm = current->active_mm;
/* can't use print_vma_addr() yet as it doesn't check for
* if the container VMA is not found
*/
if (vma && (vma->vm_start <= address)) {
+ char buf[ARC_PATH_MAX];
+ char *nm = "?";
+
if (vma->vm_file) {
- nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+ nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
if (IS_ERR(nm))
nm = "?";
}
{
struct task_struct *tsk = current;
struct callee_regs *cregs;
- char *buf;
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return;
+ /*
+ * generic code calls us with preemption disabled, but some calls
+ * here could sleep, so re-enable to avoid lockdep splat
+ */
+ preempt_enable();
- print_task_path_n_nm(tsk, buf);
+ print_task_path_n_nm(tsk);
show_regs_print_info(KERN_INFO);
show_ecr_verbose(regs);
(void *)regs->blink, (void *)regs->ret);
if (user_mode(regs))
- show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+ show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("[STAT32]: 0x%08lx", regs->status32);
if (cregs)
show_callee_regs(cregs);
- free_page((unsigned long)buf);
+ preempt_disable();
}
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
*/
#include <linux/linkage.h>
+#include <asm/cache.h>
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and prealloc
+ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
+ * If you want to implement optimized memset for other possible L1 data cache
+ * line lengths (32B and 128B) you should rewrite the code, carefully
+ * checking that we don't issue any prefetchw/prealloc instruction for
+ * L1 cache lines which don't belong to the memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR reg, off
+ prealloc [\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR reg, off
+ prefetchw [\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
ENTRY_CFI(memset)
- prefetchw [r0] ; Prefetch the write location
+ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
lpnz @.Lset64bytes
;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
- prefetchw [r3, 64] ;Prefetch the next write location
-#else
- prealloc [r3, 64]
-#endif
+ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
+
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
- prefetchw [r3, 32] ;Prefetch the next write location
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
*/
fault = handle_mm_fault(vma, address, flags);
- /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
if (fatal_signal_pending(current)) {
- if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
- up_read(&mm->mmap_sem);
- if (user_mode(regs))
+
+ /*
+ * if fault retry, mmap_sem already relinquished by core mm
+ * so OK to return to user mode (with signal handled first)
+ */
+ if (fault & VM_FAULT_RETRY) {
+ if (!user_mode(regs))
+ goto no_context;
return;
+ }
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
*/
memblock_add_node(low_mem_start, low_mem_sz, 0);
- memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) {
ifeq ($(CONFIG_XIP_KERNEL),y)
cmd_deflate_xip_data = $(CONFIG_SHELL) -c \
- '$(srctree)/$(src)/deflate_xip_data.sh $< $@ || { rm -f $@; false; }'
+ '$(srctree)/$(src)/deflate_xip_data.sh $< $@'
ifeq ($(CONFIG_XIP_DEFLATED_DATA),y)
quiet_cmd_mkxip = XIPZ $@
bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
[ -z "$$bad_syms" ] || \
( echo "following symbols must have non local/private scope:" >&2; \
- echo "$$bad_syms" >&2; rm -f $@; false )
+ echo "$$bad_syms" >&2; false )
check_for_multiple_zreladdr = \
if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
regulator-boot-on;
};
+ baseboard_3v3: fixedregulator-3v3 {
+ /* TPS73701DCQ */
+ compatible = "regulator-fixed";
+ regulator-name = "baseboard_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vbat>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ baseboard_1v8: fixedregulator-1v8 {
+ /* TPS73701DCQ */
+ compatible = "regulator-fixed";
+ regulator-name = "baseboard_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vbat>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
backlight_lcd: backlight-regulator {
compatible = "regulator-fixed";
regulator-name = "lcd_backlight_pwr";
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 EVM";
+ simple-audio-card,name = "DA850-OMAPL138 EVM";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
/* Regulators */
IOVDD-supply = <&vdcdc2_reg>;
- /* Derived from VBAT: Baseboard 3.3V / 1.8V */
- AVDD-supply = <&vbat>;
- DRVDD-supply = <&vbat>;
- DVDD-supply = <&vbat>;
+ AVDD-supply = <&baseboard_3v3>;
+ DRVDD-supply = <&baseboard_3v3>;
+ DVDD-supply = <&baseboard_1v8>;
};
tca6416: gpio@20 {
compatible = "ti,tca6416";
};
};
+ vcc_5vd: fixedregulator-vcc_5vd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_5vd";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ };
+
+ vcc_3v3d: fixedregulator-vcc_3v3d {
+ /* TPS650250 - VDCDC1 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3d";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_5vd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_1v8d: fixedregulator-vcc_1v8d {
+ /* TPS650250 - VDCDC2 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8d";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_5vd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 LCDK";
+ simple-audio-card,name = "DA850-OMAPL138 LCDK";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
compatible = "ti,tlv320aic3106";
reg = <0x18>;
status = "okay";
+
+ /* Regulators */
+ IOVDD-supply = <&vcc_3v3d>;
+ AVDD-supply = <&vcc_3v3d>;
+ DRVDD-supply = <&vcc_3v3d>;
+ DVDD-supply = <&vcc_1v8d>;
};
};
compatible = "gpio-fan";
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
pinctrl-names = "default";
- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
- &gpio1 13 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+ &gpio1 13 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0
3000 1
6000 2>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
ranges;
- num-lanes = <1>;
status = "disabled";
};
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
ranges;
- num-lanes = <1>;
status = "disabled";
};
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
ranges;
- num-lanes = <1>;
status = "disabled";
};
};
# SPDX-License-Identifier: GPL-2.0
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd-common.h
generated-y += unistd-oabi.h
generated-y += unistd-eabi.h
-
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += termios.h
#include <asm/patch.h>
#include <asm/insn.h>
-#ifdef HAVE_JUMP_LABEL
-
static void __arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
bool is_static)
{
__arch_jump_label_transform(entry, type, true);
}
-
-#endif
.dev_id = "da830-mmc.0",
.table = {
/* gpio chip 1 contains gpio range 32-63 */
- GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd",
+ GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp",
+ GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_LOW),
},
};
.dev_id = "da830-mmc.0",
.table = {
/* gpio chip 2 contains gpio range 64-95 */
- GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
+ GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
+ GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_HIGH),
},
};
static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
.dev_id = "i2c_davinci.1",
.table = {
- GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda",
+ GPIO_LOOKUP("davinci_gpio", DM355_I2C_SDA_PIN, "sda",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl",
+ GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
},
};
static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
.dev_id = "i2c_davinci.1",
.table = {
- GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda",
+ GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SDA_PIN, "sda",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl",
+ GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
},
};
static struct gpiod_lookup_table mmc_gpios_table = {
.dev_id = "da830-mmc.0",
.table = {
- GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd",
+ GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp",
+ GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_LOW),
},
};
char *mmciname;
lookup = devm_kzalloc(&dev->dev,
- sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
+ struct_size(lookup, table, 3),
GFP_KERNEL);
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+ "lm%x:00700", dev->id);
+ if (!lookup || !chipname || !mmciname)
+ return -ENOMEM;
+
lookup->dev_id = mmciname;
/*
* Offsets on GPIO block 1:
void __iomem *sdr_ctl_base_addr;
unsigned long socfpga_cpu1start_addr;
+extern void __init socfpga_reset_init(void);
+
static void __init socfpga_sysmgr_init(void)
{
struct device_node *np;
if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
socfpga_init_ocram_ecc();
+ socfpga_reset_init();
}
static void __init socfpga_arria10_init_irq(void)
socfpga_init_arria10_l2_ecc();
if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
socfpga_init_arria10_ocram_ecc();
+ socfpga_reset_init();
}
static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
quiet_cmd_gen_mach = GEN $@
cmd_gen_mach = mkdir -p $(dir $@) && \
- $(AWK) -f $(filter-out $(PHONY),$^) > $@ || \
- { rm -f $@; /bin/false; }
+ $(AWK) -f $(filter-out $(PHONY),$^) > $@
$(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
$(call if_changed,gen_mach)
pinctrl-0 = <&cp0_pcie_pins>;
num-lanes = <4>;
num-viewport = <8>;
- reset-gpio = <&cp0_gpio1 20 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>;
status = "okay";
};
method = "smc";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * This area matches the mapping done with a
+ * mainline U-Boot, and should be updated by the
+ * bootloader.
+ */
+
+ psci-area@4000000 {
+ reg = <0x0 0x4000000 0x0 0x200000>;
+ no-map;
+ };
+ };
+
ap806 {
#address-cells = <2>;
#size-cells = <2>;
ranges;
status = "disabled";
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>,
<0 0 0 2 &pcie_intc0 1>,
ranges;
status = "disabled";
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>,
<0 0 0 2 &pcie_intc1 1>,
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
CONFIG_SND_SOC_RK3399_GRU_SOUND=m
+CONFIG_SND_MESON_AXG_SOUND_CARD=m
CONFIG_SND_SOC_SAMSUNG=y
CONFIG_SND_SOC_RCAR=m
CONFIG_SND_SOC_AK4613=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_TAS571X=m
CONFIG_I2C_HID=m
CONFIG_USB=y
CONFIG_USB_OTG=y
#ifndef __ASM_PROTOTYPES_H
#define __ASM_PROTOTYPES_H
/*
- * CONFIG_MODEVERIONS requires a C declaration to generate the appropriate CRC
+ * CONFIG_MODVERSIONS requires a C declaration to generate the appropriate CRC
* for each symbol. Since commit:
*
* 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm")
*/
#define ARCH_DMA_MINALIGN (128)
+#ifdef CONFIG_KASAN_SW_TAGS
+#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
#ifndef __ASM_MMU_H
#define __ASM_MMU_H
+#include <asm/cputype.h>
+
#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
#define USER_ASID_BIT 48
#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
+static inline bool arm64_kernel_use_ng_mappings(void)
+{
+ bool tx1_bug;
+
+ /* What's a kpti? Use global mappings if we don't know. */
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+ return false;
+
+ /*
+ * Note: this function is called before the CPU capabilities have
+ * been configured, so our early mappings will be global. If we
+ * later determine that kpti is required, then
+ * kpti_install_ng_mappings() will make them non-global.
+ */
+ if (arm64_kernel_unmapped_at_el0())
+ return true;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return false;
+
+ /*
+ * KASLR is enabled so we're going to be enabling kpti on non-broken
+ * CPUs regardless of their susceptibility to Meltdown. Rather
+ * than force everybody to go through the G -> nG dance later on,
+ * just put down non-global mappings from the beginning.
+ */
+ if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+ tx1_bug = false;
+#ifndef MODULE
+ } else if (!static_branch_likely(&arm64_const_caps_ready)) {
+ extern const struct midr_range cavium_erratum_27456_cpus[];
+
+ tx1_bug = is_midr_in_range_list(read_cpuid_id(),
+ cavium_erratum_27456_cpus);
+#endif
+ } else {
+ tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
+ }
+
+ return !tx1_bug && kaslr_offset() > 0;
+}
+
typedef void (*bp_hardening_cb_t)(void);
struct bp_hardening_data {
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
-#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
+#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
# SPDX-License-Identifier: GPL-2.0
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += errno.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
-generic-y += siginfo.h
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
-static const struct midr_range cavium_erratum_27456_cpus[] = {
+const struct midr_range cavium_erratum_27456_cpus[] = {
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
/* Cavium ThunderX, T81 pass 1.0 */
/* Useful for KASLR robustness */
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return true;
+ return kaslr_offset() > 0;
/* Don't force KPTI for CPUs that are not vulnerable */
if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
static bool kpti_applied = false;
int cpu = smp_processor_id();
- if (kpti_applied)
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+ * created any global mappings at all.
+ */
+ if (kpti_applied || kaslr_offset() > 0)
return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET
+EXPORT_SYMBOL(kimage_vaddr)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
#include <linux/jump_label.h>
#include <asm/insn.h>
-#ifdef HAVE_JUMP_LABEL
-
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
* NOP needs to be replaced by a branch.
*/
}
-
-#endif /* HAVE_JUMP_LABEL */
#include <linux/sched.h>
#include <linux/types.h>
+#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
return ret;
}
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
{
static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
- cmdline = get_cmdline(fdt);
+ cmdline = kaslr_get_cmdline(fdt);
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
return 0;
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
return offset;
}
/* add kaslr-seed */
ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
- if (ret && (ret != -FDT_ERR_NOTFOUND))
+ if (ret == -FDT_ERR_NOTFOUND)
+ ret = 0;
+ else if (ret)
goto out;
if (rng_is_initialized()) {
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
size_t size, unsigned long flags);
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ *
+ * For CACHEV1 (807, 810), store instruction could fast retire, so we need
+ * another mb() to prevent st fast retire.
+ *
+ * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't
+ * fast retire.
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
+#else
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
+#endif
+
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
extern void pgd_init(unsigned long *p);
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
- unsigned long *kaddr, i;
+ unsigned long i;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
- PTE_ORDER);
- kaddr = (unsigned long *)pte;
- if (address & 0x80000000)
- for (i = 0; i < (PAGE_SIZE/4); i++)
- *(kaddr + i) = 0x1;
- else
- clear_page(kaddr);
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
+ (pte + i)->pte_low = _PAGE_GLOBAL;
return pte;
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
- unsigned long *kaddr, i;
-
- pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
- if (pte) {
- kaddr = kmap_atomic(pte);
- if (address & 0x80000000) {
- for (i = 0; i < (PAGE_SIZE/4); i++)
- *(kaddr + i) = 0x1;
- } else
- clear_page(kaddr);
- kunmap_atomic(kaddr);
- pgtable_page_ctor(pte);
+
+ pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!pte)
+ return NULL;
+
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
}
+
return pte;
}
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += param.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
-generic-y += shmbuf.h
-generic-y += bitsperlong.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += siginfo.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += statfs.h
-generic-y += stat.h
-generic-y += setup.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
#include <linux/spinlock.h>
#include <asm/pgtable.h>
-#if defined(__CSKYABIV2__)
+#ifdef CONFIG_CPU_CK810
#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0)
*(uint16_t *)(addr) = 0xE8Fa; \
*((uint16_t *)(addr) + 1) = 0x0000; \
} while (0)
+
+static void jsri_2_lrw_jsr(uint32_t *location)
+{
+ uint16_t *location_tmp = (uint16_t *)location;
+
+ if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
+ return;
+
+ if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
+ /* jsri 0x... --> lrw r26, 0x... */
+ CHANGE_JSRI_TO_LRW(location);
+ /* lsli r0, r0 --> jsr r26 */
+ SET_JSR32_R26(location + 1);
+ }
+}
+#else
+static void inline jsri_2_lrw_jsr(uint32_t *location)
+{
+ return;
+}
#endif
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
Elf32_Sym *sym;
uint32_t *location;
short *temp;
-#if defined(__CSKYABIV2__)
- uint16_t *location_tmp;
-#endif
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
case R_CSKY_PCRELJSR_IMM11BY2:
break;
case R_CSKY_PCRELJSR_IMM26BY2:
-#if defined(__CSKYABIV2__)
- location_tmp = (uint16_t *)location;
- if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
- break;
-
- if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
- /* jsri 0x... --> lrw r26, 0x... */
- CHANGE_JSRI_TO_LRW(location);
- /* lsli r0, r0 --> jsr r26 */
- SET_JSR32_R26(location + 1);
- }
-#endif
+ jsri_2_lrw_jsr(location);
break;
case R_CSKY_ADDR_HI16:
temp = ((short *)location) + 1;
boot := arch/h8300/boot
-archmrproper:
-
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += setup.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
NM := $(CROSS_COMPILE)nm -B
READELF := $(CROSS_COMPILE)readelf
-export AWK
-
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_64.h
-generic-y += bpf_perf_event.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += msgbuf.h
-generic-y += poll.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
-generic-y += auxvec.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
config MIPS32_N32
bool "Kernel support for n32 binaries"
depends on 64BIT
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
pm_power_off = bcm47xx_machine_halt;
}
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+ struct device *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ err = dev_set_name(dev, "bcm47xx_soc");
+ if (err) {
+ pr_err("Failed to set SoC device name: %d\n", err);
+ kfree(dev);
+ return NULL;
+ }
+
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err)
+ pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+ return dev;
+}
+#endif
+
/*
* This finishes bus initialization doing things that were not possible without
* kmalloc. Make sure to call it late enough (after mm_init).
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
int err;
+ bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+ if (!bcm47xx_bus.bcma.dev)
+ panic("Failed to setup SoC device\n");
+
err = bcma_host_soc_init(&bcm47xx_bus.bcma);
if (err)
panic("Failed to initialize BCMA bus (err %d)", err);
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
+ if (device_register(bcm47xx_bus.bcma.dev))
+ pr_err("Failed to register SoC device\n");
bcma_bus_register(&bcm47xx_bus.bcma.bus);
break;
#endif
" sync \n"
" synci ($0) \n");
- relocated_kexec_smp_wait(NULL);
+ kexec_reboot();
}
#endif
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
-#define MIPS_CPU_TIMER_IRQ 7
-
#define MAX_IM 5
#endif /* _FALCON_IRQ__ */
#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
-#define MIPS_CPU_TIMER_IRQ 7
-
#define MAX_IM 5
#endif
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_n32.h
generated-y += unistd_nr_n32.h
generated-y += unistd_nr_n64.h
generated-y += unistd_nr_o32.h
-generic-y += bpf_perf_event.h
-generic-y += ipcbuf.h
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
#include <asm/cacheflush.h>
#include <asm/inst.h>
-#ifdef HAVE_JUMP_LABEL
-
/*
* Define parameters for the standard MIPS and the microMIPS jump
* instruction encoding respectively:
mutex_unlock(&text_mutex);
}
-
-#endif /* HAVE_JUMP_LABEL */
.irq_set_type = ltq_eiu_settype,
};
-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
{
+ int module = irq_desc_get_irq(desc) - 2;
u32 irq;
+ int hwirq;
irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
if (irq == 0)
* other bits might be bogus
*/
irq = __fls(irq);
- do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is a EBU irq, we need to ack it or get a deadlock */
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
LTQ_EBU_PCC_ISTAT);
}
-#define DEFINE_HWx_IRQDISPATCH(x) \
- static void ltq_hw ## x ## _irqdispatch(void) \
- { \
- ltq_hw_irqdispatch(x); \
- }
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
- do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
- int irq;
-
- if (!pending) {
- spurious_interrupt();
- return;
- }
-
- pending >>= CAUSEB_IP;
- while (pending) {
- irq = fls(pending) - 1;
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
- pending &= ~BIT(irq);
- }
-}
-
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct irq_chip *chip = &ltq_irq_type;
for (i = 0; i < MAX_IM; i++)
irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
- if (cpu_has_vint) {
- pr_info("Setting up vectored interrupts\n");
- set_vi_handler(2, ltq_hw0_irqdispatch);
- set_vi_handler(3, ltq_hw1_irqdispatch);
- set_vi_handler(4, ltq_hw2_irqdispatch);
- set_vi_handler(5, ltq_hw3_irqdispatch);
- set_vi_handler(6, ltq_hw4_irqdispatch);
- set_vi_handler(7, ltq_hw5_irqdispatch);
- }
-
ltq_domain = irq_domain_add_linear(node,
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
-#ifndef CONFIG_MIPS_MT_SMP
- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
- IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
- IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
/* tell oprofile which irq to use */
ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
- /*
- * if the timer irq is not one of the mips irqs we need to
- * create a mapping
- */
- if (MIPS_CPU_TIMER_IRQ != 7)
- irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
-
/* the external interrupts are optional and xway only */
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
unsigned int get_c0_compare_int(void)
{
- return MIPS_CPU_TIMER_IRQ;
+ return CP0_LEGACY_COMPARE_IRQ;
}
static struct of_device_id __initdata of_irq_ids[] = {
unsigned long flags;
ch->desc = 0;
- ch->desc_base = dma_zalloc_coherent(ch->dev,
- LTQ_DESC_NUM * LTQ_DESC_SIZE,
- &ch->phys, GFP_ATOMIC);
+ ch->desc_base = dma_alloc_coherent(ch->dev,
+ LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ &ch->phys, GFP_ATOMIC);
spin_lock_irqsave(&ltq_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
int irq;
struct irq_chip *msi;
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+ return 0;
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
KBUILD_DEFCONFIG := defconfig
-comma = ,
-
-
ifdef CONFIG_FUNCTION_TRACER
arch-y += -malways-save-lp -mno-relax
endif
boot := arch/nds32/boot
core-y += $(boot)/dts/
-.PHONY: FORCE
-
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
-CLEAN_FILES += include/asm-nds32/constants.h*
-
-# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
generic-y += asm-offsets.h
generic-y += atomic.h
generic-y += bitops.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
generic-y += bug.h
generic-y += bugs.h
generic-y += checksum.h
generic-y += dma.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += export.h
generic-y += fb.h
-generic-y += fcntl.h
-generic-y += ftrace.h
generic-y += gpio.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
generic-y += irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
generic-y += parport.h
generic-y += pci.h
generic-y += percpu.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
-generic-y += shmbuf.h
generic-y += sizes.h
-generic-y += stat.h
generic-y += switch_to.h
generic-y += timex.h
generic-y += topology.h
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
-generic-y += shmbuf.h
-generic-y += bitsperlong.h
-generic-y += fcntl.h
-generic-y += stat.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += setup.h
-generic-y += siginfo.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += swab.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
config NO_IOPORT_MAP
def_bool y
-config HAS_DMA
- def_bool y
-
config FPU
def_bool n
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += setup.h
-generic-y += shmbuf.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
KBUILD_DEFCONFIG := or1ksim_defconfig
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
-LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
BUILTIN_DTB := n
endif
core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
-
-all: vmlinux
/* Ensure that addr is below task's addr_limit */
#define __addr_ok(addr) ((unsigned long) addr < get_fs())
-#define access_ok(addr, size) \
- __range_ok((unsigned long)addr, (unsigned long)size)
+#define access_ok(addr, size) \
+({ \
+ unsigned long __ao_addr = (unsigned long)(addr); \
+ unsigned long __ao_size = (unsigned long)(size); \
+ __range_ok(__ao_addr, __ao_size); \
+})
/*
* These are the main single-value transfer routines. They automatically
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += setup.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
-generic-y += auxvec.h
-generic-y += bpf_perf_event.h
generic-y += kvm_para.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += siginfo.h
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
/* OPAL tracing */
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
extern struct static_key opal_tracepoint_key;
#endif
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sockios.h
-generic-y += statfs.h
-generic-y += siginfo.h
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
+ PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
/* set up the PTE pointers for the Abatron bdiGDB.
*/
- tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
+ lis r6, swapper_pg_dir@h
+ ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)
/* Now turn on the MMU for real! */
#include <linux/jump_label.h>
#include <asm/code-patching.h>
-#ifdef HAVE_JUMP_LABEL
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
else
patch_instruction(addr, PPC_INST_NOP);
}
-#endif
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
- }
+ } else
#endif
- /* Fall through, for non-TM restore */
- if (!MSR_TM_ACTIVE(msr)) {
+ {
/*
+ * Fall through, for non-TM restore
+ *
* Unset MSR[TS] on the thread regs since MSR from user
* context does not have MSR active, and recheckpoint was
* not called since restore_tm_sigcontexts() was not called
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
-unsigned long __init arch_syscall_addr(int nr)
-{
- return sys_call_table[nr*2];
-}
-#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
-
#ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
+ PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
u64 perf_reg_value(struct pt_regs *regs, int idx)
!is_sier_available()))
return 0;
+ if (idx == PERF_REG_POWERPC_MMCRA &&
+ (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
+ IS_ENABLED(CONFIG_PPC32)))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
continue;
seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
- seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys));
+ seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys));
seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
- seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys));
+ seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys));
seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
blk->size, blk->owner);
}
- seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys));
+ seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys));
seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
/* see if there is a keyboard in the device tree
with a parent of type "adb" */
for_each_node_by_name(kbd, "keyboard")
- if (kbd->parent && kbd->parent->type
- && strcmp(kbd->parent->type, "adb") == 0)
+ if (of_node_is_type(kbd->parent, "adb"))
break;
of_node_put(kbd);
if (kbd)
chan->ring_size = ring_size;
- chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev,
+ chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
ring_size * sizeof(u64),
&chan->ring_dma, GFP_KERNEL);
}
} else {
/* Create a group for 1 GPU and attached NPUs for POWER8 */
- pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL);
+ pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
table_group = &pe->npucomp->table_group;
table_group->ops = &pnv_npu_peers_ops;
iommu_register_group(table_group, hose->global_number,
#include <asm/trace.h>
#include <asm/asm-prototypes.h>
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
int opal_tracepoint_regfunc(void)
.section ".text"
#ifdef CONFIG_TRACEPOINTS
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#define OPAL_BRANCH(LABEL) \
ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
#else
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
- if (phb->type == PNV_PHB_NPU_NVLINK)
+ if (phb->type == PNV_PHB_NPU_NVLINK ||
+ phb->type == PNV_PHB_NPU_OCAPI)
continue;
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
#ifdef CONFIG_TRACEPOINTS
-#ifndef HAVE_JUMP_LABEL
+#ifndef CONFIG_JUMP_LABEL
.section ".toc","aw"
.globl hcall_tracepoint_refcount
mr r5,BUFREG; \
__HCALL_INST_POSTCALL
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#define HCALL_BRANCH(LABEL) \
ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
#else
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_TRACEPOINTS
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
int hcall_tracepoint_regfunc(void)
if (!of_device_is_compatible(nvdn->parent,
"ibm,power9-npu"))
continue;
+#ifdef CONFIG_PPC_POWERNV
WARN_ON_ONCE(pnv_npu2_init(hose));
+#endif
break;
}
}
}
/* Initialize outbound message descriptor ring */
- rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev,
- rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
- &rmu->msg_tx_ring.phys, GFP_KERNEL);
+ rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
+ rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+ &rmu->msg_tx_ring.phys,
+ GFP_KERNEL);
if (!rmu->msg_tx_ring.virt) {
rc = -ENOMEM;
goto out_dma;
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
+ select HAVE_ARCH_AUDITSYSCALL
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
+ select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
select RISCV_ISA_A if SMP
select SPARSE_IRQ
select HAVE_ARCH_TRACEHOOK
select HAVE_PCI
select MODULES_USE_ELF_RELA if MODULES
+ select MODULE_SECTIONS if MODULES
select THREAD_INFO_IN_TASK
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
bool "2GiB"
config MAXPHYSMEM_128GB
depends on 64BIT && CMODEL_MEDANY
- select MODULE_SECTIONS if MODULES
bool "128GiB"
endchoice
generic-y += bugs.h
-generic-y += cacheflush.h
generic-y += checksum.h
generic-y += compat.h
generic-y += cputime.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
-generic-y += errno.h
generic-y += exec.h
generic-y += fb.h
-generic-y += fcntl.h
generic-y += hardirq.h
generic-y += hash.h
generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
-generic-y += mman.h
-generic-y += module.h
-generic-y += msgbuf.h
generic-y += mutex.h
-generic-y += param.h
generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
-generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
-generic-y += sembuf.h
generic-y += serial.h
-generic-y += setup.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
#define MODULE_ARCH_VERMAGIC "riscv"
struct module;
-u64 module_emit_got_entry(struct module *mod, u64 val);
-u64 module_emit_plt_entry(struct module *mod, u64 val);
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
#ifdef CONFIG_MODULE_SECTIONS
struct mod_section {
- struct elf64_shdr *shdr;
+ Elf_Shdr *shdr;
int num_entries;
int max_entries;
};
};
struct got_entry {
- u64 symbol_addr; /* the real variable address */
+ unsigned long symbol_addr; /* the real variable address */
};
-static inline struct got_entry emit_got_entry(u64 val)
+static inline struct got_entry emit_got_entry(unsigned long val)
{
return (struct got_entry) {val};
}
-static inline struct got_entry *get_got_entry(u64 val,
+static inline struct got_entry *get_got_entry(unsigned long val,
const struct mod_section *sec)
{
- struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr;
+ struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got[i].symbol_addr == val)
#define REG_T0 0x5
#define REG_T1 0x6
-static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
+static inline struct plt_entry emit_plt_entry(unsigned long val,
+ unsigned long plt,
+ unsigned long got_plt)
{
/*
* U-Type encoding:
* +------------+------------+--------+----------+----------+
*
*/
- u64 offset = got_plt - plt;
+ unsigned long offset = got_plt - plt;
u32 hi20 = (offset + 0x800) & 0xfffff000;
u32 lo12 = (offset - hi20);
return (struct plt_entry) {
};
}
-static inline int get_got_plt_idx(u64 val, const struct mod_section *sec)
+static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
{
struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
int i;
return -1;
}
-static inline struct plt_entry *get_plt_entry(u64 val,
- const struct mod_section *sec_plt,
- const struct mod_section *sec_got_plt)
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+ const struct mod_section *sec_plt,
+ const struct mod_section *sec_got_plt)
{
struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
SET_FP(regs, val);
}
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PTRACE_H */
#ifndef _ASM_RISCV_SYSCALL_H
#define _ASM_RISCV_SYSCALL_H
+#include <uapi/linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
}
+static inline int syscall_get_arch(void)
+{
+#ifdef CONFIG_64BIT
+ return AUDIT_ARCH_RISCV64;
+#else
+ return AUDIT_ARCH_RISCV32;
+#endif
+}
+
#endif /* _ASM_RISCV_SYSCALL_H */
#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_WORK \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+
#endif /* _ASM_RISCV_THREAD_INFO_H */
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += setup.h
-generic-y += unistd.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
-generic-y += siginfo.h
REG_S s2, PT_SEPC(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_TRACE
+ andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_enter
check_syscall_nr:
/* Check to make sure we don't jump to a bogus syscall number. */
REG_S a0, PT_A0(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_TRACE
+ andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_exit
ret_from_exception:
#include <linux/kernel.h>
#include <linux/module.h>
-u64 module_emit_got_entry(struct module *mod, u64 val)
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
{
struct mod_section *got_sec = &mod->arch.got;
int i = got_sec->num_entries;
struct got_entry *got = get_got_entry(val, got_sec);
if (got)
- return (u64)got;
+ return (unsigned long)got;
/* There is no duplicate entry, create a new one */
got = (struct got_entry *)got_sec->shdr->sh_addr;
got_sec->num_entries++;
BUG_ON(got_sec->num_entries > got_sec->max_entries);
- return (u64)&got[i];
+ return (unsigned long)&got[i];
}
-u64 module_emit_plt_entry(struct module *mod, u64 val)
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val)
{
struct mod_section *got_plt_sec = &mod->arch.got_plt;
struct got_entry *got_plt;
int i = plt_sec->num_entries;
if (plt)
- return (u64)plt;
+ return (unsigned long)plt;
/* There is no duplicate entry, create a new one */
got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
got_plt[i] = emit_got_entry(val);
plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
- plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]);
+ plt[i] = emit_plt_entry(val,
+ (unsigned long)&plt[i],
+ (unsigned long)&got_plt[i]);
plt_sec->num_entries++;
got_plt_sec->num_entries++;
BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
- return (u64)&plt[i];
+ return (unsigned long)&plt[i];
}
-static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y)
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
{
return x->r_info == y->r_info && x->r_addend == y->r_addend;
}
-static bool duplicate_rela(const Elf64_Rela *rela, int idx)
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
{
int i;
for (i = 0; i < idx; i++) {
return false;
}
-static void count_max_entries(Elf64_Rela *relas, int num,
+static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
unsigned int type, i;
for (i = 0; i < num; i++) {
- type = ELF64_R_TYPE(relas[i].r_info);
+ type = ELF_RISCV_R_TYPE(relas[i].r_info);
if (type == R_RISCV_CALL_PLT) {
if (!duplicate_rela(relas, i))
(*plts)++;
/* Calculate the maxinum number of entries */
for (i = 0; i < ehdr->e_shnum; i++) {
- Elf64_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
- int num_rela = sechdrs[i].sh_size / sizeof(Elf64_Rela);
- Elf64_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+ Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+ int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+ Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
+#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/tracehook.h>
+
+#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum riscv_regset {
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
#endif
+
+ audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
}
void do_syscall_trace_exit(struct pt_regs *regs)
{
+ audit_syscall_exit(regs);
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- trace_sys_exit(regs, regs->regs[0]);
+ trace_sys_exit(regs, regs_return_value(regs));
#endif
}
void __init parse_dtb(unsigned int hartid, void *dtb)
{
- early_init_dt_scan(__va(dtb));
+ if (early_init_dt_scan(__va(dtb)))
+ return;
+
+ pr_err("No DTB passed to the kernel\n");
+#ifdef CONFIG_CMDLINE_FORCE
+ strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ pr_info("Forcing kernel command line to: %s\n", boot_command_line);
+#endif
}
static void __init setup_bootmem(void)
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/delay.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
enum ipi_message_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
+ IPI_CPU_STOP,
IPI_MAX
};
return -EINVAL;
}
+static void ipi_stop(void)
+{
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ wait_for_interrupt();
+}
+
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
generic_smp_call_function_interrupt();
}
+ if (ops & (1 << IPI_CPU_STOP)) {
+ stats[IPI_CPU_STOP]++;
+ ipi_stop();
+ }
+
BUG_ON((ops >> IPI_MAX) != 0);
/* Order data access and bit testing. */
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
-static void ipi_stop(void *unused)
-{
- while (1)
- wait_for_interrupt();
-}
-
void smp_send_stop(void)
{
- on_each_cpu(ipi_stop, NULL, 1);
+ unsigned long timeout;
+
+ if (num_online_cpus() > 1) {
+ cpumask_t mask;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ if (system_state <= SYSTEM_RUNNING)
+ pr_crit("SMP: stopping secondary CPUs\n");
+ send_ipi_message(&mask, IPI_CPU_STOP);
+ }
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && timeout--)
+ udelay(1);
+
+ if (num_online_cpus() > 1)
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(cpu_online_mask));
}
void smp_send_reschedule(int cpu)
#include <asm/cache.h>
#include <asm/thread_info.h>
+#define MAX_BYTES_PER_LONG 0x10
+
OUTPUT_ARCH(riscv)
ENTRY(_start)
*(.sbss*)
}
- BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
-
EXCEPTION_TABLE(0x10)
NOTES
*(.rel.dyn*)
}
+ BSS_SECTION(MAX_BYTES_PER_LONG,
+ MAX_BYTES_PER_LONG,
+ MAX_BYTES_PER_LONG)
+
_end = .;
STABS_DEBUG
# SPDX-License-Identifier: GPL-2.0
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
-
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += sockios.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += siginfo.h
\ No newline at end of file
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
-obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
#include <linux/jump_label.h>
#include <asm/ipl.h>
-#ifdef HAVE_JUMP_LABEL
-
struct insn {
u16 opcode;
s32 offset;
{
__jump_label_transform(entry, type, 1);
}
-
-#endif
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-define filechk_syshdr
- $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
-endef
+filechk_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
-define filechk_sysnr
- $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $<
-endef
+filechk_sysnr = $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $<
-define filechk_syscalls
- $(CONFIG_SHELL) '$(systbl)' -S < $<
-endef
+filechk_syscalls = $(CONFIG_SHELL) '$(systbl)' -S < $<
syshdr_abi_unistd_32 := common,32
$(uapi)/unistd_32.h: $(syscall) FORCE
struct resource *res;
int i;
+ if (pdev->is_physfn)
+ pdev->no_vf_scan = 1;
+
pdev->dev.groups = zpci_attr_groups;
pdev->dev.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
# Ensure output directory exists
_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-define filechk_facility-defs.h
- $(obj)/gen_facilities
-endef
+filechk_facility-defs.h = $(obj)/gen_facilities
-define filechk_dis-defs.h
- ( $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt )
-endef
+filechk_dis-defs.h = \
+ $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt
$(kapi)/facility-defs.h: $(obj)/gen_facilities FORCE
$(call filechk,facility-defs.h)
* sum := addr + size; carry? --> flag = true;
* if (sum >= addr_limit) flag = true;
*/
-#define __access_ok(addr, size) \
- (__addr_ok((addr) + (size)))
+#define __access_ok(addr, size) ({ \
+ unsigned long __ao_a = (addr), __ao_b = (size); \
+ unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
+ __ao_end >= __ao_a && __addr_ok(__ao_end); })
+
#define access_ok(addr, size) \
(__chk_user_ptr(addr), \
__access_ok((unsigned long __force)(addr), (size)))
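The rewritten __access_ok() above implements the carry check sketched in the comment: it computes the address of the last byte in the range and rejects the range if the addition wrapped or the end falls outside the limit. A minimal stand-alone sketch of the same test in plain C follows (addr_ok() here is a hypothetical stand-in for the arch's __addr_ok() limit check, not part of the patch):

	#include <stdbool.h>

	/* hypothetical stand-in for the arch's __addr_ok() limit test */
	static bool addr_ok(unsigned long end, unsigned long limit)
	{
		return end < limit;
	}

	/*
	 * Same idea as the patched __access_ok(): find the last byte of
	 * the range (or addr itself for a zero-size range) and make sure
	 * the addition did not wrap around the top of the address space.
	 */
	static bool range_ok(unsigned long addr, unsigned long size,
			     unsigned long limit)
	{
		unsigned long end = addr + size - !!size;

		return end >= addr && addr_ok(end, limit);
	}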
# SPDX-License-Identifier: GPL-2.0
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
generic-y += ucontext.h
include/generated/machtypes.h: $(src)/gen-mach-types $(src)/mach-types
@echo ' Generating $@'
$(Q)mkdir -p $(dir $@)
- $(Q)LC_ALL=C $(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
+ $(Q)LC_ALL=C $(AWK) -f $^ > $@
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
-generic-y += bpf_perf_event.h
-generic-y += types.h
obj-$(CONFIG_SPARC64) += $(pc--y)
obj-$(CONFIG_UPROBES) += uprobes.o
-obj-$(CONFIG_SPARC64) += jump_label.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
#include <asm/cacheflush.h>
-#ifdef HAVE_JUMP_LABEL
-
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
flushi(insn);
mutex_unlock(&text_mutex);
}
-
-#endif
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += auxvec.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
generic-y += kvm_para.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += setup.h
-generic-y += shmbuf.h
generic-y += shmparam.h
-generic-y += siginfo.h
-generic-y += signal.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += swab.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
generic-y += ucontext.h
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
-config RESCTRL
+config X86_RESCTRL
bool "Resource Control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
select KERNFS
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
- depends on X86 && ACPI
+ depends on X86 && ACPI && PCI
select COMMON_CLK
select PINCTRL
select IOSF_MBI
archprepare: checkbin
checkbin:
-ifndef CC_HAVE_ASM_GOTO
+ifndef CONFIG_CC_HAS_ASM_GOTO
@echo Compiler lacks asm-goto support.
@exit 1
endif
suffix-$(CONFIG_KERNEL_LZ4) := lz4
quiet_cmd_mkpiggy = MKPIGGY $@
- cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
+ cmd_mkpiggy = $(obj)/mkpiggy $< > $@
targets += piggy.S
$(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE
*/
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
call enter_from_user_mode
#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
-#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO)
+#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
/*
* Workaround for the sake of BPF compilation which utilizes kernel
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H
-#ifndef HAVE_JUMP_LABEL
-/*
- * For better or for worse, if jump labels (the gcc extension) are missing,
- * then the entire static branch patching infrastructure is compiled out.
- * If that happens, the code in here will malfunction. Raise a compiler
- * error instead.
- *
- * In theory, jump labels and the static branch patching infrastructure
- * could be decoupled to fix this.
- */
-#error asm/jump_label.h included on a non-jump-label kernel
-#endif
-
#define JUMP_LABEL_NOP_SIZE 5
#ifdef CONFIG_X86_64
#ifndef _ASM_X86_RESCTRL_SCHED_H
#define _ASM_X86_RESCTRL_SCHED_H
-#ifdef CONFIG_RESCTRL
+#ifdef CONFIG_X86_RESCTRL
#include <linux/sched.h>
#include <linux/jump_label.h>
static inline void resctrl_sched_in(void) {}
-#endif /* CONFIG_RESCTRL */
+#endif /* CONFIG_X86_RESCTRL */
#endif /* _ASM_X86_RESCTRL_SCHED_H */
#define __CLOBBERS_MEM(clb...) "memory", ## clb
-#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
+#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
/* Use asm goto */
c; \
})
-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
/* Use flags output or a set instruction */
c; \
})
-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
#define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \
__GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
{
if (unlikely(!access_ok(ptr,len)))
return 0;
- __uaccess_begin();
+ __uaccess_begin_nospec();
return 1;
}
#define user_access_begin(a,b) user_access_begin(a,b)
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += bpf_perf_event.h
generated-y += unistd_32.h
generated-y += unistd_64.h
generated-y += unistd_x32.h
-generic-y += poll.h
obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o dumpstack.o nmi.o
obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
-obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
+obj-y += setup.o x86_init.o i8259.o irqinit.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
obj-$(CONFIG_X86_64) += sys_x86_64.o
int npages;
int i;
- if (dma_addr == DMA_MAPPING_ERROR ||
+ if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
+ return;
+
+ /*
+ * This driver will not always use a GART mapping, but might have
+ * created a direct mapping instead. If that is the case there is
+ * nothing to unmap here.
+ */
+ if (dma_addr < iommu_bus_base ||
dma_addr >= iommu_bus_base + iommu_size)
return;
obj-$(CONFIG_X86_MCE) += mce/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
-obj-$(CONFIG_RESCTRL) += resctrl/
+obj-$(CONFIG_X86_RESCTRL) += resctrl/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
SPECTRE_V2_USER_NONE;
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;
bool retpoline_module_ok(bool has_retpoline)
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o
-obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o
+obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o
+obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o
CFLAGS_pseudo_lock.o = -I$(src)
#include <asm/alternative.h>
#include <asm/text-patching.h>
-#ifdef HAVE_JUMP_LABEL
-
union jump_code_union {
char code[JUMP_LABEL_NOP_SIZE];
struct {
if (jlstate == JL_STATE_UPDATE)
__jump_label_transform(entry, type, text_poke_early, 1);
}
-
-#endif
/*
* XXX: inoutclob user must know where the argument is being expanded.
- * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
+ * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
*/
#define asm_safe(insn, inoutclob...) \
({ \
int asid, ret;
ret = -EBUSY;
+ if (unlikely(sev->active))
+ return ret;
+
asid = sev_asid_new();
if (asid < 0)
return ret;
* given physical address won't match the required
* VMCS12_REVISION identifier.
*/
- nested_vmx_failValid(vcpu,
+ return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
- return kvm_skip_emulated_instruction(vcpu);
}
new_vmcs12 = kmap(page);
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
struct kvm_tlb_range *range)
{
struct kvm_vcpu *vcpu;
- int ret = -ENOTSUPP, i;
+ int ret = 0, i;
spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
/* unmask address range configure area */
for (i = 0; i < vmx->pt_desc.addr_range; i++)
- vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4));
+ vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
}
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
- cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
+ cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
$(call cmd,inat_tables)
word1 = read_pci_config_16(bus, slot, func, 0xc0);
word2 = read_pci_config_16(bus, slot, func, 0xc2);
if (word1 != word2) {
- res.start = (word1 << 16) | 0x0000;
- res.end = (word2 << 16) | 0xffff;
+ res.start = ((resource_size_t) word1 << 16) | 0x0000;
+ res.end = ((resource_size_t) word2 << 16) | 0xffff;
res.flags = IORESOURCE_MEM;
update_res(info, res.start, res.end, res.flags, 0);
}
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
- val &= ~X2APIC_ENABLE;
+ val &= ~X2APIC_ENABLE;
break;
}
return val;
{
int cpu;
- pvclock_resume();
-
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
};
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+static u64 xen_clock_value_saved;
void xen_save_time_memory_area(void)
{
struct vcpu_register_time_memory_area t;
int ret;
+ xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
+
if (!xen_clock)
return;
int ret;
if (!xen_clock)
- return;
+ goto out;
t.addr.v = &xen_clock->pvti;
if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
ret);
+
+out:
+ /* Need pvclock_resume() before using xen_clocksource_read(). */
+ pvclock_resume();
+ xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
}
static void xen_setup_vsyscall_time_info(void)
-# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
-generic-y += bitsperlong.h
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
generic-y += kvm_para.h
-generic-y += resource.h
-generic-y += siginfo.h
-generic-y += statfs.h
-generic-y += termios.h
#include <asm/cacheflush.h>
-#ifdef HAVE_JUMP_LABEL
-
#define J_OFFSET_MASK 0x0003ffff
#define J_SIGN_MASK (~(J_OFFSET_MASK >> 1))
patch_text(jump_entry_code(e), &insn, JUMP_LABEL_NOP_SIZE);
}
-
-#endif /* HAVE_JUMP_LABEL */
}
/**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
+ * __bfq_deactivate_entity - update sched_data and service trees for
+ * entity, so as to represent entity as inactive
+ * @entity: the entity being deactivated.
* @ins_into_idle_tree: if false, the entity will not be put into the
* idle tree.
*
- * Deactivates an entity, independently of its previous state. Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it into the idle
- * tree.
+ * If necessary and allowed, puts entity into the idle tree. NOTE:
+ * entity may be on no tree if in service.
*/
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
* blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
- * @request_count: out parameter for number of traversed plugged requests
* @same_queue_rq: pointer to &struct request that gets filled in when
* another request associated with @q is found on the plug list
* (optional, may be %NULL)
* @plug: The &struct blk_plug that needs to be initialized
*
* Description:
+ * blk_start_plug() indicates to the block layer an intent by the caller
+ * to submit multiple I/O requests in a batch. The block layer may use
+ * this hint to defer submitting I/Os from the caller until blk_finish_plug()
+ * is called. However, the block layer may choose to submit requests
+ * before a call to blk_finish_plug() if the number of queued I/Os
+ * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
+ * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
+ * the task schedules (see below).
+ *
* Tracking blk_plug inside the task_struct will help with auto-flushing the
* pending I/O should the task end up blocking between blk_start_plug() and
* blk_finish_plug(). This is important from a performance perspective, but
blk_mq_flush_plug_list(plug, from_schedule);
}
+/**
+ * blk_finish_plug - mark the end of a batch of submitted I/O
+ * @plug: The &struct blk_plug passed to blk_start_plug()
+ *
+ * Description:
+ * Indicate that a batch of I/O submissions is complete. This function
+ * must be paired with an initial call to blk_start_plug(). The intent
+ * is to allow the block layer to optimize I/O submission. See the
+ * documentation for blk_start_plug() for more information.
+ */
void blk_finish_plug(struct blk_plug *plug)
{
if (plug != current->plug)
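The kernel-doc added above spells out the plug contract; as a minimal usage sketch of that API (illustrative only, assuming bio points to an already initialised struct bio for the target device):

	struct blk_plug plug;

	blk_start_plug(&plug);		/* open a batch of submissions */
	submit_bio(bio);		/* bio may sit on the plug list for now */
	blk_finish_plug(&plug);		/* flush whatever is still plugged */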
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
- *
- * This file is released under the GPL.
*/
#include <linux/blkdev.h>
{
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
- struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
+ struct blk_mq_alloc_data data = { .flags = 0};
struct request *rq;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
rq_qos_throttle(q, bio);
+ data.cmd_flags = bio->bi_opf;
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
AFLAGS_system_certificates.o := -I$(srctree)
quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2))
- cmd_extract_certs = scripts/extract-cert $(2) $@ || ( rm $@; exit 1)
+ cmd_extract_certs = scripts/extract-cert $(2) $@
targets += x509_certificate_list
$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */
+ crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+ skcipher_crypto_instance(inst));
err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
0, crypto_requires_sync(algt->type,
algt->mask));
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
+ crypto_set_spawn(&ictx->blockcipher_spawn,
+ skcipher_crypto_instance(inst));
err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
if (err)
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
if (keylen < keys->enckeylen)
return -EINVAL;
struct aead_request *req = areq->data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
- aead_request_complete(req, err);
+ authenc_esn_request_complete(req, err);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
for (i = 0; i <= 63; i++) {
- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
ss2 = ss1 ^ rol32(a, 12);
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on ARCH_SUPPORTS_ACPI
select PNP
+ select NLS
default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
-acpi-y += acpi_lpss.o acpi_apd.o
+acpi-$(CONFIG_PCI) += acpi_lpss.o
+acpi-y += acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
return (resv == its->its_count) ? resv : -ENODEV;
}
#else
-static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev);
+static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
struct device *dev)
{
struct acpi_iort_node *node;
struct acpi_iort_root_complex *rc;
+ struct pci_bus *pbus = to_pci_dev(dev)->bus;
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
- iort_match_node_callback, dev);
+ iort_match_node_callback, &pbus->dev);
if (!node || node->revision < 1)
return -ENODEV;
goto error0;
}
- /*
- * ACPI 2.0 requires the EC driver to be loaded and work before
- * the EC device is found in the namespace (i.e. before
- * acpi_load_tables() is called).
- *
- * This is accomplished by looking for the ECDT table, and getting
- * the EC parameters out of that.
- *
- * Ignore the result. Not having an ECDT is not fatal.
- */
- status = acpi_ec_ecdt_probe();
-
#ifdef CONFIG_X86
if (!acpi_ioapic) {
/* compatible (0) means level (3) */
goto error1;
}
+ /*
+ * ACPI 2.0 requires the EC driver to be loaded and work before the EC
+ * device is found in the namespace.
+ *
+ * This is accomplished by looking for the ECDT table and getting the EC
+ * parameters out of that.
+ *
+ * Do that before calling acpi_initialize_objects() which may trigger EC
+ * address space accesses.
+ */
+ acpi_ec_ecdt_probe();
+
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
+#ifdef CONFIG_PCI
void acpi_lpss_init(void);
+#else
+static inline void acpi_lpss_init(void) {}
+#endif
void acpi_apd_init(void);
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"
-#include "intel.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
}
EXPORT_SYMBOL(to_nfit_uuid);
-static struct acpi_nfit_desc *to_acpi_nfit_desc(
- struct nvdimm_bus_descriptor *nd_desc)
-{
- return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
-}
-
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
union acpi_object in_obj, in_buf, *out_obj;
const struct nd_cmd_desc *desc = NULL;
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
+ u16 physical_id;
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
+ *flags = memdev->flags;
+ physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
- *flags = memdev->flags;
- return memdev->physical_id;
+ return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);
nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
if (!nd_set)
return -ENOMEM;
- ndr_desc->nd_set = nd_set;
guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
if (nvdimm)
return 0;
static void nvdimm_invalidate_cache(void);
-static int intel_security_unlock(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
return 0;
}
-static int intel_security_erase(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key,
enum nvdimm_passphrase_type ptype)
{
return 0;
}
-static int intel_security_query_overwrite(struct nvdimm *nvdimm)
+static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
return 0;
}
-static int intel_security_overwrite(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
const struct nvdimm_key_data *nkey)
{
int rc;
{
struct acpi_srat_mem_affinity *p =
(struct acpi_srat_mem_affinity *)header;
- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
- (unsigned long)p->base_address,
- (unsigned long)p->length,
+ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
+ (unsigned long long)p->base_address,
+ (unsigned long long)p->length,
p->proximity_domain,
(p->flags & ACPI_SRAT_MEM_ENABLED) ?
"enabled" : "disabled",
#define GPI1_LDO_ON (3 << 0)
#define GPI1_LDO_OFF (4 << 0)
-#define AXP288_ADC_TS_PIN_GPADC 0xf2
-#define AXP288_ADC_TS_PIN_ON 0xf3
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
static struct pmic_table power_table[] = {
{
*/
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
+ int ret, adc_ts_pin_ctrl;
u8 buf[2];
- int ret;
- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
- AXP288_ADC_TS_PIN_GPADC);
+ /*
+ * The current-source used for the battery temp-sensor (TS) is shared
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
+ * current-source needs to be permanently on. But to read the GPADC we
+ * need to temporarily switch the TS current-source to ondemand, so that
+ * the GPADC can use it, otherwise we will always read an all 0 value.
+ *
+ * Note that the switching from on to on-ondemand is not necessary
+ * when the TS current-source is off (this happens on devices which
+ * do not use the TS-pin).
+ */
+ ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
if (ret)
return ret;
- /* After switching to the GPADC pin give things some time to settle */
- usleep_range(6000, 10000);
+ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+ ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
+ if (ret)
+ return ret;
+
+ /* Wait a bit after switching the current-source */
+ usleep_range(6000, 10000);
+ }
ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
if (ret == 0)
ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
- regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
+ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+ regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_ON);
+ }
return ret;
}
}
}
+static bool acpi_power_resource_is_dup(union acpi_object *package,
+ unsigned int start, unsigned int i)
+{
+ acpi_handle rhandle, dup;
+ unsigned int j;
+
+ /* The caller is expected to check the package element types */
+ rhandle = package->package.elements[i].reference.handle;
+ for (j = start; j < i; j++) {
+ dup = package->package.elements[j].reference.handle;
+ if (dup == rhandle)
+ return true;
+ }
+
+ return false;
+}
+
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list)
{
err = -ENODEV;
break;
}
+
+ /* Some ACPI tables contain duplicate power resource references */
+ if (acpi_power_resource_is_dup(package, start, i))
+ continue;
+
err = acpi_add_power_resource(rhandle);
if (err)
break;
config PATA_ACPI
tristate "ACPI firmware driver for PATA"
- depends on ATA_ACPI && ATA_BMDMA
+ depends on ATA_ACPI && ATA_BMDMA && PCI
help
This option enables an ACPI method driver which drives
motherboard PATA controller interfaces through the ACPI
AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
SATA_MOBILE_LPM_POLICY
as default lpm_policy */
+ AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
+ suspend/resume */
/* ap->flags bits */
#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4))
#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4))
+struct ahci_mvebu_plat_data {
+ int (*plat_config)(struct ahci_host_priv *hpriv);
+ unsigned int flags;
+};
+
static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
const struct mbus_dram_target_info *dram)
{
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
+static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv)
+{
+ const struct mbus_dram_target_info *dram;
+ int rc = 0;
+
+ dram = mv_mbus_dram_info();
+ if (dram)
+ ahci_mvebu_mbus_config(hpriv, dram);
+ else
+ rc = -ENODEV;
+
+ ahci_mvebu_regret_option(hpriv);
+
+ return rc;
+}
+
+static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv)
+{
+ u32 reg;
+
+ writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
+
+ reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+ reg |= BIT(6);
+ writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+
+ return 0;
+}
+
/**
* ahci_mvebu_stop_engine
*
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
- const struct mbus_dram_target_info *dram;
+ const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data;
- dram = mv_mbus_dram_info();
- if (dram)
- ahci_mvebu_mbus_config(hpriv, dram);
-
- ahci_mvebu_regret_option(hpriv);
+ pdata->plat_config(hpriv);
return ahci_platform_resume_host(&pdev->dev);
}
static int ahci_mvebu_probe(struct platform_device *pdev)
{
+ const struct ahci_mvebu_plat_data *pdata;
struct ahci_host_priv *hpriv;
- const struct mbus_dram_target_info *dram;
int rc;
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return -EINVAL;
+
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
+ hpriv->flags |= pdata->flags;
+ hpriv->plat_data = (void *)pdata;
+
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
hpriv->stop_engine = ahci_mvebu_stop_engine;
- if (of_device_is_compatible(pdev->dev.of_node,
- "marvell,armada-380-ahci")) {
- dram = mv_mbus_dram_info();
- if (!dram)
- return -ENODEV;
-
- ahci_mvebu_mbus_config(hpriv, dram);
- ahci_mvebu_regret_option(hpriv);
- }
+ rc = pdata->plat_config(hpriv);
+ if (rc)
+ goto disable_resources;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
&ahci_platform_sht);
return rc;
}
+static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
+ .plat_config = ahci_mvebu_armada_380_config,
+};
+
+static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
+ .plat_config = ahci_mvebu_armada_3700_config,
+ .flags = AHCI_HFLAG_SUSPEND_PHYS,
+};
+
static const struct of_device_id ahci_mvebu_of_match[] = {
- { .compatible = "marvell,armada-380-ahci", },
- { .compatible = "marvell,armada-3700-ahci", },
+ {
+ .compatible = "marvell,armada-380-ahci",
+ .data = &ahci_mvebu_armada_380_plat_data,
+ },
+ {
+ .compatible = "marvell,armada-3700-ahci",
+ .data = &ahci_mvebu_armada_3700_plat_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
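The ahci_mvebu changes above move the per-SoC differences into match data looked up at probe time instead of of_device_is_compatible() branches. A minimal sketch of that pattern under hypothetical demo_* names; of_device_get_match_data() and the of_device_id .data field are the parts taken from the real API.

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_plat_data {			/* hypothetical per-compatible config */
	int (*plat_config)(struct platform_device *pdev);
	unsigned int flags;
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_plat_data *pdata;

	/* Returns the .data pointer of the matching of_device_id entry */
	pdata = of_device_get_match_data(&pdev->dev);
	if (!pdata)
		return -EINVAL;

	return pdata->plat_config ? pdata->plat_config(pdev) : 0;
}

With this shape, supporting another compatible string only needs a new demo_plat_data instance in the match table rather than another branch in probe().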
-/*
- * We currently don't provide power management related operations,
- * since there is no suspend/resume support at the platform level for
- * Armada 38x for the moment.
- */
static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
.remove = ata_platform_remove_one,
if (rc)
goto disable_phys;
+ rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
+ if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
+ ahci_platform_disable_phys(hpriv);
+
return ata_host_suspend(host, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
int ahci_platform_resume_host(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
int rc;
if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
ahci_init_controller(host);
}
+ if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
+ ahci_platform_enable_phys(hpriv);
+
ata_host_resume(host);
return 0;
if (!pp)
return -ENOMEM;
- mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+ GFP_KERNEL);
if (!mem) {
kfree(pp);
return -ENOMEM;
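This is the first of many dma_zalloc_coherent() to dma_alloc_coherent() conversions in this series; they lean on dma_alloc_coherent() already returning zeroed memory in this kernel, so no explicit memset is reintroduced. A minimal sketch of the resulting allocate/free pairing, with the demo_* names being hypothetical:

#include <linux/dma-mapping.h>

static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *dma_handle)
{
	/* Returned buffer is already zeroed; no dma_zalloc_coherent() needed */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void demo_free_ring(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}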
static int he_init_tpdrq(struct he_dev *he_dev)
{
- he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
- &he_dev->tpdrq_phys, GFP_KERNEL);
+ he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+ &he_dev->tpdrq_phys,
+ GFP_KERNEL);
if (he_dev->tpdrq_base == NULL) {
hprintk("failed to alloc tpdrq\n");
return -ENOMEM;
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
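The comment and the 1UL fix above concern the ATM cell-rate arithmetic rate = 2^exp * (man + 512) / 512. A minimal restatement with demo_rate_cps() as a hypothetical helper: widening before the multiply avoids the sign-extension overflow being fixed, and '>> 9' stands in for '/ 512' so 32-bit builds do not need a 64-bit division helper.

static unsigned long long demo_rate_cps(unsigned int exp, unsigned int man)
{
	/* rate = 2^exp * (man + 512) / 512, computed in 64 bits */
	return ((unsigned long long)(1UL << exp) * (man + 512)) >> 9;
}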
goto out_free_rbpl_virt;
}
- he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
- &he_dev->rbpl_phys, GFP_KERNEL);
+ he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+ &he_dev->rbpl_phys, GFP_KERNEL);
if (he_dev->rbpl_base == NULL) {
hprintk("failed to alloc rbpl_base\n");
goto out_destroy_rbpl_pool;
/* rx buffer ready queue */
- he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
- &he_dev->rbrq_phys, GFP_KERNEL);
+ he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+ &he_dev->rbrq_phys, GFP_KERNEL);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
goto out_free_rbpl;
/* tx buffer ready queue */
- he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
- &he_dev->tbrq_phys, GFP_KERNEL);
+ he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ &he_dev->tbrq_phys, GFP_KERNEL);
if (he_dev->tbrq_base == NULL) {
hprintk("failed to allocate tbrq\n");
goto out_free_rbpq_base;
/* 2.9.3.5 tail offset for each interrupt queue is located after the
end of the interrupt queue */
- he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- (CONFIG_IRQ_SIZE + 1)
- * sizeof(struct he_irq),
- &he_dev->irq_phys,
- GFP_KERNEL);
+ he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+ &he_dev->irq_phys, GFP_KERNEL);
if (he_dev->irq_base == NULL) {
hprintk("failed to allocate irq\n");
return -ENOMEM;
/* host status page */
- he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- sizeof(struct he_hsp),
- &he_dev->hsp_phys, GFP_KERNEL);
+ he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ sizeof(struct he_hsp),
+ &he_dev->hsp_phys, GFP_KERNEL);
if (he_dev->hsp == NULL) {
hprintk("failed to allocate host status page\n");
return -ENOMEM;
scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
if (!scq)
return NULL;
- scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
- &scq->paddr, GFP_KERNEL);
+ scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+ &scq->paddr, GFP_KERNEL);
if (scq->base == NULL) {
kfree(scq);
return NULL;
{
struct rsq_entry *rsqe;
- card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
- &card->rsq.paddr, GFP_KERNEL);
+ card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+ &card->rsq.paddr, GFP_KERNEL);
if (card->rsq.base == NULL) {
printk("%s: can't allocate RSQ.\n", card->name);
return -1;
writel(0, SAR_REG_GP);
/* Initialize RAW Cell Handle Register */
- card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
- 2 * sizeof(u32),
- &card->raw_cell_paddr,
- GFP_KERNEL);
+ card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
+ 2 * sizeof(u32),
+ &card->raw_cell_paddr,
+ GFP_KERNEL);
if (!card->raw_cell_hnd) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
+#include <linux/devfreq.h>
#include <linux/timer.h>
#include "../base.h"
dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
+ devfreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
+ devfreq_suspend();
cpufreq_suspend();
mutex_lock(&dpm_list_mtx);
* Compute the autosuspend-delay expiration time based on the device's
* power.last_busy time. If the delay has already expired or is disabled
* (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
*
* This function may be called either with or without dev->power.lock held.
* Either way it can be racy, since power.last_busy may be updated at any time.
last_busy = READ_ONCE(dev->power.last_busy);
- expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
+ expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
if (expires <= now)
expires = 0; /* Already expired. */
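The runtime-PM hunk above switches the autosuspend bookkeeping from jiffies to nanoseconds, and the (u64) cast matters because autosuspend_delay is an int holding milliseconds: a 32-bit 'delay * NSEC_PER_MSEC' product overflows for delays above roughly 2.1 seconds. A minimal sketch, with demo_expiration_ns() as a hypothetical helper:

#include <linux/ktime.h>

static u64 demo_expiration_ns(u64 last_busy_ns, int delay_ms)
{
	/* Promote before multiplying so the product is computed in 64 bits */
	return last_busy_ns + (u64)delay_ms * NSEC_PER_MSEC;
}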
* We add a slack of 25% to gather wakeups
* without sacrificing the granularity.
*/
- u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+ u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
(NSEC_PER_MSEC >> 2);
dev->power.timer_expires = expires;
spin_lock_irqsave(&dev->power.lock, flags);
expires = dev->power.timer_expires;
- /* If 'expire' is after 'jiffies' we've been called too early. */
+ /*
+ * If 'expires' is after the current time, we've been called
+ * too early.
+ */
if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
+ if (!d->chip->mask_base)
+ continue;
+
reg = d->chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (d->chip->mask_invert) {
const struct regmap_irq_type *t = &irq_data->type;
if ((t->types_supported & type) != type)
- return -ENOTSUPP;
+ return 0;
reg = t->type_reg_offset / map->reg_stride;
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
+ if (!chip->mask_base)
+ continue;
+
reg = chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (chip->mask_invert)
goto out_unlock;
}
+ if (lo->lo_offset != info->lo_offset ||
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ sync_blockdev(lo->lo_device);
+ kill_bdev(lo->lo_device);
+ }
+
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
+ /* kill_bdev should have truncated all the pages */
+ if (lo->lo_device->bd_inode->i_mapping->nrpages) {
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
+ goto out_unfreeze;
+ }
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto out_unfreeze;
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
+ int err = 0;
+
if (lo->lo_state != Lo_bound)
return -ENXIO;
if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
return -EINVAL;
+ if (lo->lo_queue->limits.logical_block_size != arg) {
+ sync_blockdev(lo->lo_device);
+ kill_bdev(lo->lo_device);
+ }
+
blk_mq_freeze_queue(lo->lo_queue);
+ /* kill_bdev should have truncated all the pages */
+ if (lo->lo_queue->limits.logical_block_size != arg &&
+ lo->lo_device->bd_inode->i_mapping->nrpages) {
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
+ goto out_unfreeze;
+ }
+
blk_queue_logical_block_size(lo->lo_queue, arg);
blk_queue_physical_block_size(lo->lo_queue, arg);
blk_queue_io_min(lo->lo_queue, arg);
loop_update_dio(lo);
-
+out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
- return 0;
+ return err;
}
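Both loop hunks above follow the same guard: flush and drop the block device's page cache before freezing the queue, then bail out with -EAGAIN if dirty pages somehow remain. A minimal sketch of that sequence under a hypothetical demo_resize(); sync_blockdev() and kill_bdev() are the helpers the loop code itself calls.

#include <linux/blkdev.h>
#include <linux/fs.h>

static int demo_resize(struct block_device *bdev, bool geometry_changes)
{
	if (!geometry_changes)
		return 0;

	sync_blockdev(bdev);
	kill_bdev(bdev);

	/* kill_bdev() should have truncated every cached page by now */
	if (bdev->bd_inode->i_mapping->nrpages)
		return -EAGAIN;

	return 0;
}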
static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
set_capacity(nbd->disk, config->bytesize >> 9);
if (bdev) {
- if (bdev->bd_disk)
+ if (bdev->bd_disk) {
bd_set_size(bdev, config->bytesize);
- else
+ set_blocksize(bdev, config->blksize);
+ } else
bdev->bd_invalidated = 1;
bdput(bdev);
}
#else
static inline int null_zone_init(struct nullb_device *dev)
{
+ pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n");
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
struct list_head *tmp;
int dev_id;
char opt_buf[6];
- bool already = false;
bool force = false;
int ret;
spin_lock_irq(&rbd_dev->lock);
if (rbd_dev->open_count && !force)
ret = -EBUSY;
- else
- already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
- &rbd_dev->flags);
+ else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+ &rbd_dev->flags))
+ ret = -EINPROGRESS;
spin_unlock_irq(&rbd_dev->lock);
}
spin_unlock(&rbd_dev_list_lock);
- if (ret < 0 || already)
+ if (ret)
return ret;
if (force) {
"comp pci_alloc, total bytes %zd entries %d\n",
SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
- &skdev->cq_dma_address, GFP_KERNEL);
+ skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+ &skdev->cq_dma_address, GFP_KERNEL);
if (skcomp == NULL) {
rc = -ENOMEM;
* allocated a disk.
*/
if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
- blk_mq_start_hw_queues(port->disk->queue);
+ blk_mq_start_stopped_hw_queues(port->disk->queue, true);
}
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
* See the comment in writeback_store.
*/
zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB))
- goto next;
- zram_set_flag(zram, index, ZRAM_IDLE);
-next:
+ if (zram_allocated(zram, index) &&
+ !zram_test_flag(zram, index, ZRAM_UNDER_WB))
+ zram_set_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
}
}
#ifdef CONFIG_ZRAM_WRITEBACK
+static ssize_t writeback_limit_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 val;
+ ssize_t ret = -EINVAL;
+
+ if (kstrtoull(buf, 10, &val))
+ return ret;
+
+ down_read(&zram->init_lock);
+ spin_lock(&zram->wb_limit_lock);
+ zram->wb_limit_enable = val;
+ spin_unlock(&zram->wb_limit_lock);
+ up_read(&zram->init_lock);
+ ret = len;
+
+ return ret;
+}
+
+static ssize_t writeback_limit_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ spin_lock(&zram->wb_limit_lock);
+ val = zram->wb_limit_enable;
+ spin_unlock(&zram->wb_limit_lock);
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
static ssize_t writeback_limit_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
return ret;
down_read(&zram->init_lock);
- atomic64_set(&zram->stats.bd_wb_limit, val);
- if (val == 0)
- zram->stop_writeback = false;
+ spin_lock(&zram->wb_limit_lock);
+ zram->bd_wb_limit = val;
+ spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
ret = len;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- val = atomic64_read(&zram->stats.bd_wb_limit);
+ spin_lock(&zram->wb_limit_lock);
+ val = zram->bd_wb_limit;
+ spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
return 1;
}
-#define HUGE_WRITEBACK 0x1
-#define IDLE_WRITEBACK 0x2
+#define HUGE_WRITEBACK 1
+#define IDLE_WRITEBACK 2
static ssize_t writeback_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
struct page *page;
ssize_t ret, sz;
char mode_buf[8];
- unsigned long mode = -1UL;
+ int mode = -1;
unsigned long blk_idx = 0;
sz = strscpy(mode_buf, buf, sizeof(mode_buf));
else if (!strcmp(mode_buf, "huge"))
mode = HUGE_WRITEBACK;
- if (mode == -1UL)
+ if (mode == -1)
return -EINVAL;
down_read(&zram->init_lock);
bvec.bv_len = PAGE_SIZE;
bvec.bv_offset = 0;
- if (zram->stop_writeback) {
+ spin_lock(&zram->wb_limit_lock);
+ if (zram->wb_limit_enable && !zram->bd_wb_limit) {
+ spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
+ spin_unlock(&zram->wb_limit_lock);
if (!blk_idx) {
blk_idx = alloc_block_bdev(zram);
zram_test_flag(zram, index, ZRAM_UNDER_WB))
goto next;
- if ((mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE)) &&
- (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE)))
+ if (mode == IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode == HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
goto next;
/*
* Clearing ZRAM_UNDER_WB is duty of caller.
zram_set_element(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
- if (atomic64_add_unless(&zram->stats.bd_wb_limit,
- -1 << (PAGE_SHIFT - 12), 0)) {
- if (atomic64_read(&zram->stats.bd_wb_limit) == 0)
- zram->stop_writeback = true;
- }
+ spin_lock(&zram->wb_limit_lock);
+ if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+ zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+ spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
}
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
+static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_backing_dev.attr,
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
+ &dev_attr_writeback_limit_enable.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
device_id = ret;
init_rwsem(&zram->init_lock);
-
+#ifdef CONFIG_ZRAM_WRITEBACK
+ spin_lock_init(&zram->wb_limit_lock);
+#endif
queue = blk_alloc_queue(GFP_KERNEL);
if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
atomic64_t bd_count; /* no. of pages in backing device */
atomic64_t bd_reads; /* no. of reads from backing device */
atomic64_t bd_writes; /* no. of writes to backing device */
- atomic64_t bd_wb_limit; /* writeback limit of backing device */
#endif
};
*/
bool claim; /* Protected by bdev->bd_mutex */
struct file *backing_dev;
- bool stop_writeback;
#ifdef CONFIG_ZRAM_WRITEBACK
+ spinlock_t wb_limit_lock;
+ bool wb_limit_enable;
+ u64 bd_wb_limit;
struct block_device *bdev;
unsigned int old_block_size;
unsigned long *bitmap;
{
unsigned int ret_freq = 0;
- if (!cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
/*
- * Updating inactive policies is invalid, so avoid doing that. Also
- * if fast frequency switching is used with the given policy, the check
+ * If fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case too.
*/
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+ if (policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
if (policy) {
down_read(&policy->rwsem);
-
- if (!policy_is_inactive(policy))
- ret_freq = __cpufreq_get(policy);
-
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
int ret;
struct scmi_data *priv = policy->driver_data;
struct scmi_perf_ops *perf_ops = handle->perf_ops;
- u64 freq = policy->freq_table[index].frequency * 1000;
+ u64 freq = policy->freq_table[index].frequency;
- ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
+ ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
if (!ret)
arch_set_freq_scale(policy->related_cpus, freq,
policy->cpuinfo.max_freq);
out_free_priv:
kfree(priv);
out_free_opp:
- dev_pm_opp_cpumask_remove_table(policy->cpus);
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
return ret;
}
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
kfree(priv);
- dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
return 0;
}
out_free_priv:
kfree(priv);
out_free_opp:
- dev_pm_opp_cpumask_remove_table(policy->cpus);
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
return ret;
}
clk_put(priv->clk);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
kfree(priv);
- dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
return 0;
}
depends on ARCH_BCM_IPROC
depends on MAILBOX
default m
+ select CRYPTO_AUTHENC
select CRYPTO_DES
select CRYPTO_MD5
select CRYPTO_SHA1
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
struct spu_hw *spu = &iproc_priv.spu;
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- const u8 *origkey = key;
- const unsigned int origkeylen = keylen;
-
- int ret = 0;
+ struct crypto_authenc_keys keys;
+ int ret;
flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
keylen);
flow_dump(" key: ", key, keylen);
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ ret = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (ret)
goto badkey;
- param = RTA_DATA(rta);
- ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < ctx->enckeylen)
- goto badkey;
- if (ctx->enckeylen > MAX_KEY_SIZE)
+ if (keys.enckeylen > MAX_KEY_SIZE ||
+ keys.authkeylen > MAX_KEY_SIZE)
goto badkey;
- ctx->authkeylen = keylen - ctx->enckeylen;
-
- if (ctx->authkeylen > MAX_KEY_SIZE)
- goto badkey;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
/* May end up padding auth key. So make sure it's zeroed. */
memset(ctx->authkey, 0, sizeof(ctx->authkey));
- memcpy(ctx->authkey, key, ctx->authkeylen);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
switch (ctx->alg->cipher_info.alg) {
case CIPHER_ALG_DES:
u32 tmp[DES_EXPKEY_WORDS];
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
- if (des_ekey(tmp, key) == 0) {
+ if (des_ekey(tmp, keys.enckey) == 0) {
if (crypto_aead_get_flags(cipher) &
CRYPTO_TFM_REQ_WEAK_KEY) {
crypto_aead_set_flags(cipher, flags);
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)key;
+ const u32 *K = (const u32 *)keys.enckey;
u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
- ret =
- crypto_aead_setkey(ctx->fallback_cipher, origkey,
- origkeylen);
+ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
if (ret) {
flow_log(" fallback setkey() returned:%d\n", ret);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
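The bcm cipher conversion above (and the ccree one later in this section) replaces open-coded rtattr parsing with crypto_authenc_extractkeys(), which splits an authenc() key blob into its authentication and encryption halves. A minimal sketch of that helper in isolation; DEMO_MAX_KEY and demo_split_authenc_key() are hypothetical.

#include <crypto/authenc.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_KEY	64	/* hypothetical driver limit */

static int demo_split_authenc_key(const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int ret;

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret)
		return ret;	/* malformed blob */

	if (keys.enckeylen > DEMO_MAX_KEY || keys.authkeylen > DEMO_MAX_KEY)
		return -EINVAL;

	/* keys.enckey / keys.authkey point into the caller's key blob */
	return 0;
}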
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
- if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ if (is_mdha(c2_alg_sel) &&
(!md_inst || t_alg->aead.maxauthsize > md_limit))
continue;
desc = edesc->hw_desc;
- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, state->buf_dma)) {
- dev_err(jrdev, "unable to map src\n");
- goto unmap;
- }
+ if (buflen) {
+ state->buf_dma = dma_map_single(jrdev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ goto unmap;
+ }
- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ }
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
+
+#include "desc.h"
+
#define CAAM_ERROR_STR_MAX 302
void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
+
+static inline bool is_mdha(u32 algtype)
+{
+ return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
+ OP_ALG_CHA_MDHA;
+}
#endif /* CAAM_ERROR_H */
mcode->num_cores = is_ae ? 6 : 10;
/* Allocate DMAable space */
- mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
- &mcode->phys_base, GFP_KERNEL);
+ mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
+ &mcode->phys_base, GFP_KERNEL);
if (!mcode->code) {
dev_err(dev, "Unable to allocate space for microcode");
ret = -ENOMEM;
c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
rem_q_size;
- curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
- c_size + CPT_NEXT_CHUNK_PTR_SIZE,
- &curr->dma_addr, GFP_KERNEL);
+ curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
+ c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+ &curr->dma_addr,
+ GFP_KERNEL);
if (!curr->head) {
dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
i, queue->nchunks);
struct nitrox_device *ndev = cmdq->ndev;
cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
- cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
- &cmdq->unalign_dma,
- GFP_KERNEL);
+ cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
+ &cmdq->unalign_dma,
+ GFP_KERNEL);
if (!cmdq->unalign_base)
return -ENOMEM;
/* ORH error code */
err = READ_ONCE(*sr->resp.orh) & 0xff;
- softreq_destroy(sr);
if (sr->callback)
sr->callback(sr->cb_arg, err);
+ softreq_destroy(sr);
req_completed++;
}
/* Page alignment satisfies our needs for N <= 128 */
BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
- cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
- &cmd_q->qbase_dma,
- GFP_KERNEL);
+ cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
unsigned int keylen)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
struct cc_crypto_req cc_req = {};
- struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
- int rc = -EINVAL;
unsigned int seq_len = 0;
struct device *dev = drvdata_to_dev(ctx->drvdata);
+ const u8 *enckey, *authkey;
+ int rc;
dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
- param = RTA_DATA(rta);
- ctx->enc_keylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < ctx->enc_keylen)
+ struct crypto_authenc_keys keys;
+
+ rc = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (rc)
goto badkey;
- ctx->auth_keylen = keylen - ctx->enc_keylen;
+ enckey = keys.enckey;
+ authkey = keys.authkey;
+ ctx->enc_keylen = keys.enckeylen;
+ ctx->auth_keylen = keys.authkeylen;
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
+ rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
- CTR_RFC3686_NONCE_SIZE);
+ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
} else { /* non-authenc - has just one key */
+ enckey = key;
+ authkey = NULL;
ctx->enc_keylen = keylen;
ctx->auth_keylen = 0;
}
/* STAT_PHASE_1: Copy key to ctx */
/* Get key material */
- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
if (ctx->enc_keylen == 24)
memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+ ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
goto badkey;
}
memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
} else {
/* new key */
- ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
- &ctx->pkey, GFP_KERNEL);
+ ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+ &ctx->pkey, GFP_KERNEL);
if (!ctx->key) {
mutex_unlock(&ctx->lock);
return -ENOMEM;
struct sec_queue_ring_db *ring_db = &queue->ring_db;
int ret;
- ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
- &ring_cmd->paddr,
- GFP_KERNEL);
+ ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
+ &ring_cmd->paddr, GFP_KERNEL);
if (!ring_cmd->vaddr)
return -ENOMEM;
mutex_init(&ring_cmd->lock);
ring_cmd->callback = sec_alg_callback;
- ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
- &ring_cq->paddr,
- GFP_KERNEL);
+ ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
+ &ring_cq->paddr, GFP_KERNEL);
if (!ring_cq->vaddr) {
ret = -ENOMEM;
goto err_free_ring_cmd;
}
- ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
- &ring_db->paddr,
- GFP_KERNEL);
+ ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
+ &ring_db->paddr, GFP_KERNEL);
if (!ring_db->vaddr) {
ret = -ENOMEM;
goto err_free_ring_cq;
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_zalloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_alloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
return 0;
if (!ring[i])
goto err_cleanup;
- ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->cmd_dma,
- GFP_KERNEL);
+ ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
+ MTK_DESC_RING_SZ,
+ &ring[i]->cmd_dma,
+ GFP_KERNEL);
if (!ring[i]->cmd_base)
goto err_cleanup;
- ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->res_dma,
- GFP_KERNEL);
+ ring[i]->res_base = dma_alloc_coherent(cryp->dev,
+ MTK_DESC_RING_SZ,
+ &ring[i]->res_dma,
+ GFP_KERNEL);
if (!ring[i]->res_base)
goto err_cleanup;
dev_to_node(&GET_DEV(accel_dev)));
if (!admin)
return -ENOMEM;
- admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- &admin->phy_addr, GFP_KERNEL);
+ admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
if (!admin->virt_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
kfree(admin);
return -ENOMEM;
}
- admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
- PAGE_SIZE,
- &admin->const_tbl_addr,
- GFP_KERNEL);
+ admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+ PAGE_SIZE,
+ &admin->const_tbl_addr,
+ GFP_KERNEL);
if (!admin->virt_tbl_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->enc_cd) {
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->dec_cd) {
goto out_free_enc;
}
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->enc_cd) {
spin_unlock(&ctx->lock);
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->dec_cd) {
spin_unlock(&ctx->lock);
goto out_free_enc;
} else {
int shift = ctx->p_size - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev,
- ctx->p_size,
- &qat_req->in.dh.in.b,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev,
+ ctx->p_size,
+ &qat_req->in.dh.in.b,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
- &qat_req->out.dh.r,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
+ &qat_req->out.dh.r,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
return -EINVAL;
ctx->p_size = params->p_size;
- ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+ ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
return -ENOMEM;
memcpy(ctx->p, params->p, ctx->p_size);
return 0;
}
- ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+ ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
if (!ctx->g)
return -ENOMEM;
memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
if (ret < 0)
goto err_clear_ctx;
- ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
- GFP_KERNEL);
+ ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+ GFP_KERNEL);
if (!ctx->xa) {
ret = -ENOMEM;
goto err_clear_ctx;
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.enc.m,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.rsa.enc.m,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.enc.c,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.rsa.enc.c,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.dec.c,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.rsa.dec.c,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.dec.m,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.rsa.dec.m,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
goto err;
ret = -ENOMEM;
- ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+ ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
if (!ctx->n)
goto err;
return -EINVAL;
}
- ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+ ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
if (!ctx->e)
return -ENOMEM;
goto err;
ret = -ENOMEM;
- ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+ ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
if (!ctx->d)
goto err;
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto err;
- ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+ ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
goto err;
memcpy(ctx->p + (half_key_sz - len), ptr, len);
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_p;
- ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+ ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
if (!ctx->q)
goto free_p;
memcpy(ctx->q + (half_key_sz - len), ptr, len);
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_q;
- ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
- GFP_KERNEL);
+ ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+ GFP_KERNEL);
if (!ctx->dp)
goto free_q;
memcpy(ctx->dp + (half_key_sz - len), ptr, len);
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dp;
- ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
- GFP_KERNEL);
+ ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+ GFP_KERNEL);
if (!ctx->dq)
goto free_dp;
memcpy(ctx->dq + (half_key_sz - len), ptr, len);
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dq;
- ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
- GFP_KERNEL);
+ ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+ GFP_KERNEL);
if (!ctx->qinv)
goto free_dq;
memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
- void *err;
if (cryptlen + authsize > max_len) {
dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
- if (ivsize)
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
if (!dst || dst == src) {
src_len = assoclen + cryptlen + authsize;
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
dst_nents = sg_nents_for_len(dst, dst_len);
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/* if its a ahash, add space for a second desc next to the first one */
if (is_sec1 && !dst)
alloc_len += sizeof(struct talitos_desc);
+ alloc_len += ivsize;
edesc = kmalloc(alloc_len, GFP_DMA | flags);
- if (!edesc) {
- err = ERR_PTR(-ENOMEM);
- goto error_sg;
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+ if (ivsize) {
+ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
}
memset(&edesc->desc, 0, sizeof(edesc->desc));
DMA_BIDIRECTIONAL);
}
return edesc;
-error_sg:
- if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
- return err;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
{
int ret = -EBUSY;
- sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
- GFP_NOWAIT);
+ sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+ GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
- GFP_NOWAIT);
+ desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
+ GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
* and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
*/
pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
- ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
- &ring->tphys, GFP_NOWAIT);
+ ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+ &ring->tphys, GFP_NOWAIT);
if (!ring->txd)
return -ENOMEM;
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
- CCW_BLOCK_SIZE,
- &mxs_chan->ccw_phys, GFP_KERNEL);
+ mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+ CCW_BLOCK_SIZE,
+ &mxs_chan->ccw_phys, GFP_KERNEL);
if (!mxs_chan->ccw) {
ret = -ENOMEM;
goto err_alloc;
ring->size = ret;
/* Allocate memory for DMA ring descriptor */
- ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
- &ring->desc_paddr, GFP_KERNEL);
+ ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
+ &ring->desc_paddr, GFP_KERNEL);
if (!ring->desc_vaddr) {
chan_err(chan, "Failed to allocate ring desc\n");
return -ENOMEM;
*/
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/* Allocate the buffer descriptors. */
- chan->seg_v = dma_zalloc_coherent(chan->dev,
- sizeof(*chan->seg_v) *
- XILINX_DMA_NUM_DESCS,
- &chan->seg_p, GFP_KERNEL);
+ chan->seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
if (!chan->seg_v) {
dev_err(chan->dev,
"unable to allocate channel %d descriptors\n",
* so allocating a desc segment during channel allocation for
* programming tail descriptor.
*/
- chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
- sizeof(*chan->cyclic_seg_v),
- &chan->cyclic_seg_p, GFP_KERNEL);
+ chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p,
+ GFP_KERNEL);
if (!chan->cyclic_seg_v) {
dev_err(chan->dev,
"unable to allocate desc segment for cyclic DMA\n");
list_add_tail(&desc->node, &chan->free_list);
}
- chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
- &chan->desc_pool_p, GFP_KERNEL);
+ chan->desc_pool_v = dma_alloc_coherent(chan->dev,
+ (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ &chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;
menu "IEEE 1394 (FireWire) support"
- depends on HAS_DMA
depends on PCI || COMPILE_TEST
# firewire-core does not depend on PCI but is
# not useful without PCI controller driver
return pca953x_check_register(chip, reg, bank);
}
-const struct regmap_config pca953x_i2c_regmap = {
+static const struct regmap_config pca953x_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
- struct gpio_desc *desc;
-
if (event->irq_requested) {
if (event->irq_is_wake)
disable_irq_wake(event->irq);
free_irq(event->irq, event);
}
- desc = event->desc;
- if (WARN_ON(IS_ERR(desc)))
- continue;
gpiochip_unlock_as_irq(chip, event->pin);
- gpiochip_free_own_desc(desc);
+ gpiochip_free_own_desc(event->desc);
list_del(&event->node);
kfree(event);
}
if (IS_ERR(fence))
return PTR_ERR(fence);
+ if (!fence)
+ fence = dma_fence_get_stub();
+
switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+ }
return 0;
}
goto failed;
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_ib_ring_tests(adev);
error:
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
mutex_lock(&adev->lock_reset);
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
- /*unlock kfd */
- amdgpu_amdkfd_post_reset(adev);
+ /* unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to pin new abo buffer before flip\n");
- goto unreserve;
+ if (!adev->enable_virtual_display) {
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+ }
}
r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = amdgpu_bo_gpu_offset(new_abo);
+ if (!adev->enable_virtual_display)
+ work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
- DRM_ERROR("failed to unpin new abo in error path\n");
- }
+ if (!adev->enable_virtual_display)
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+ DRM_ERROR("failed to unpin new abo in error path\n");
+
unreserve:
amdgpu_bo_unreserve(new_abo);
/* VEGAM */
{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct amdgpu_task_info ti;
+
+ memset(&ti, 0, sizeof(struct amdgpu_task_info));
if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
return;
}
+ amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
ring->fence_drv.sync_seq);
+ DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+ ti.process_name, ti.tgid, ti.task_name, ti.pid);
if (amdgpu_device_should_recover_gpu(ring->adev))
amdgpu_device_gpu_recover(ring->adev, job);
struct ttm_operation_ctx ctx = { false, false };
int r, i;
- if (!bo->pin_count) {
+ if (WARN_ON_ONCE(!bo->pin_count)) {
dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
if (adev->pm.sysfs_initialized)
"pp_power_profile_mode\n");
return ret;
}
- ret = device_create_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_od_clk_voltage\n");
- return ret;
+ if (hwmgr->od_enabled) {
+ ret = device_create_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "pp_od_clk_voltage\n");
+ return ret;
+ }
}
ret = device_create_file(adev->dev,
&dev_attr_gpu_busy_percent);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
if (adev->pm.dpm_enabled == 0)
return;
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
- device_remove_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
+ if (hwmgr->od_enabled)
+ device_remove_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
return ret;
}
-bool psp_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
- return true;
- else
- return false;
-}
-
static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
- uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
+ int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
+ int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
+ bool (*support_vmr_ring)(struct psp_context *psp);
};
struct psp_xgmi_context {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+#define psp_support_vmr_ring(psp) \
+ ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp) \
- ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
-#define psp_xgmi_get_hive_id(psp) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp, node_id) \
+ ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
+#define psp_xgmi_get_hive_id(psp, hive_id) \
+ ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
((psp)->funcs->xgmi_get_topology_info ? \
(psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-bool psp_support_vmr_ring(struct psp_context *psp);
-
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
#include <drm/drm_print.h>
/* max number of rings */
-#define AMDGPU_MAX_RINGS 21
+#define AMDGPU_MAX_RINGS 23
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ }
+ if (fences)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
- new_state.fw_based = adev->vcn.pause_state.fw_based;
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
- new_state.jpeg = adev->vcn.pause_state.jpeg;
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
amdgpu_vcn_pause_dpg_mode(adev, &new_state);
}
bp->size = amdgpu_vm_bo_size(adev, level);
bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
- adev->flags & AMD_IS_APU)
- bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
if (!adev->gmc.xgmi.supported)
return 0;
- adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
- adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
+ ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get node id\n");
+ return ret;
+ }
+
+ ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get hive id\n");
+ return ret;
+ }
mutex_lock(&xgmi_mutex);
hive = amdgpu_get_xgmi_hive(adev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
+ amdgpu_bo_unref(&works->old_abo);
+ kfree(works->shared);
+ kfree(works);
return 0;
}
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- int r;
/* Set the write pointer delay */
WREG32(mmCP_RB_WPTR_DELAY, 0);
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
ring->sched.ready = true;
- r = amdgpu_ring_test_helper(ring);
- return r;
+ return 0;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ enable failed\n");
- return r;
+ amdgpu_ring_commit(kiq_ring);
+
+ return 0;
}
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
if (r)
goto done;
- /* Test KCQs - reversing the order of rings seems to fix ring test failure
- * after GPU reset
- */
- for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
+done:
+ return r;
+}
+
+static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+{
+ int r, i;
+ struct amdgpu_ring *ring;
+
+ /* collect all the ring_tests here, gfx, kiq, compute */
+ ring = &adev->gfx.gfx_ring[0];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ ring = &adev->gfx.kiq.ring;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- r = amdgpu_ring_test_helper(ring);
+ amdgpu_ring_test_helper(ring);
}
-done:
- return r;
+ return 0;
}
static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
r = gfx_v8_0_kcq_resume(adev);
if (r)
return r;
+
+ r = gfx_v8_0_cp_test_all_rings(adev);
+ if (r)
+ return r;
+
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
return 0;
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
+ gfx_v8_0_cp_test_all_rings(adev);
+
adev->gfx.rlc.funcs->start(adev);
return 0;
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
uint32_t data, def;
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
}
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 0, 1: idle
- * Engine 2, 3: firmware
- * Engine 4~13: amdgpu ring, subject to change when ring number changes
- * Engine 14~15: idle
- * Engine 16: kfd tlb invalidation
- * Engine 17: Gart flushes
- */
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
unsigned i;
- int r;
+ unsigned vmhub, inv_eng;
- if (!gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_late_init(adev);
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
+ ring->vm_inv_eng = inv_eng - 1;
+ change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
}
- /* Engine 16 is used for KFD and 17 for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 16);
+ return 0;
+}
+
+static int gmc_v9_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ if (!gmc_v9_0_keep_stolen_memory(adev))
+ amdgpu_bo_late_init(adev);
+
+ r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
r = gmc_v9_0_ecc_available(adev);
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
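For reference, the GFXHUB/MMHUB free-engine bitmaps above are consumed in gmc_v9_0_allocate_vm_inv_eng() with ffs(): the lowest free engine is taken and its bit cleared. Below is a minimal user-space sketch of that allocation pattern, with hypothetical names and the same 0x1FFF3 layout (engines 2/3 reserved for firmware, 17 for GART flushes); it is illustrative only and not part of the patch.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Hypothetical stand-ins for the per-hub free-engine bitmaps above. */
static unsigned int free_engines[2] = { 0x1FFF3, 0x1FFF3 };

/* Take the lowest free engine on @hub and mark it used; -1 if none left. */
static int alloc_inv_eng(unsigned int hub)
{
	int bit = ffs(free_engines[hub]);	/* 1-based index of lowest set bit */

	if (!bit)
		return -1;
	free_engines[hub] &= ~(1u << (bit - 1));	/* engine is now in use */
	return bit - 1;
}

int main(void)
{
	int a = alloc_inv_eng(0);
	int b = alloc_inv_eng(0);
	int c = alloc_inv_eng(0);

	/* Prints "0 1 4": engines 2 and 3 stay reserved (bits clear in 0x1FFF3). */
	printf("%d %d %d\n", a, b, c);
	return 0;
}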
return r;
}
/* Retrieve checksum from mailbox2 */
- if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_CONFIG_CNTL 0x11180044
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
#include "nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
/* address block */
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr->sos_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out1;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out1;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err)
- goto out;
+ goto out2;
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
- goto out;
+ goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+out1:
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
out:
- if (err) {
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- }
+ dev_err(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
return err;
}
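The new out1/out2 labels above form the usual goto-unwind ladder: a failure while acquiring a later firmware image jumps to the label that releases everything obtained so far, and releasing a NULL handle is a no-op. A compact sketch of the same shape, using malloc()/free() as stand-ins for request_firmware()/release_firmware() (hypothetical names, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

/* free(NULL) is a no-op, just as release_firmware(NULL) is in the kernel. */
static int load_all(char **sos, char **asd, char **ta)
{
	*sos = *asd = *ta = NULL;

	*sos = malloc(16);
	if (!*sos)
		goto out;

	*asd = malloc(16);
	if (!*asd)
		goto out1;

	*ta = malloc(16);
	if (!*ta)
		goto out2;

	return 0;

out2:	/* undo ta (no-op if NULL), then fall through */
	free(*ta);
	*ta = NULL;
out1:	/* undo asd, then fall through */
	free(*asd);
	*asd = NULL;
out:	/* undo sos and report */
	free(*sos);
	*sos = NULL;
	fprintf(stderr, "load failed\n");
	return -1;
}

int main(void)
{
	char *sos, *asd, *ta;

	if (!load_all(&sos, &asd, &ta)) {
		puts("loaded");
		free(ta);
		free(asd);
		free(sos);
	}
	return 0;
}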
return 0;
}
+static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ return false;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- if (psp_support_vmr_ring(psp)) {
+ if (psp_v11_0_support_vmr_ring(psp)) {
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
struct amdgpu_device *adev = psp->adev;
/* Write the ring destroy command*/
- if (psp_support_vmr_ring(psp))
+ if (psp_v11_0_support_vmr_ring(psp))
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
else
mdelay(20);
/* Wait for response flag (bit 31) */
- if (psp_support_vmr_ring(psp))
+ if (psp_v11_0_support_vmr_ring(psp))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
else
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- if (psp_support_vmr_ring(psp))
+ if (psp_v11_0_support_vmr_ring(psp))
psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- if (psp_support_vmr_ring(psp)) {
+ if (psp_v11_0_support_vmr_ring(psp)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
-static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
/* Invoke xgmi ta to get hive id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
}
-static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
/* Invoke xgmi ta to get the node id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
}
static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
+ .support_vmr_ring = psp_v11_0_support_vmr_ring,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};
/*return fw_version >= 31;*/
return false;
case CHIP_VEGA20:
- /*return fw_version >= 115;*/
- return false;
+ return fw_version >= 123;
default:
return false;
}
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
- /* XXX compute */
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
- amdgpu_fence_process(&adev->sdma.instance[instance].page);
+ if (adev->asic_type != CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
do { \
+ uint32_t old_ = 0; \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
while ((tmp_ & (mask)) != (expected_value)) { \
- udelay(2); \
+ if (old_ != tmp_) { \
+ loop = adev->usec_timeout; \
+ old_ = tmp_; \
+ } else \
+ udelay(1); \
tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
loop--; \
if (!loop) { \
- DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
+ DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
ret = -ETIMEDOUT; \
break; \
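The SOC15_WAIT_ON_RREG change above makes the timeout apply only to a stuck value: any change in the polled register refreshes the budget, and the final message is downgraded to a warning. A self-contained sketch of that polling policy with a simulated register (illustrative only, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Simulated register: makes progress every third read. */
static uint32_t fake_reg;
static uint32_t read_reg(void)
{
	static unsigned calls;

	if (++calls % 3 == 0)
		fake_reg++;
	return fake_reg;
}

/*
 * Poll until (value & mask) == expected, refreshing the timeout budget
 * whenever the value changes: only a register that is stuck for the whole
 * budget times out.
 */
static bool wait_on_reg(uint32_t expected, uint32_t mask, unsigned budget)
{
	uint32_t old = 0, val = read_reg();
	unsigned loop = budget;

	while ((val & mask) != expected) {
		if (old != val) {
			loop = budget;	/* progress observed: reset the budget */
			old = val;
		} else {
			usleep(1);
		}
		val = read_reg();
		if (!--loop)
			return false;	/* value stuck for the whole budget */
	}
	return true;
}

int main(void)
{
	printf("reached: %d\n", wait_on_reg(5, 0xff, 10));
	return 0;
}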
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
- sprintf(ring->name, "uvd<%d>", j);
+ sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
return r;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+ sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
- if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->sched.ready = false;
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- /* initialize wptr */
+ /* initialize JPEG wptr */
+ ring = &adev->vcn.ring_jpeg;
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
/* copy patch commands to the jpeg ring */
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
int ret_code = 0;
+ uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- if (!ret_code) {
- int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- /* wait for read ptr to be equal to write ptr */
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- }
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
u32 r;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- r = RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ r = RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
unsigned long flags;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- WREG32(mmPCIE_DATA, v);
- (void)RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ WREG32_NO_KIQ(mmPCIE_DATA, v);
+ (void)RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_11, (reg));
- WREG32(mmSMC_IND_DATA_11, (v));
+ WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+ WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && X86_64
- imply AMD_IOMMU_V2
+ depends on DRM_AMDGPU && (X86_64 || ARM64)
+ imply AMD_IOMMU_V2 if X86_64
select MMU_NOTIFIER
help
Enable this if you want to use HSA features on AMD GPU devices.
return -EINVAL;
dmabuf = dma_buf_get(args->dmabuf_fd);
- if (!dmabuf)
- return -EINVAL;
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
mutex_lock(&p->mutex);
return 0;
}
+#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
uint32_t *num_entries,
struct crat_subtype_iolink *sub_type_hdr)
return 0;
}
+#endif
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
*
struct crat_subtype_generic *sub_type_hdr;
int avail_size = *size;
int numa_node_id;
+#ifdef CONFIG_X86_64
uint32_t entries = 0;
+#endif
int ret = 0;
if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
sub_type_hdr->length);
/* Fill in Subtype: IO Link */
+#ifdef CONFIG_X86_64
ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
&entries,
(struct crat_subtype_iolink *)sub_type_hdr);
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length * entries);
+#else
+ pr_info("IO link not available for non-x86 platforms\n");
+#endif
crat_table->num_domains++;
}
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- * gpu attached, it will better to have more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *out_dev = NULL;
down_write(&topology_lock);
- list_for_each_entry(dev, &topology_device_list, list)
+ list_for_each_entry(dev, &topology_device_list, list) {
+ /* Discrete GPUs need their own topology device list
+ * entries. Don't assign them to CPU/APU nodes.
+ */
+ if (!gpu->device_info->needs_iommu_device &&
+ dev->node_props.cpu_cores_count)
+ continue;
+
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
+ }
up_write(&topology_lock);
return out_dev;
}
static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
{
- const struct cpuinfo_x86 *cpuinfo;
int first_cpu_of_numa_node;
if (!cpumask || cpumask == cpu_none_mask)
first_cpu_of_numa_node = cpumask_first(cpumask);
if (first_cpu_of_numa_node >= nr_cpu_ids)
return -1;
- cpuinfo = &cpu_data(first_cpu_of_numa_node);
-
- return cpuinfo->apicid;
+#ifdef CONFIG_X86_64
+ return cpu_data(first_cpu_of_numa_node).apicid;
+#else
+ return first_cpu_of_numa_node;
+#endif
}
/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
drm_crtc_handle_vblank(&acrtc->base);
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ if (acrtc_state->stream &&
+ acrtc_state->vrr_params.supported &&
+ acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ }
}
}
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_dp_mst_topology_mgr *mgr;
+ int ret;
+ bool need_hotplug = false;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->dc_link->type == dc_connection_mst_branch &&
- !aconnector->mst_port) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_port)
+ continue;
+
+ mgr = &aconnector->mst_mgr;
- if (suspend)
- drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
- else
- drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
- }
+ if (suspend) {
+ drm_dp_mst_topology_mgr_suspend(mgr);
+ } else {
+ ret = drm_dp_mst_topology_mgr_resume(mgr);
+ if (ret < 0) {
+ drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ need_hotplug = true;
+ }
+ }
}
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(dev);
}
/**
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
enum dc_connection_type new_connection_type = dc_connection_none;
- int ret;
int i;
/* power on hardware */
}
}
- ret = drm_atomic_helper_resume(ddev, dm->cached_state);
+ drm_atomic_helper_resume(ddev, dm->cached_state);
dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
- return ret;
+ return 0;
}
/**
+ caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link,
- brightness, 0, 0))
+ brightness, 0))
return 0;
else
return 1;
dc_stream_retain(state->stream);
}
- state->adjust = cur->adjust;
+ state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
state->abm_level = cur->abm_level;
state->vrr_supported = cur->vrr_supported;
static int dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *new_plane_state)
{
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(new_plane_state->state, plane);
+
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
return -EINVAL;
+ /*
+ * DRM calls prepare_fb and cleanup_fb on new_plane_state for
+ * async commits so don't allow fb changes.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
return 0;
}
static void update_freesync_state_on_stream(
struct amdgpu_display_manager *dm,
struct dm_crtc_state *new_crtc_state,
- struct dc_stream_state *new_stream)
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
{
- struct mod_vrr_params vrr = {0};
+ struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
struct dc_info_packet vrr_infopacket = {0};
struct mod_freesync_config config = new_crtc_state->freesync_config;
mod_freesync_build_vrr_params(dm->freesync_module,
new_stream,
- &config, &vrr);
+ &config, &vrr_params);
+
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+ }
mod_freesync_build_vrr_infopacket(
dm->freesync_module,
new_stream,
- &vrr,
+ &vrr_params,
PACKET_TYPE_VRR,
TRANSFER_FUNC_UNKNOWN,
&vrr_infopacket);
new_crtc_state->freesync_timing_changed =
- (memcmp(&new_crtc_state->adjust,
- &vrr.adjust,
- sizeof(vrr.adjust)) != 0);
+ (memcmp(&new_crtc_state->vrr_params.adjust,
+ &vrr_params.adjust,
+ sizeof(vrr_params.adjust)) != 0);
new_crtc_state->freesync_vrr_info_changed =
(memcmp(&new_crtc_state->vrr_infopacket,
&vrr_infopacket,
sizeof(vrr_infopacket)) != 0);
- new_crtc_state->adjust = vrr.adjust;
+ new_crtc_state->vrr_params = vrr_params;
new_crtc_state->vrr_infopacket = vrr_infopacket;
- new_stream->adjust = new_crtc_state->adjust;
+ new_stream->adjust = new_crtc_state->vrr_params.adjust;
new_stream->vrr_infopacket = vrr_infopacket;
if (new_crtc_state->freesync_vrr_info_changed)
DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
new_crtc_state->base.crtc->base.id,
(int)new_crtc_state->base.vrr_enabled,
- (int)vrr.state);
+ (int)vrr_params.state);
if (new_crtc_state->freesync_timing_changed)
DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
new_crtc_state->base.crtc->base.id,
- vrr.adjust.v_total_min,
- vrr.adjust.v_total_max);
+ vrr_params.adjust.v_total_min,
+ vrr_params.adjust.v_total_max);
}
/*
struct dc_state *state)
{
unsigned long flags;
+ uint64_t timestamp_ns;
uint32_t target_vblank;
int r, vpos, hpos;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct dc_stream_update stream_update = {0};
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_status *stream_status;
+ struct dc_plane_state *surface;
/* Prepare wait for target vblank early - before the fence-waits */
addr.address.grph.addr.high_part = upper_32_bits(afb->address);
addr.flip_immediate = async_flip;
+ timestamp_ns = ktime_get_ns();
+ addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+
if (acrtc->base.state->event)
prepare_flip_isr(acrtc);
return;
}
- surface_updates->surface = stream_status->plane_states[0];
- if (!surface_updates->surface) {
+ surface = stream_status->plane_states[0];
+ surface_updates->surface = surface;
+
+ if (!surface) {
DRM_ERROR("No surface for CRTC: id=%d\n",
acrtc->crtc_id);
return;
update_freesync_state_on_stream(
&adev->dm,
acrtc_state,
- acrtc_state->stream);
+ acrtc_state->stream,
+ surface,
+ addr.flip_timestamp_in_us);
if (acrtc_state->freesync_timing_changed)
stream_update.adjust =
&acrtc_state->stream->vrr_infopacket;
}
+ /* Update surface timing information. */
+ surface->time.time_elapsed_in_us[surface->time.index] =
+ addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
+ surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
+ surface->time.index++;
+ if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
+ surface->time.index = 0;
+
mutex_lock(&adev->dm.dc_lock);
+
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
config.max_refresh_in_uhz =
aconnector->max_vfreq * 1000000;
config.vsif_supported = true;
+ config.btr = true;
}
new_crtc_state->freesync_config = config;
{
new_crtc_state->vrr_supported = false;
- memset(&new_crtc_state->adjust, 0,
- sizeof(new_crtc_state->adjust));
+ memset(&new_crtc_state->vrr_params, 0,
+ sizeof(new_crtc_state->vrr_params));
memset(&new_crtc_state->vrr_infopacket, 0,
sizeof(new_crtc_state->vrr_infopacket));
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
- !new_crtc_state->vrr_enabled)
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
continue;
if (!new_crtc_state->enable)
bool vrr_supported;
struct mod_freesync_config freesync_config;
- struct dc_crtc_timing_adjust adjust;
+ struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket;
int abm_level;
{
enum bp_result result = BP_RESULT_OK;
struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+ struct atom_smu_info_v3_3 *smu_info = NULL;
if (!ss_info)
return BP_RESULT_BADINPUT;
if (!disp_cntl_tbl)
return BP_RESULT_BADBIOSTABLE;
+
ss_info->type.STEP_AND_DELAY_INFO = false;
ss_info->spread_percentage_divider = 1000;
/* BIOS no longer uses target clock. Always enable for now */
*/
result = BP_RESULT_UNSUPPORTED;
break;
+ case AS_SIGNAL_TYPE_XGMI:
+ smu_info = GET_IMAGE(struct atom_smu_info_v3_3,
+ DATA_TABLES(smu_info));
+ if (!smu_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->spread_spectrum_percentage =
+ smu_info->waflclk_ss_percentage;
+ ss_info->spread_spectrum_range =
+ smu_info->gpuclk_ss_rate_10hz * 10;
+ if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
default:
result = BP_RESULT_UNSUPPORTED;
}
return true;
#endif
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
return false;
}
- if (connectors_num == 0 && num_virtual_links == 0) {
- dm_error("DC: Number of connectors is zero!\n");
- }
-
dm_output_to_console(
"DC: %s: connectors_num: physical:%d, virtual:%d\n",
__func__,
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
- stream_update->vsc_infopacket) {
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
}
}
}
- if (update_type == UPDATE_TYPE_FULL)
- context_timing_trace(dc, &context->res_ctx);
-
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
/* Lock the top pipe while updating plane addrs, since freesync requires
return true;
}
+ if (link->connector_signal == SIGNAL_TYPE_EDP)
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+
/* todo: may need to lock gpio access */
hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
if (hpd_pin == NULL)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
-
+ uint8_t retry = 0;
struct ddc *ddc;
enum connector_id connector_id =
return present;
}
- /* Read GPIO: DP sink is present if both clock and data pins are zero */
- /* [anaumov] in DAL2, there was no check for GPIO failure */
-
- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
- ASSERT(gpio_result == GPIO_RESULT_OK);
+ /*
+ * Read GPIO: DP sink is present if both clock and data pins are zero
+ *
+ * [W/A] When plugging/unplugging the DP cable, some customer boards see
+ * one short pulse on clk_pin (1V, < 1ms). DP then gets configured as
+ * HDMI/DVI and the monitor can't light up, so retry up to 3 times.
+ * A real passive dongle also needs an additional 3ms to be detected.
+ */
+ do {
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+ if (clock_pin)
+ udelay(1000);
+ else
+ break;
+ } while (retry++ < 3);
present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
same_dpcd = false;
}
- /* Active dongle downstream unplug */
+ /* Active dongle plugged in without a display, or downstream unplug */
if (link->type == dc_connection_active_dongle
&& link->dpcd_caps.sink_count.
bits.SINK_COUNT == 0) {
- if (prev_sink != NULL)
+ if (prev_sink != NULL) {
+ /* Downstream unplug */
dc_sink_release(prev_sink);
+ } else {
+ /* Empty dongle plugged in */
+ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+ int fail_count = 0;
+
+ dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count);
+
+ if (fail_count == 0)
+ break;
+ }
+ }
return true;
}
bool dc_link_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- const struct dc_stream_state *stream)
+ uint32_t frame_ramp)
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
(abm->funcs->set_backlight_level_pwm == NULL))
return false;
- if (stream)
- ((struct dc_stream_state *)stream)->bl_pwm_level =
- backlight_pwm_u16_16;
-
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
-
- dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
- pipe_ctx->stream->bl_pwm_level,
- 0,
- pipe_ctx->stream);
}
}
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ core_dc->hwss.blank_stream(pipe_ctx);
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
- core_dc->hwss.blank_stream(pipe_ctx);
-
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
return max_link_cap;
}
+static enum dc_status read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ static enum dc_status retval;
+
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes which is
+ * the req from the above mentioned test cases.
+ *
+ * For DP 1.4 we need to read those from 2002h range.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+ else {
+ /* Read 14 bytes in a single read and then copy only the required fields.
+ * This is more efficient than doing it in two separate AUX reads. */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT_ESI,
+ tmp,
+ sizeof(tmp));
+
+ if (retval != DC_OK)
+ return retval;
+
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ }
+
+ return retval;
+}
+
+static bool hpd_rx_irq_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state = 0;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+
+ /*1. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if one of the channel equalization, clock
+ * recovery or symbol lock is dropped
+ * consider it as (link has been
+ * dropped) dp sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+ DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
+
+ return_code = true;
+
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
+ }
+ }
+
+ return return_code;
+}
+
bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting,
struct clock_source *dp_cs;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
+ union hpd_irq_data irq_data;
if (link->dc->debug.skip_detection_link_training) {
link->verified_link_cap = *known_limit_link_setting;
return true;
}
+ memset(&irq_data, 0, sizeof(irq_data));
success = false;
skip_link_training = false;
(*fail_count)++;
}
- if (success)
+ if (success) {
link->verified_link_cap = *cur;
-
+ udelay(1000);
+ if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK)
+ if (hpd_rx_irq_check_link_loss_status(
+ link,
+ &irq_data))
+ (*fail_count)++;
+ }
/* always disable the link before trying another
* setting or before returning we'll enable it later
* based on the actual mode we're driving
}
/*************************Short Pulse IRQ***************************/
-
-static bool hpd_rx_irq_check_link_loss_status(
- struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data)
-{
- uint8_t irq_reg_rx_power_state = 0;
- enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
- union lane_status lane_status;
- uint32_t lane;
- bool sink_status_changed;
- bool return_code;
-
- sink_status_changed = false;
- return_code = false;
-
- if (link->cur_link_settings.lane_count == 0)
- return return_code;
-
- /*1. Check that Link Status changed, before re-training.*/
-
- /*parse lane status*/
- for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
- /* check status of lanes 0,1
- * changed DpcdAddress_Lane01Status (0x202)
- */
- lane_status.raw = get_nibble_at_index(
- &hpd_irq_dpcd_data->bytes.lane01_status.raw,
- lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed
- */
- sink_status_changed = true;
- break;
- }
- }
-
- /* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
-
- DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
-
- return_code = true;
-
- /*2. Check that we can handle interrupt: Not in FS DOS,
- * Not in "Display Timeout" state, Link is trained.
- */
- dpcd_result = core_link_read_dpcd(link,
- DP_SET_POWER,
- &irq_reg_rx_power_state,
- sizeof(irq_reg_rx_power_state));
-
- if (dpcd_result != DC_OK) {
- DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
- __func__);
- } else {
- if (irq_reg_rx_power_state != DP_SET_POWER_D0)
- return_code = false;
- }
- }
-
- return return_code;
-}
-
-static enum dc_status read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data)
-{
- static enum dc_status retval;
-
- /* The HW reads 16 bytes from 200h on HPD,
- * but if we get an AUX_DEFER, the HW cannot retry
- * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
- * fail, so we now explicitly read 6 bytes which is
- * the req from the above mentioned test cases.
- *
- * For DP 1.4 we need to read those from 2002h range.
- */
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT,
- irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
- /* Read 14 bytes in a single read and then copy only the required fields.
- * This is more efficient than doing it in two separate AUX reads. */
-
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
-
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT_ESI,
- tmp,
- sizeof(tmp));
-
- if (retval != DC_OK)
- return retval;
-
- irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
- }
-
- return retval;
-}
-
static bool allow_hpd_rx_irq(const struct dc_link *link)
{
/*
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
break;
link_settings,
clock_source);
}
+ link->cur_link_settings = *link_settings;
dp_receiver_power_ctrl(link, true);
}
link->link_enc,
link_setting,
pipes[i].clock_source->id);
+ link->cur_link_settings = *link_setting;
dp_receiver_power_ctrl(link, true);
skip_video_pattern,
LINK_TRAINING_ATTEMPTS);
- link->cur_link_settings = *link_setting;
link->dc->hwss.enable_stream(&pipes[i]);
dc_version = DCE_VERSION_11_22;
break;
case FAMILY_AI:
- dc_version = DCE_VERSION_12_0;
+ if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_12_1;
+ else
+ dc_version = DCE_VERSION_12_0;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV:
num_virtual_links, dc);
break;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
num_virtual_links, dc);
break;
if (field_value == condition_value) {
if (i * delay_between_poll_us > 1000 &&
!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n",
+ DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
return reg_val;
}
}
- dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
+ DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
delay_between_poll_us, time_out_num_tries,
func_name, line);
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
- SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
+ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_INVALID
/*grow 444 video here if necessary */
*/
bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- const struct dc_stream_state *stream);
+ uint32_t frame_ramp);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
/* DMCU info */
unsigned int abm_level;
- unsigned int bl_pwm_level;
/* from core_stream struct */
struct dc_context *ctx;
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /* TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
/* get max clock state from PPLIB */
clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+ context->bw.dce.dispclk_khz = unpatched_disp_clk;
}
static void dce12_update_clocks(struct clk_mgr *clk_mgr,
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* un-mute audio */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
if (option != KEEP_ACQUIRED_RESOURCE ||
!dc->debug.az_endpoint_mute_only) {
/*only disable az_endpoint if power down or free*/
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
+ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ /* apply the PME w/a in order to wake AZ from D3 */
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */
pipe_ctx->plane_res.scl_data.lb_params.depth,
&pipe_ctx->stream->bit_depth_params);
- if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+ /*
+ * The way 420 is packed, two channels carry the Y component and one
+ * channel alternates between Cb and Cr, so both channels need the
+ * pixel value for Y
+ */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ color.color_r_cr = color.color_g_y;
+
pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
pipe_ctx->stream_res.tg,
&color);
+ }
pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
&pipe_ctx->plane_res.scl_data);
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset < 0)
+ if (src_y_offset + (int)height <= 0)
cur_en = 0; /* not visible beyond top edge*/
REG_UPDATE(CURSOR0_CONTROL,
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
+ if (src_y_offset + (int)hubp->curs_attr.height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
+ /*
+ * The way 420 is packed, two channels carry the Y component and one
+ * channel alternates between Cb and Cr, so both channels need the
+ * pixel value for Y
+ */
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ black_color.color_r_cr = black_color.color_g_y;
+
if (stream_res->tg->funcs->set_blank_color)
stream_res->tg->funcs->set_blank_color(
stream_res->tg,
top_pipe_to_program->plane_state->update_flags.bits.full_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
+ tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream
+ || !pipe_ctx->plane_state
+ || !tg->funcs->is_tg_enabled(tg))
continue;
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ tg->funcs->lock(tg);
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
- }
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->stream || pipe_ctx->stream == stream)
- continue;
-
- dcn10_pipe_control_lock(dc, pipe_ctx, false);
- }
+ tg->funcs->unlock(tg);
+ }
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
dal_hw_factory_dce110_init(factory);
return true;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
dal_hw_factory_dce120_init(factory);
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
dal_hw_translate_dce110_init(translate);
return true;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
dal_hw_translate_dce120_init(translate);
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCE_VERSION_10_0:
return dal_i2caux_dce100_create(ctx);
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
return dal_i2caux_dce120_create(ctx);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
AS_SIGNAL_TYPE_LVDS,
AS_SIGNAL_TYPE_DISPLAY_PORT,
AS_SIGNAL_TYPE_GPU_PLL,
+ AS_SIGNAL_TYPE_XGMI,
AS_SIGNAL_TYPE_UNKNOWN
};
DCE_VERSION_11_2,
DCE_VERSION_11_22,
DCE_VERSION_12_0,
+ DCE_VERSION_12_1,
DCE_VERSION_MAX,
DCN_VERSION_1_0,
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
#define NUM_POWER_FN_SEGS 8
#define NUM_BL_CURVE_SEGS 16
+#pragma pack(push, 1)
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
uint8_t dummy8; /* 0xfe */
uint8_t dummy9; /* 0xff */
};
+#pragma pack(pop)
static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
{
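The pack(1) pragma added around iram_table_v_2 guarantees the struct maps byte for byte onto the 256B iRAM layout, with no compiler-inserted padding. A tiny standalone example of the effect on sizeof (hypothetical struct, illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Without packing the compiler may insert padding between members. */
struct unpacked {
	uint8_t  flags;
	uint32_t value;
};

#pragma pack(push, 1)
struct packed {
	uint8_t  flags;
	uint32_t value;
};
#pragma pack(pop)

int main(void)
{
	/* Typically prints 8 and 5: pack(1) drops the 3 padding bytes. */
	printf("%zu %zu\n", sizeof(struct unpacked), sizeof(struct packed));
	return 0;
}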
};
enum PP_SMC_POWER_PROFILE {
- PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0,
- PP_SMC_POWER_PROFILE_POWERSAVING = 0x1,
- PP_SMC_POWER_PROFILE_VIDEO = 0x2,
- PP_SMC_POWER_PROFILE_VR = 0x3,
- PP_SMC_POWER_PROFILE_COMPUTE = 0x4,
- PP_SMC_POWER_PROFILE_CUSTOM = 0x5,
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
+ PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
+ PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
+ PP_SMC_POWER_PROFILE_VIDEO = 0x3,
+ PP_SMC_POWER_PROFILE_VR = 0x4,
+ PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
+ PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
};
enum {
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
-
- hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
- hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
- hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+
+ hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
+ hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
+ hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[6] =
- {{1, 0, 100, 30, 1, 0, 100, 10},
+static const struct profile_mode_setting smu7_profiling[7] =
+ {{0, 0, 0, 0, 0, 0, 0, 0},
+ {1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
uint32_t i, size = 0;
uint32_t len;
- static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ static const char *profile_name[7] = {"BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
hwmgr->backend = data;
- hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
- hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega10_set_default_registry_data(hwmgr);
data->disable_dpm_mask = 0xff;
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, size = 0;
- static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
+ static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
+ {70, 60, 1, 3,},
{90, 60, 0, 0,},
{70, 60, 0, 0,},
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
- static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ static const char *profile_name[7] = {"BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
return 0;
}
+static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
+{
+ uint32_t result;
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+ "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
+ return -EINVAL);
+
+ result = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(result == 1,
+ "Failed to run ACG BTC!", return -EINVAL);
+
+ return 0;
+}
+
static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
"Failed to initialize SMC table!",
result = tmp_result);
+ tmp_result = vega12_run_acg_btc(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to run ACG BTC!",
+ result = tmp_result);
+
result = vega12_enable_all_smu_features(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to enable all smu features!",
#include "soc15_common.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
hwmgr->backend = data;
- hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
- hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega20_set_default_registry_data(hwmgr);
pp_table->FanZeroRpmEnable)
od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
+ if (!od_settings->overdrive8_capabilities)
+ hwmgr->od_enabled = false;
+
return 0;
}
(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
"Failed to set soft min memclk !",
return ret);
-
- min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
- PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
- hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
- "Failed to set hard min memclk !",
- return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled &&
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_max_level >= data->dpm_table.gfx_table.count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level,
+ data->dpm_table.gfx_table.count - 1);
+ return -EINVAL;
+ }
+
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
data->dpm_table.gfx_table.dpm_state.soft_max_level =
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_max_level >= data->dpm_table.mem_table.count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level,
+ data->dpm_table.mem_table.count - 1);
+ return -EINVAL;
+ }
+
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
data->dpm_table.mem_table.dpm_state.soft_max_level =
break;
case PP_PCIE:
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_min_level >= NUM_LINK_LEVELS ||
+ soft_max_level >= NUM_LINK_LEVELS)
+ return -EINVAL;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to set min link dpm level!",
+ return ret);
+
break;
default:
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
+ struct amdgpu_device *adev = hwmgr->adev;
struct pp_clock_levels_with_latency clocks;
int i, now, size = 0;
int ret = 0;
+ uint32_t gen_speed, lane_width;
switch (type) {
case PP_SCLK:
break;
case PP_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMHz %s\n", i,
+ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
+ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
+ (pptable->PcieLaneCount[i] == 1) ? "x1" :
+ (pptable->PcieLaneCount[i] == 2) ? "x2" :
+ (pptable->PcieLaneCount[i] == 3) ? "x4" :
+ (pptable->PcieLaneCount[i] == 4) ? "x8" :
+ (pptable->PcieLaneCount[i] == 5) ? "x12" :
+ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == pptable->PcieGenSpeed[i]) &&
+ (lane_width == pptable->PcieLaneCount[i]) ?
+ "*" : "");
break;
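For reference, the format string above produces one line per link level; the frequency and the selected entry below are invented for illustration:

/*   "1: 8.0GT/s, x16 1011Mhz *"
 * The trailing '*' is printed only for the entry whose PcieGenSpeed and
 * PcieLaneCount match the gen_speed and lane_width just read back from the
 * PCIE link-control registers, i.e. the currently trained link state.
 */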
case OD_SCLK:
int pplib_workload = 0;
switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
uint32_t i, size = 0;
uint16_t workload_type = 0;
static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
/**
* The main hardware manager structure.
*/
-#define Workload_Policy_Max 5
+#define Workload_Policy_Max 6
struct pp_hwmgr {
void *adev;
#define DP0_STARTVAL 0x064c
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
+#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
+#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
#define DP0_MISC 0x0658
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
#define DP0_LTLOOPCTRL 0x06d8
#define DP0_SNKLTCTRL 0x06e4
+#define DP1_SRCCTRL 0x07a0
+
/* PHY */
#define DP_PHY_CTRL 0x0800
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
+#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
unsigned long rate;
u32 value;
int ret;
+ u32 dp_phy_ctrl;
rate = clk_get_rate(tc->refclk);
switch (rate) {
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
/*
* Initially PLLs are in bypass. Force PLL parameter update,
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
+ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
if (!tc->mode)
return -EINVAL;
- /* from excel file - DP0_SrcCtrl */
- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
- /* from excel file - DP1_SrcCtrl */
- tc_write(0x07a0, 0x00003083);
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
+ tc_write(DP1_SRCCTRL,
+ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
+ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
rate = clk_get_rate(tc->refclk);
switch (rate) {
}
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
+
/* Setup Main Link */
- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
msleep(100);
static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct tc_data *tc = connector_to_tc(connector);
+ u32 req, avail;
+ u32 bits_per_pixel = 24;
+
/* DPI interface clock limitation: upto 154 MHz */
if (mode->clock > 154000)
return MODE_CLOCK_HIGH;
+ req = mode->clock * bits_per_pixel / 8;
+ avail = tc->link.base.num_lanes * tc->link.base.rate;
+
+ if (req > avail)
+ return MODE_BAD;
+
return MODE_OK;
}
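A quick check of the new bandwidth test with hypothetical numbers (units are the driver's own: mode->clock in kHz and link.base.rate as reported by DP link probing):

/* 1080p60: mode->clock = 148500, bits_per_pixel = 24
 *   req   = 148500 * 24 / 8        = 445500
 *   avail = 2 lanes * 270000 (HBR) = 540000   -> req <= avail, MODE_OK
 *   avail = 1 lane  * 162000 (RBR) = 162000   -> req >  avail, MODE_BAD
 */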
/* Create eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
+ tc->panel ? DRM_MODE_CONNECTOR_eDP :
+ DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
+ tc->connector.display_info.bus_flags =
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
state->fence = NULL;
state->commit = NULL;
+ state->fb_damage_clips = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
if (state->commit)
drm_crtc_commit_put(state->commit);
+
+ drm_property_blob_put(state->fb_damage_clips);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
return -EINVAL;
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state->acquire_ctx = &ctx;
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
state = drm_atomic_state_alloc(fb->dev);
if (!state) {
ret = -ENOMEM;
- goto out;
+ goto out_drop_locks;
}
state->acquire_ctx = &ctx;
kfree(rects);
drm_atomic_state_put(state);
+out_drop_locks:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* LG LP140WF6-SPM1 eDP panel */
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+ /* Apple panels need some additional handling to support PSR */
+ { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
};
#undef OUI
var_1->transp.msb_right == var_2->transp.msb_right;
}
+static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
+ u8 depth)
+{
+ switch (depth) {
+ case 8:
+ var->red.offset = 0;
+ var->green.offset = 0;
+ var->blue.offset = 0;
+ var->red.length = 8; /* 8bit DAC */
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 15:
+ var->red.offset = 10;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 5;
+ var->blue.length = 5;
+ var->transp.offset = 15;
+ var->transp.length = 1;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ break;
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ break;
+ default:
+ break;
+ }
+}
+
/**
* drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
* @var: screeninfo to check
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
- if (var->pixclock != 0 || in_dbg_master())
+ if (in_dbg_master())
return -EINVAL;
+ if (var->pixclock != 0) {
+ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+ var->pixclock = 0;
+ }
+
if ((drm_format_info_block_width(fb->format, 0) > 1) ||
(drm_format_info_block_height(fb->format, 0) > 1))
return -EINVAL;
}
/*
+ * Workaround for SDL 1.2, which is known to set all pixel format
+ * fields to zero in some cases. We treat this situation as a
+ * request to "use some reasonable autodetected values".
+ */
+ if (!var->red.offset && !var->green.offset &&
+ !var->blue.offset && !var->transp.offset &&
+ !var->red.length && !var->green.length &&
+ !var->blue.length && !var->transp.length &&
+ !var->red.msb_right && !var->green.msb_right &&
+ !var->blue.msb_right && !var->transp.msb_right) {
+ drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+ }
+
+ /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
- switch (fb->format->depth) {
- case 8:
- info->var.red.offset = 0;
- info->var.green.offset = 0;
- info->var.blue.offset = 0;
- info->var.red.length = 8; /* 8bit DAC */
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 15:
- info->var.red.offset = 10;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 5;
- info->var.blue.length = 5;
- info->var.transp.offset = 15;
- info->var.transp.length = 1;
- break;
- case 16:
- info->var.red.offset = 11;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 6;
- info->var.blue.length = 5;
- info->var.transp.offset = 0;
- break;
- case 24:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 32:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 24;
- info->var.transp.length = 8;
- break;
- default:
- break;
- }
+ drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
info->var.xres = fb_width;
info->var.yres = fb_height;
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, 0);
-
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
+
+ drm_modeset_acquire_init(&ctx, 0);
state->acquire_ctx = &ctx;
retry:
if (prop == state->dev->mode_config.dpms_property) {
return NULL;
dmah->size = size;
- dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
- GFP_KERNEL | __GFP_COMP);
+ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+ &dmah->busaddr,
+ GFP_KERNEL | __GFP_COMP);
if (dmah->vaddr == NULL) {
kfree(dmah);
{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
- {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+ {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
D_BDW_PLUS, 0, 8, NULL},
- {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
- ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+ {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
+ D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
ret = intel_gvt_debugfs_init(gvt);
if (ret)
- gvt_err("debugfs registeration failed, go on.\n");
+ gvt_err("debugfs registration failed, go on.\n");
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
+ union {
+ u64 i915_context_pml4;
+ u64 i915_context_pdps[GEN8_3LVL_PDPES];
+ };
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
void *ring_scan_buffer[I915_NUM_ENGINES];
_MMIO(0x7704),
_MMIO(0x7708),
_MMIO(0x770c),
+ _MMIO(0x83a8),
_MMIO(0xb110),
GEN8_L3SQCREG4,//_MMIO(0xb118)
_MMIO(0xe100),
MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
- void (*detach_vgpu)(unsigned long handle);
+ void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*enable_page_track)(unsigned long handle, u64 gfn);
[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
- [ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+ [ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
[GMBUS] = "Gmbus",
[SDVO_B_HOTPLUG] = "SDVO B hotplug",
[CRT_HOTPLUG] = "CRT Hotplug",
{
unsigned int index;
u64 virtaddr;
- unsigned long req_size, pgoff = 0;
+ unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
pg_prot = vma->vm_page_prot;
virtaddr = vma->vm_start;
req_size = vma->vm_end - vma->vm_start;
- pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
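A short descriptive note on the new offset handling (inferred from the VFIO mmap conventions, not spelled out in the patch):

/* vm_pgoff carries the VFIO region index in its upper bits (shifted by
 * VFIO_PCI_OFFSET_SHIFT); the mask above keeps only the page offset inside
 * that region. The two range checks below then make sure the requested
 * window lies entirely within this vGPU's slice of the aperture before the
 * host physical pages are handed out via remap_pfn_range().
 */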
+ if (!intel_vgpu_in_aperture(vgpu, req_start))
+ return -EINVAL;
+ if (req_start + req_size >
+ vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+ return -EINVAL;
+
+ pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}
return 0;
}
-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
{
- /* nothing to do here */
+ int i;
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ if (!vgpu->vdev.region)
+ return;
+
+ for (i = 0; i < vgpu->vdev.num_regions; i++)
+ if (vgpu->vdev.region[i].ops->release)
+ vgpu->vdev.region[i].ops->release(vgpu,
+ &vgpu->vdev.region[i]);
+ vgpu->vdev.num_regions = 0;
+ kfree(vgpu->vdev.region);
+ vgpu->vdev.region = NULL;
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
if (!intel_gvt_host.mpt->detach_vgpu)
return;
- intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+ intel_gvt_host.mpt->detach_vgpu(vgpu);
}
#define MSI_CAP_CONTROL(offset) (offset + 2)
return 0;
}
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+ struct i915_request *rq;
+ int ret = 0;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (workload->req)
+ goto out;
+
+ rq = i915_request_alloc(engine, shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+ workload->req = i915_request_get(rq);
+out:
+ return ret;
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ringbuffer, wa_ctx and ctx.
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
struct intel_context *ce;
- struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (workload->req)
+ if (workload->shadow)
return 0;
ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
goto err_shadow;
}
- rq = i915_request_alloc(engine, shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_shadow;
- }
- workload->req = i915_request_get(rq);
-
- ret = populate_shadow_context(workload);
- if (ret)
- goto err_req;
-
+ workload->shadow = true;
return 0;
-err_req:
- rq = fetch_and_zero(&workload->req);
- i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
err_unpin:
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = intel_gvt_workload_req_alloc(workload);
+ if (ret)
+ goto err_req;
+
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
goto out;
- ret = prepare_workload(workload);
+ ret = populate_shadow_context(workload);
+ if (ret) {
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ goto out;
+ }
+ ret = prepare_workload(workload);
out:
- if (ret)
- workload->status = ret;
-
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
-
+err_req:
+ if (ret)
+ workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_unlock(&vgpu->vgpu_lock);
return ret;
return ret;
}
+static void
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+{
+ struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+ int i;
+
+ if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+ else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++)
+ px_dma(i915_ppgtt->pdp.page_directory[i]) =
+ s->i915_context_pdps[i];
+ }
+}
+
/**
* intel_vgpu_clean_submission - free submission-related resource for vGPU
* @vgpu: a vGPU
struct intel_vgpu_submission *s = &vgpu->submission;
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+ i915_context_ppgtt_root_restore(s);
i915_gem_context_put(s->shadow_ctx);
kmem_cache_destroy(s->workloads);
}
s->ops->reset(vgpu, engine_mask);
}
+static void
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+{
+ struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+ int i;
+
+ if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
+ else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++)
+ s->i915_context_pdps[i] =
+ px_dma(i915_ppgtt->pdp.page_directory[i]);
+ }
+}
+
/**
* intel_vgpu_setup_submission - setup submission-related resource for vGPU
* @vgpu: a vGPU
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
+ i915_context_ppgtt_root_save(s);
+
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
struct i915_request *req;
	/* has this workload been dispatched to i915? */
bool dispatched;
+ bool shadow; /* if workload has done shadow of guest request */
int status;
struct intel_vgpu_mm *shadow_mm;
intel_runtime_pm_get(i915);
gpu = i915_capture_gpu_state(i915);
intel_runtime_pm_put(i915);
- if (!gpu)
- return -ENOMEM;
+ if (IS_ERR(gpu))
+ return PTR_ERR(gpu);
file->private_data = gpu;
return 0;
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- file->private_data = i915_first_error_state(inode->i_private);
+ struct i915_gpu_state *error;
+
+ error = i915_first_error_state(inode->i_private);
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ file->private_data = error;
return 0;
}
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ int err;
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- return i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto unpin;
+
+ return 0;
+
+unpin:
+ ppgtt->pin_count = 0;
+ return err;
}
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
{
struct i915_gpu_state *error;
+ /* Check if GPU capture has been disabled */
+ error = READ_ONCE(i915->gpu_error.first_error);
+ if (IS_ERR(error))
+ return error;
+
error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (!error)
- return NULL;
+ if (!error) {
+ i915_disable_error_state(i915, -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
kref_init(&error->ref);
error->i915 = i915;
return;
error = i915_capture_gpu_state(i915);
- if (!error) {
- DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
- i915_disable_error_state(i915, -ENOMEM);
+ if (IS_ERR(error))
return;
- }
i915_error_capture_msg(i915, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
- if (error)
+ if (!IS_ERR_OR_NULL(error))
i915_gpu_state_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
- i915->gpu_error.first_error = NULL;
+ if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+ i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
- if (!IS_ERR(error))
+ if (!IS_ERR_OR_NULL(error))
i915_gpu_state_put(error);
}
ssize_t ret;
gpu = i915_first_error_state(i915);
- if (gpu) {
+ if (IS_ERR(gpu)) {
+ ret = PTR_ERR(gpu);
+ } else if (gpu) {
ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
i915_gpu_state_put(gpu);
} else {
if (ret)
return ret;
+ intel_engine_init_workarounds(engine);
+
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
}
intel_engine_init_whitelist(engine);
- intel_engine_init_workarounds(engine);
return 0;
}
DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+ DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+ return;
+ }
+
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
return;
}
+
dev_priv->psr.sink_support = true;
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
- bool enabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
};
-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
drm_crtc_vblank_on(crtc);
- meson_crtc->enabled = true;
-}
-
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
-{
- struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
- struct meson_drm *priv = meson_crtc->priv;
-
- DRM_DEBUG_DRIVER("\n");
-
- if (!meson_crtc->enabled)
- meson_crtc_enable(crtc);
-
priv->viu.osd1_enabled = true;
}
crtc->state->event = NULL;
}
-
- meson_crtc->enabled = false;
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
- if (crtc->state->enable && !meson_crtc->enabled)
- meson_crtc_enable(crtc);
-
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
.fb_create = drm_gem_fb_create,
};
+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
static irqreturn_t meson_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
+ drm->mode_config.helper_private = &meson_mode_config_helpers;
/* Hardware Initialization */
remote_node = of_graph_get_remote_port_parent(ep);
if (!remote_node ||
remote_node == parent || /* Ignore parent endpoint */
- !of_device_is_available(remote_node))
+ !of_device_is_available(remote_node)) {
+ of_node_put(remote_node);
continue;
+ }
count += meson_probe_remote(pdev, match, remote, remote_node);
for_each_endpoint_of_node(np, ep) {
remote = of_graph_get_remote_port_parent(ep);
- if (!remote || !of_device_is_available(remote))
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
continue;
+ }
count += meson_probe_remote(pdev, &match, np, remote);
+ of_node_put(remote);
}
if (count && !match)
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
- select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
+ select BACKLIGHT_LCD_SUPPORT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
+ case NV_DEVICE_INFO_V0_VOLTA:
+ case NV_DEVICE_INFO_V0_TURING:
ret = nv50_backlight_init(nv_encoder, &props, &ops);
break;
default:
};
static const struct nvkm_device_chip
+nv162_chipset = {
+ .name = "TU102",
+ .bar = tu104_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu104_devinit_new,
+ .fault = tu104_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu104_mc_new,
+ .mmu = tu104_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu104_ce_new,
+ .ce[1] = tu104_ce_new,
+ .ce[2] = tu104_ce_new,
+ .ce[3] = tu104_ce_new,
+ .ce[4] = tu104_ce_new,
+ .disp = tu104_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
.bar = tu104_bar_new,
case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
case 0x140: device->chip = &nv140_chipset; break;
+ case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
default:
#include <engine/falcon.h>
#include <core/gpuobj.h>
+#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>
}
}
- nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
- nvkm_wr32(device, base + 0x014, 0xffffffff);
+ if (nvkm_mc_enabled(device, engine->subdev.index)) {
+ nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+ nvkm_wr32(device, base + 0x014, 0xffffffff);
+ }
return 0;
}
duty = nvkm_therm_update_linear(therm);
break;
case NVBIOS_THERM_FAN_OTHER:
- if (therm->cstate)
+ if (therm->cstate) {
duty = therm->cstate;
- else
+ poll = false;
+ } else {
duty = nvkm_therm_update_linear_fallback(therm);
- poll = false;
+ }
break;
}
immd = false;
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
- .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
WARN_ONCE(1, "not implemented");
}
-struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
-}
-
-struct drm_gem_object *qxl_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
-}
-
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
child_count++;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
&panel, &bridge);
- if (!ret)
+ if (!ret) {
+ of_node_put(endpoint);
break;
+ }
}
of_node_put(port);
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
continue;
+ of_node_put(remote);
/* does this node match any registered engines? */
list_for_each_entry(frontend, &drv->frontend_list, list) {
if (remote == frontend->node) {
- of_node_put(remote);
of_node_put(port);
+ of_node_put(ep);
return frontend;
}
}
}
-
+ of_node_put(port);
return ERR_PTR(-EINVAL);
}
return 0;
}
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
+ int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct drm_printer p = drm_debug_printer(TTM_PFX);
- pr_err(" has_type: %d\n", man->has_type);
- pr_err(" use_type: %d\n", man->use_type);
- pr_err(" flags: 0x%08X\n", man->flags);
- pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
- pr_err(" size: %llu\n", man->size);
- pr_err(" available_caching: 0x%08X\n", man->available_caching);
- pr_err(" default_caching: 0x%08X\n", man->default_caching);
+ drm_printf(p, " has_type: %d\n", man->has_type);
+ drm_printf(p, " use_type: %d\n", man->use_type);
+ drm_printf(p, " flags: 0x%08X\n", man->flags);
+ drm_printf(p, " gpu_offset: 0x%08llX\n", man->gpu_offset);
+ drm_printf(p, " size: %llu\n", man->size);
+ drm_printf(p, " available_caching: 0x%08X\n", man->available_caching);
+ drm_printf(p, " default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
- (*man->func->debug)(man, &p);
+ (*man->func->debug)(man, p);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct drm_printer p = drm_debug_printer(TTM_PFX);
int i, ret, mem_type;
- pr_err("No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_place(&placement->placement[i],
&mem_type);
if (ret)
return;
- pr_err(" placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i].flags, mem_type);
- ttm_mem_type_debug(bo->bdev, mem_type);
+ drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i].flags, mem_type);
+ ttm_mem_type_debug(bo->bdev, &p, mem_type);
}
}
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = virtgpu_gem_prime_pin,
.gem_prime_unpin = virtgpu_gem_prime_unpin,
- .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
/* virtgpu_prime.c */
int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
WARN_ONCE(1, "not implemented");
}
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
+ depends on PCI
select VGA_ARB
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
}
rv = lm80_read_value(client, LM80_REG_FANDIV);
- if (rv < 0)
+ if (rv < 0) {
+ mutex_unlock(&data->update_lock);
return rv;
+ }
reg = (rv & ~(3 << (2 * (nr + 1))))
| (data->fan_div[nr] << (2 * (nr + 1)));
lm80_write_value(client, LM80_REG_FANDIV, reg);
* nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
* nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
* (0xd451)
- * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3
- * (0xd459)
+ * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
+ * (0xd429)
*
* #temp lists the number of monitored temperature sources (first value) plus
* the number of directly connectable temperature sensors (second value).
#define SIO_NCT6795_ID 0xd350
#define SIO_NCT6796_ID 0xd420
#define SIO_NCT6797_ID 0xd450
-#define SIO_NCT6798_ID 0xd458
+#define SIO_NCT6798_ID 0xd428
#define SIO_ID_MASK 0xFFF8
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
if (data->kind == nct6791 || data->kind == nct6792 ||
data->kind == nct6793 || data->kind == nct6795 ||
- data->kind == nct6796)
+ data->kind == nct6796 || data->kind == nct6797 ||
+ data->kind == nct6798)
nct6791_enable_io_mapping(sioreg);
superio_exit(sioreg);
if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
- sio_data->kind == nct6796)
+ sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
+ sio_data->kind == nct6798)
nct6791_enable_io_mapping(sioaddr);
superio_exit(sioaddr);
val *= 1000000ULL;
break;
case 2:
- val = get_unaligned_be32(&power->update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->value) * 1000000ULL;
&power->update_tag);
break;
case 2:
- val = get_unaligned_be32(&power->update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->value) * 1000000ULL;
&power->system.update_tag);
break;
case 2:
- val = get_unaligned_be32(&power->system.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->system.update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->system.value) * 1000000ULL;
&power->proc.update_tag);
break;
case 6:
- val = get_unaligned_be32(&power->proc.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->proc.update_tag) *
+ occ->powr_sample_time_us;
break;
case 7:
val = get_unaligned_be16(&power->proc.value) * 1000000ULL;
&power->vdd.update_tag);
break;
case 10:
- val = get_unaligned_be32(&power->vdd.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
+ occ->powr_sample_time_us;
break;
case 11:
val = get_unaligned_be16(&power->vdd.value) * 1000000ULL;
&power->vdn.update_tag);
break;
case 14:
- val = get_unaligned_be32(&power->vdn.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
+ occ->powr_sample_time_us;
break;
case 15:
val = get_unaligned_be16(&power->vdn.value) * 1000000ULL;
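All of the hunks above are the same fix repeated per sensor group; a minimal sketch of why the (u64) cast matters, with a hypothetical sample-time value:

/* get_unaligned_be32() yields a 32-bit value, so without the cast the
 * multiply is evaluated in 32-bit arithmetic and truncates before being
 * stored, e.g.:
 *   update_tag = 0xf0000000 (~4.0e9), powr_sample_time_us = 500
 *   product ~ 2.0e12, which needs about 41 bits.
 * Casting one operand to u64 promotes the whole expression to 64 bits.
 */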
.data = (void *)2
},
{
- .compatible = "ti,tmp422",
+ .compatible = "ti,tmp442",
.data = (void *)3
},
{ },
If unsure, say N.
+config HWSPINLOCK_STM32
+ tristate "STM32 Hardware Spinlock device"
+ depends on MACH_STM32MP157
+ depends on HWSPINLOCK
+ help
+ Say y here to support the STM32 Hardware Spinlock device.
+
+ If unsure, say N.
+
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
depends on HWSPINLOCK
obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_STM32) += stm32_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2018
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "hwspinlock_internal.h"
+
+#define STM32_MUTEX_COREID BIT(8)
+#define STM32_MUTEX_LOCK_BIT BIT(31)
+#define STM32_MUTEX_NUM_LOCKS 32
+
+struct stm32_hwspinlock {
+ struct clk *clk;
+ struct hwspinlock_device bank;
+};
+
+static int stm32_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+ u32 status;
+
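The write-then-readback pair below implements the HSEM one-step lock procedure; a brief explanatory comment (an inference from the readback comparison, not text from the patch):

/* Write LOCK_BIT | COREID, then read the register back. The hardware only
 * accepts the write when the semaphore is free, so reading back exactly
 * what was written means this core now owns the lock; any other value
 * means another master holds it and trylock reports failure.
 */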
+ writel(STM32_MUTEX_LOCK_BIT | STM32_MUTEX_COREID, lock_addr);
+ status = readl(lock_addr);
+
+ return status == (STM32_MUTEX_LOCK_BIT | STM32_MUTEX_COREID);
+}
+
+static void stm32_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ writel(STM32_MUTEX_COREID, lock_addr);
+}
+
+static const struct hwspinlock_ops stm32_hwspinlock_ops = {
+ .trylock = stm32_hwspinlock_trylock,
+ .unlock = stm32_hwspinlock_unlock,
+};
+
+static int stm32_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct stm32_hwspinlock *hw;
+ void __iomem *io_base;
+ struct resource *res;
+ size_t array_size;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ array_size = STM32_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw) + array_size, GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ hw->clk = devm_clk_get(&pdev->dev, "hsem");
+ if (IS_ERR(hw->clk))
+ return PTR_ERR(hw->clk);
+
+ for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++)
+ hw->bank.lock[i].priv = io_base + i * sizeof(u32);
+
+ platform_set_drvdata(pdev, hw);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = hwspin_lock_register(&hw->bank, &pdev->dev, &stm32_hwspinlock_ops,
+ 0, STM32_MUTEX_NUM_LOCKS);
+
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int stm32_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct stm32_hwspinlock *hw = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = hwspin_lock_unregister(&hw->bank);
+ if (ret)
+ dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_hwspinlock_runtime_suspend(struct device *dev)
+{
+ struct stm32_hwspinlock *hw = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hw->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_hwspinlock_runtime_resume(struct device *dev)
+{
+ struct stm32_hwspinlock *hw = dev_get_drvdata(dev);
+
+ clk_prepare_enable(hw->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops stm32_hwspinlock_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_hwspinlock_runtime_suspend,
+ stm32_hwspinlock_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id stm32_hwpinlock_ids[] = {
+ { .compatible = "st,stm32-hwspinlock", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_hwpinlock_ids);
+
+static struct platform_driver stm32_hwspinlock_driver = {
+ .probe = stm32_hwspinlock_probe,
+ .remove = stm32_hwspinlock_remove,
+ .driver = {
+ .name = "stm32_hwspinlock",
+ .of_match_table = stm32_hwpinlock_ids,
+ .pm = &stm32_hwspinlock_pm_ops,
+ },
+};
+
+static int __init stm32_hwspinlock_init(void)
+{
+ return platform_driver_register(&stm32_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(stm32_hwspinlock_init);
+
+static void __exit stm32_hwspinlock_exit(void)
+{
+ platform_driver_unregister(&stm32_hwspinlock_driver);
+}
+module_exit(stm32_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for STM32 SoCs");
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
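As a usage illustration, here is a minimal consumer sketch built on the generic hwspinlock framework API (the lock id, timeout and function name are hypothetical; nothing below is part of the patch):

#include <linux/hwspinlock.h>

/* Hypothetical consumer: serialize access to a resource shared with the M4 */
static int example_touch_shared_resource(void)
{
	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	hwlock = hwspin_lock_request_specific(5);	/* lock id chosen for illustration */
	if (!hwlock)
		return -EBUSY;

	/* spin for up to 10 ms, local interrupts disabled while held */
	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
	if (!ret) {
		/* ... touch the shared resource ... */
		hwspin_unlock_irqrestore(hwlock, &flags);
	}

	hwspin_lock_free(hwlock);
	return ret;
}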
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#define I2C_XFER_TIMEOUT (msecs_to_jiffies(250))
#define I2C_STOP_TIMEOUT (msecs_to_jiffies(100))
#define FIFO_SIZE 8
+#define SEQ_LEN 2
#define GLOBAL_CONTROL 0x00
#define GLOBAL_MST_EN BIT(0)
#define CMD_BUSY (1<<3)
#define CMD_MANUAL (0x00 | CMD_BUSY)
#define CMD_AUTO (0x01 | CMD_BUSY)
+#define CMD_SEQUENCE (0x02 | CMD_BUSY)
#define MST_RX_XFER 0x2c
#define MST_TX_XFER 0x30
#define MST_ADDR_1 0x34
* axxia_i2c_dev - I2C device context
* @base: pointer to register struct
* @msg: pointer to current message
- * @msg_xfrd: number of bytes transferred in msg
+ * @msg_r: pointer to current read message (sequence transfer)
+ * @msg_xfrd: number of bytes transferred in tx_fifo
+ * @msg_xfrd_r: number of bytes transferred in rx_fifo
* @msg_err: error code for completed message
* @msg_complete: xfer completion object
* @dev: device reference
struct axxia_i2c_dev {
void __iomem *base;
struct i2c_msg *msg;
+ struct i2c_msg *msg_r;
size_t msg_xfrd;
+ size_t msg_xfrd_r;
int msg_err;
struct completion msg_complete;
struct device *dev;
*/
static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
{
- struct i2c_msg *msg = idev->msg;
+ struct i2c_msg *msg = idev->msg_r;
size_t rx_fifo_avail = readl(idev->base + MST_RX_FIFO);
- int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd);
+ int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd_r);
while (bytes_to_transfer-- > 0) {
int c = readl(idev->base + MST_DATA);
- if (idev->msg_xfrd == 0 && i2c_m_recv_len(msg)) {
+ if (idev->msg_xfrd_r == 0 && i2c_m_recv_len(msg)) {
/*
* Check length byte for SMBus block read
*/
msg->len = 1 + c;
writel(msg->len, idev->base + MST_RX_XFER);
}
- msg->buf[idev->msg_xfrd++] = c;
+ msg->buf[idev->msg_xfrd_r++] = c;
}
return 0;
}
/* RX FIFO needs service? */
- if (i2c_m_rd(idev->msg) && (status & MST_STATUS_RFL))
+ if (i2c_m_rd(idev->msg_r) && (status & MST_STATUS_RFL))
axxia_i2c_empty_rx_fifo(idev);
/* TX FIFO needs service? */
i2c_int_disable(idev, MST_STATUS_TFL);
}
- if (status & MST_STATUS_SCC) {
- /* Stop completed */
- i2c_int_disable(idev, ~MST_STATUS_TSS);
- complete(&idev->msg_complete);
- } else if (status & MST_STATUS_SNS) {
- /* Transfer done */
- i2c_int_disable(idev, ~MST_STATUS_TSS);
- if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
- axxia_i2c_empty_rx_fifo(idev);
- complete(&idev->msg_complete);
- } else if (status & MST_STATUS_TSS) {
- /* Transfer timeout */
- idev->msg_err = -ETIMEDOUT;
- i2c_int_disable(idev, ~MST_STATUS_TSS);
- complete(&idev->msg_complete);
- } else if (unlikely(status & MST_STATUS_ERR)) {
+ if (unlikely(status & MST_STATUS_ERR)) {
/* Transfer error */
i2c_int_disable(idev, ~0);
if (status & MST_STATUS_AL)
readl(idev->base + MST_TX_BYTES_XFRD),
readl(idev->base + MST_TX_XFER));
complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_SCC) {
+ /* Stop completed */
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_SNS) {
+ /* Transfer done */
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ if (i2c_m_rd(idev->msg_r) && idev->msg_xfrd_r < idev->msg_r->len)
+ axxia_i2c_empty_rx_fifo(idev);
+ complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_SS) {
+ /* Auto/Sequence transfer done */
+ complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_TSS) {
+ /* Transfer timeout */
+ idev->msg_err = -ETIMEDOUT;
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ complete(&idev->msg_complete);
}
out:
return IRQ_HANDLED;
}
-static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+static void axxia_i2c_set_addr(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
{
- u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS;
- u32 rx_xfer, tx_xfer;
u32 addr_1, addr_2;
- unsigned long time_left;
- unsigned int wt_value;
-
- idev->msg = msg;
- idev->msg_xfrd = 0;
- reinit_completion(&idev->msg_complete);
if (i2c_m_ten(msg)) {
/* 10-bit address
addr_2 = 0;
}
+ writel(addr_1, idev->base + MST_ADDR_1);
+ writel(addr_2, idev->base + MST_ADDR_2);
+}
+
+/* The NAK interrupt will be sent _before_ issuing the STOP command,
+ * so the controller might still be busy processing it. No interrupt
+ * will be sent at the end, so we have to poll for completion.
+ */
+static int axxia_i2c_handle_seq_nak(struct axxia_i2c_dev *idev)
+{
+ unsigned long timeout = jiffies + I2C_XFER_TIMEOUT;
+
+ do {
+ if ((readl(idev->base + MST_COMMAND) & CMD_BUSY) == 0)
+ return 0;
+ usleep_range(1, 100);
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[])
+{
+ u32 int_mask = MST_STATUS_ERR | MST_STATUS_SS | MST_STATUS_RFL;
+ u32 rlen = i2c_m_recv_len(&msgs[1]) ? I2C_SMBUS_BLOCK_MAX : msgs[1].len;
+ unsigned long time_left;
+
+ axxia_i2c_set_addr(idev, &msgs[0]);
+
+ writel(msgs[0].len, idev->base + MST_TX_XFER);
+ writel(rlen, idev->base + MST_RX_XFER);
+
+ idev->msg = &msgs[0];
+ idev->msg_r = &msgs[1];
+ idev->msg_xfrd = 0;
+ idev->msg_xfrd_r = 0;
+ axxia_i2c_fill_tx_fifo(idev);
+
+ writel(CMD_SEQUENCE, idev->base + MST_COMMAND);
+
+ reinit_completion(&idev->msg_complete);
+ i2c_int_enable(idev, int_mask);
+
+ time_left = wait_for_completion_timeout(&idev->msg_complete,
+ I2C_XFER_TIMEOUT);
+
+ i2c_int_disable(idev, int_mask);
+
+ axxia_i2c_empty_rx_fifo(idev);
+
+ if (idev->msg_err == -ENXIO) {
+ if (axxia_i2c_handle_seq_nak(idev))
+ axxia_i2c_init(idev);
+ } else if (readl(idev->base + MST_COMMAND) & CMD_BUSY) {
+ dev_warn(idev->dev, "busy after xfer\n");
+ }
+
+ if (time_left == 0) {
+ idev->msg_err = -ETIMEDOUT;
+ i2c_recover_bus(&idev->adapter);
+ axxia_i2c_init(idev);
+ }
+
+ if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
+ axxia_i2c_init(idev);
+
+ return idev->msg_err;
+}
+
+static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+{
+ u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS;
+ u32 rx_xfer, tx_xfer;
+ unsigned long time_left;
+ unsigned int wt_value;
+
+ idev->msg = msg;
+ idev->msg_r = msg;
+ idev->msg_xfrd = 0;
+ idev->msg_xfrd_r = 0;
+ reinit_completion(&idev->msg_complete);
+
+ axxia_i2c_set_addr(idev, msg);
+
if (i2c_m_rd(msg)) {
/* I2C read transfer */
rx_xfer = i2c_m_recv_len(msg) ? I2C_SMBUS_BLOCK_MAX : msg->len;
writel(rx_xfer, idev->base + MST_RX_XFER);
writel(tx_xfer, idev->base + MST_TX_XFER);
- writel(addr_1, idev->base + MST_ADDR_1);
- writel(addr_2, idev->base + MST_ADDR_2);
if (i2c_m_rd(msg))
int_mask |= MST_STATUS_RFL;
return 0;
}
+/* This function checks if the msgs[] array contains messages compatible with
+ * Sequence mode of operation. This mode assumes there will be exactly one
+ * write of non-zero length followed by exactly one read of non-zero length,
+ * both targeted at the same client device.
+ */
+static bool axxia_i2c_sequence_ok(struct i2c_msg msgs[], int num)
+{
+ return num == SEQ_LEN && !i2c_m_rd(&msgs[0]) && i2c_m_rd(&msgs[1]) &&
+ msgs[0].len > 0 && msgs[0].len <= FIFO_SIZE &&
+ msgs[1].len > 0 && msgs[0].addr == msgs[1].addr;
+}
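For reference, a transfer shape that passes this check, sketched from a hypothetical client driver (one register-address write of a single byte, then one read, both to the same address):

#include <linux/i2c.h>

/* Hypothetical helper: the write fits the 8-byte FIFO and is followed by one
 * read, so axxia_i2c_sequence_ok() is true and xfer_seq() handles it. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *buf, u8 len)
{
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0,        .len = 1,   .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = len, .buf = buf },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
}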
+
static int
axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
int ret = 0;
idev->msg_err = 0;
+
+ if (axxia_i2c_sequence_ok(msgs, num)) {
+ ret = axxia_i2c_xfer_seq(idev, msgs);
+ return ret ? : SEQ_LEN;
+ }
+
i2c_int_enable(idev, MST_STATUS_TSS);
for (i = 0; ret == 0 && i < num; ++i)
+// SPDX-License-Identifier: GPL-2.0
/*
* BCM2835 master mode driver
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
-/*
- * Copyright (C) 2013 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Expose an I2C passthrough to the ChromeOS EC.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Expose an I2C passthrough to the ChromeOS EC.
+//
+// Copyright (C) 2013 Google, Inc.
#include <linux/module.h>
#include <linux/i2c.h>
break;
}
- if (unlikely(signal_pending(current))){
+	if (signal_pending(current)) {
DBG("%d: poll interrupted\n", dev->idx);
ret = -ERESTARTSYS;
break;
/* Get I2C clock */
i2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_imx->clk)) {
- dev_err(&pdev->dev, "can't get I2C clock\n");
+ if (PTR_ERR(i2c_imx->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "can't get I2C clock\n");
return PTR_ERR(i2c_imx->clk);
}
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_CDF_SMT 0x18ac
#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
}
static const struct of_device_id owl_i2c_of_match[] = {
+ { .compatible = "actions,s700-i2c" },
{ .compatible = "actions,s900-i2c" },
{ /* sentinel */ }
};
return (be32_to_cpup(prop) & 0xff) >> 1;
/* Now handle some devices with missing "reg" properties */
- if (!strcmp(node->name, "cereal"))
+ if (of_node_name_eq(node, "cereal"))
return 0x60;
- else if (!strcmp(node->name, "deq"))
+ else if (of_node_name_eq(node, "deq"))
return 0x34;
dev_warn(&adap->dev, "No i2c address for %pOF\n", node);
}
/* Now look for known workarounds */
- if (!strcmp(node->name, "deq")) {
+ if (of_node_name_eq(node, "deq")) {
/* Apple uses address 0x34 for TAS3001 and 0x35 for TAS3004 */
if (addr == 0x34) {
snprintf(type, type_size, "MAC,tas3001");
* case we skip this function completely as the device-tree will
* not contain anything useful.
*/
- if (!strcmp(adap->dev.of_node->name, "via-pmu"))
+ if (of_node_name_eq(adap->dev.of_node, "via-pmu"))
return;
for_each_child_of_node(adap->dev.of_node, node) {
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
+ { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
{ .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
{ .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
{},
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
#define STM32F7_SCLH_MAX BIT(8)
#define STM32F7_SCLL_MAX BIT(8)
+#define STM32F7_AUTOSUSPEND_DELAY (HZ / 100)
+
/**
* struct stm32f7_i2c_spec - private i2c specification timing
* @rate: I2C bus speed (Hz)
* slave)
* @dma: dma data
* @use_dma: boolean to know if dma is used in the current transfer
+ * @regmap: holds SYSCFG phandle for Fast Mode Plus bits
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
bool master_mode;
struct stm32_i2c_dma *dma;
bool use_dma;
+ struct regmap *regmap;
};
/**
i2c_dev->msg_id = 0;
f7_msg->smbus = false;
- ret = clk_enable(i2c_dev->clk);
- if (ret) {
- dev_err(i2c_dev->dev, "Failed to enable clock\n");
+ ret = pm_runtime_get_sync(i2c_dev->dev);
+ if (ret < 0)
return ret;
- }
ret = stm32f7_i2c_wait_free_bus(i2c_dev);
if (ret)
- goto clk_free;
+ goto pm_free;
stm32f7_i2c_xfer_msg(i2c_dev, msgs);
ret = -ETIMEDOUT;
}
-clk_free:
- clk_disable(i2c_dev->clk);
+pm_free:
+ pm_runtime_mark_last_busy(i2c_dev->dev);
+ pm_runtime_put_autosuspend(i2c_dev->dev);
return (ret < 0) ? ret : num;
}
f7_msg->read_write = read_write;
f7_msg->smbus = true;
- ret = clk_enable(i2c_dev->clk);
- if (ret) {
- dev_err(i2c_dev->dev, "Failed to enable clock\n");
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
return ret;
- }
ret = stm32f7_i2c_wait_free_bus(i2c_dev);
if (ret)
- goto clk_free;
+ goto pm_free;
ret = stm32f7_i2c_smbus_xfer_msg(i2c_dev, flags, command, data);
if (ret)
- goto clk_free;
+ goto pm_free;
timeout = wait_for_completion_timeout(&i2c_dev->complete,
i2c_dev->adap.timeout);
ret = f7_msg->result;
if (ret)
- goto clk_free;
+ goto pm_free;
if (!timeout) {
dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
if (i2c_dev->use_dma)
dmaengine_terminate_all(dma->chan_using);
ret = -ETIMEDOUT;
- goto clk_free;
+ goto pm_free;
}
/* Check PEC */
if ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && read_write) {
ret = stm32f7_i2c_smbus_check_pec(i2c_dev);
if (ret)
- goto clk_free;
+ goto pm_free;
}
if (read_write && size != I2C_SMBUS_QUICK) {
}
}
-clk_free:
- clk_disable(i2c_dev->clk);
+pm_free:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
if (ret)
return ret;
- if (!(stm32f7_i2c_is_slave_registered(i2c_dev))) {
- ret = clk_enable(i2c_dev->clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- return ret;
- }
- }
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
if (id == 0) {
/* Configure Own Address 1 */
oar2 &= ~STM32F7_I2C_OAR2_MASK;
if (slave->flags & I2C_CLIENT_TEN) {
ret = -EOPNOTSUPP;
- goto exit;
+ goto pm_free;
}
oar2 |= STM32F7_I2C_OAR2_OA2_7(slave->addr);
writel_relaxed(oar2, i2c_dev->base + STM32F7_I2C_OAR2);
} else {
ret = -ENODEV;
- goto exit;
+ goto pm_free;
}
/* Enable ACK */
STM32F7_I2C_CR1_PE;
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
- return 0;
-
-exit:
- if (!(stm32f7_i2c_is_slave_registered(i2c_dev)))
- clk_disable(i2c_dev->clk);
+ ret = 0;
+pm_free:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
WARN_ON(!i2c_dev->slave[id]);
+ ret = pm_runtime_get_sync(i2c_dev->dev);
+ if (ret < 0)
+ return ret;
+
if (id == 0) {
mask = STM32F7_I2C_OAR1_OA1EN;
stm32f7_i2c_clr_bits(base + STM32F7_I2C_OAR1, mask);
i2c_dev->slave[id] = NULL;
- if (!(stm32f7_i2c_is_slave_registered(i2c_dev))) {
+ if (!(stm32f7_i2c_is_slave_registered(i2c_dev)))
stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK);
- clk_disable(i2c_dev->clk);
- }
+
+ pm_runtime_mark_last_busy(i2c_dev->dev);
+ pm_runtime_put_autosuspend(i2c_dev->dev);
return 0;
}
+static int stm32f7_i2c_setup_fm_plus_bits(struct platform_device *pdev,
+ struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ u32 reg, mask;
+
+ i2c_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg-fmp");
+ if (IS_ERR(i2c_dev->regmap)) {
+ /* Optional */
+ return 0;
+ }
+
+	ret = of_property_read_u32_index(np, "st,syscfg-fmp", 1, &reg);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "st,syscfg-fmp", 2, &mask);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(i2c_dev->regmap, reg, mask, mask);
+}
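The property layout the helper expects follows from the indexed reads above; a hedged sketch of the matching device-tree entry (node name, offset and mask are illustrative):

/* i2c@40005400 {
 *         ...
 *         st,syscfg-fmp = <&syscfg 0x4 0x2>;   // phandle, register offset, bit mask
 * };
 * Index 0 is the SYSCFG phandle resolved by syscon_regmap_lookup_by_phandle(),
 * index 1 the register offset and index 2 the Fast Mode Plus enable mask that
 * is OR'ed in through regmap_update_bits().
 */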
+
static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SLAVE |
dev_err(&pdev->dev, "Error: Missing controller clock\n");
return PTR_ERR(i2c_dev->clk);
}
+
ret = clk_prepare_enable(i2c_dev->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare_enable clock\n");
i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
ret = device_property_read_u32(&pdev->dev, "clock-frequency",
&clk_rate);
- if (!ret && clk_rate >= 1000000)
+ if (!ret && clk_rate >= 1000000) {
i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS;
- else if (!ret && clk_rate >= 400000)
+ ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
+ if (ret)
+ goto clk_free;
+ } else if (!ret && clk_rate >= 400000) {
i2c_dev->speed = STM32_I2C_SPEED_FAST;
- else if (!ret && clk_rate >= 100000)
+ } else if (!ret && clk_rate >= 100000) {
i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
+ }
rst = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(rst)) {
if (ret)
goto clk_free;
- stm32f7_i2c_hw_config(i2c_dev);
-
adap = &i2c_dev->adap;
i2c_set_adapdata(adap, i2c_dev);
snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)",
STM32F7_I2C_TXDR,
STM32F7_I2C_RXDR);
- ret = i2c_add_adapter(adap);
- if (ret)
- goto clk_free;
-
platform_set_drvdata(pdev, i2c_dev);
- clk_disable(i2c_dev->clk);
+ pm_runtime_set_autosuspend_delay(i2c_dev->dev,
+ STM32F7_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(i2c_dev->dev);
+ pm_runtime_set_active(i2c_dev->dev);
+ pm_runtime_enable(i2c_dev->dev);
+
+ pm_runtime_get_noresume(&pdev->dev);
+
+ stm32f7_i2c_hw_config(i2c_dev);
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ goto pm_disable;
dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr);
+ pm_runtime_mark_last_busy(i2c_dev->dev);
+ pm_runtime_put_autosuspend(i2c_dev->dev);
+
return 0;
+pm_disable:
+ pm_runtime_put_noidle(i2c_dev->dev);
+ pm_runtime_disable(i2c_dev->dev);
+ pm_runtime_set_suspended(i2c_dev->dev);
+ pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+
clk_free:
clk_disable_unprepare(i2c_dev->clk);
}
i2c_del_adapter(&i2c_dev->adap);
+ pm_runtime_get_sync(i2c_dev->dev);
- clk_unprepare(i2c_dev->clk);
+ clk_disable_unprepare(i2c_dev->clk);
+
+ pm_runtime_put_noidle(i2c_dev->dev);
+ pm_runtime_disable(i2c_dev->dev);
+ pm_runtime_set_suspended(i2c_dev->dev);
+ pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32f7_i2c_runtime_suspend(struct device *dev)
+{
+ struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev))
+ clk_disable_unprepare(i2c_dev->clk);
+
+ return 0;
+}
+
+static int stm32f7_i2c_runtime_resume(struct device *dev)
+{
+ struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev)) {
+ ret = clk_prepare_enable(i2c_dev->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clock\n");
+ return ret;
+ }
+ }
return 0;
}
+#endif
+
+static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend,
+ stm32f7_i2c_runtime_resume, NULL)
+};
static const struct of_device_id stm32f7_i2c_match[] = {
{ .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup},
.driver = {
.name = "stm32f7-i2c",
.of_match_table = stm32f7_i2c_match,
+ .pm = &stm32f7_i2c_pm_ops,
},
.probe = stm32f7_i2c_probe,
.remove = stm32f7_i2c_remove,
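The stm32f7 conversion above replaces open-coded clk_enable()/clk_disable() calls with the runtime-PM autosuspend idiom. As a minimal, hypothetical sketch of that bracket (foo_xfer() and its error handling are illustrative, not the driver's actual code):

#include <linux/pm_runtime.h>

static int foo_xfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume; the clock is re-enabled in runtime_resume */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the reference taken by get_sync */
		return ret;
	}

	/* ... touch the hardware here ... */
	ret = 0;

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* idle once the autosuspend delay expires */
	return ret;
}

The runtime_suspend/runtime_resume callbacks then own the clock, while pm_runtime_set_autosuspend_delay() and pm_runtime_use_autosuspend() in probe decide how long the controller stays powered after the last put.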
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/i2c/busses/i2c-tegra.c
*
* Copyright (C) 2010 Google, Inc.
* Author: Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/kernel.h>
* @has_continue_xfer_support: Continue transfer supports.
* @has_per_pkt_xfer_complete_irq: Has enable/disable capability for transfer
* complete interrupt per packet basis.
- * @has_single_clk_source: The i2c controller has single clock source. Tegra30
- * and earlier Socs has two clock sources i.e. div-clk and
+ * @has_single_clk_source: The I2C controller has single clock source. Tegra30
+ * and earlier SoCs have two clock sources i.e. div-clk and
* fast-clk.
* @has_config_load_reg: Has the config load register to load the new
* configuration.
* @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. It is
* applicable if there is no fast clock source i.e. single clock
* source.
+ * @clk_divisor_fast_plus_mode: Clock divisor in fast mode plus. It is
+ * applicable if there is no fast clock source (i.e. single
+ * clock source).
+ * @has_multi_master_mode: The I2C controller supports running in single-master
+ * or multi-master mode.
+ * @has_slcg_override_reg: The I2C controller supports a register that
+ * overrides the second level clock gating.
+ * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
+ * provides additional features and allows for longer messages to
+ * be transferred in one go.
+ * @quirks: i2c adapter quirks for limiting write/read transfer size and not
+ * allowing 0 length transfers.
*/
-
struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
bool has_per_pkt_xfer_complete_irq;
bool has_multi_master_mode;
bool has_slcg_override_reg;
bool has_mst_fifo;
+ const struct i2c_adapter_quirks *quirks;
};
/**
- * struct tegra_i2c_dev - per device i2c context
+ * struct tegra_i2c_dev - per device I2C context
* @dev: device reference for power management
- * @hw: Tegra i2c hw feature.
- * @adapter: core i2c layer adapter information
- * @div_clk: clock reference for div clock of i2c controller.
- * @fast_clk: clock reference for fast clock of i2c controller.
+ * @hw: Tegra I2C HW feature
+ * @adapter: core I2C layer adapter information
+ * @div_clk: clock reference for div clock of I2C controller
+ * @fast_clk: clock reference for fast clock of I2C controller
+ * @rst: reset control for the I2C controller
* @base: ioremapped registers cookie
- * @cont_id: i2c controller id, used for for packet header
- * @irq: irq number of transfer complete interrupt
- * @is_dvc: identifies the DVC i2c controller, has a different register layout
+ * @cont_id: I2C controller ID, used for packet header
+ * @irq: IRQ number of transfer complete interrupt
+ * @irq_disabled: used to track whether or not the interrupt is enabled
+ * @is_dvc: identifies the DVC I2C controller, has a different register layout
* @msg_complete: transfer completion notifier
* @msg_err: error code for completed message
* @msg_buf: pointer to current message data
* @msg_buf_remaining: size of unsent data in the message buffer
* @msg_read: identifies read transfers
- * @bus_clk_rate: current i2c bus clock rate
+ * @bus_clk_rate: current I2C bus clock rate
+ * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes
+ * @is_multimaster_mode: track if I2C controller is in multi-master mode
+ * @xfer_lock: lock to serialize transfer submission and processing
*/
struct tegra_i2c_dev {
struct device *dev;
u32 status;
const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
struct tegra_i2c_dev *i2c_dev = dev_id;
- unsigned long flags;
status = i2c_readl(i2c_dev, I2C_INT_STATUS);
- spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
+ spin_lock(&i2c_dev->xfer_lock);
if (status == 0) {
dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
complete(&i2c_dev->msg_complete);
done:
- spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
+ spin_unlock(&i2c_dev->xfer_lock);
return IRQ_HANDLED;
}
.max_write_len = 4096,
};
+static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_continue_xfer_support = false,
.has_per_pkt_xfer_complete_irq = false,
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = true,
+ .quirks = &tegra194_i2c_quirks,
};
/* Match table for of_platform binding */
i2c_dev->base = base;
i2c_dev->div_clk = div_clk;
i2c_dev->adapter.algo = &tegra_i2c_algo;
- i2c_dev->adapter.quirks = &tegra_i2c_quirks;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
i2c_dev->hw = of_device_get_match_data(&pdev->dev);
i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
"nvidia,tegra20-i2c-dvc");
+ i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
init_completion(&i2c_dev->msg_complete);
spin_lock_init(&i2c_dev->xfer_lock);
data_arg.data);
}
case I2C_RETRIES:
+ if (arg > INT_MAX)
+ return -EINVAL;
+
client->adapter->retries = arg;
break;
case I2C_TIMEOUT:
+ if (arg > INT_MAX)
+ return -EINVAL;
+
/* For historical reasons, user-space sets the timeout
* value in units of 10 ms.
*/
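The I2C_RETRIES/I2C_TIMEOUT hunk above guards against a user-supplied unsigned long overflowing the int fields it is stored in. A minimal sketch of the same check (foo_set_retries() is hypothetical):

#include <linux/i2c.h>
#include <linux/kernel.h>

static int foo_set_retries(struct i2c_adapter *adap, unsigned long arg)
{
	if (arg > INT_MAX)
		return -EINVAL;		/* would overflow adap->retries, which is an int */

	adap->retries = arg;
	return 0;
}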
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
- if (!old_dyn_addr)
- return 0;
-
master->addrs[data->index] = dev->info.dyn_addr;
return 0;
return -ENOMEM;
data->index = pos;
- master->addrs[pos] = dev->info.dyn_addr;
+ master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
return PTR_ERR(master->pclk);
master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ if (IS_ERR(master->sysclk))
+ return PTR_ERR(master->sysclk);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
drive->proc = proc_mkdir(drive->name, parent);
if (drive->proc) {
ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
- proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
+ proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
drive->proc, &ide_settings_proc_fops,
drive);
}
stepconfig |= STEPCONFIG_MODE_SWCNT;
tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
- stepconfig | STEPCONFIG_INP(chan));
+ stepconfig | STEPCONFIG_INP(chan) |
+ STEPCONFIG_INM_ADCREFM |
+ STEPCONFIG_RFP_VREFP |
+ STEPCONFIG_RFM_VREFN);
if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) {
dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- rdma_restrack_kadd(&id_priv->res);
+ if (id_priv->res.kern_name)
+ rdma_restrack_kadd(&id_priv->res);
+ else
+ rdma_restrack_uadd(&id_priv->res);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
} while (0)
SET_DEVICE_OP(dev_ops, add_gid);
+ SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
- if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
- pd->unsafe_global_rkey))
- goto err;
if (fill_res_name_pid(msg, res))
goto err;
enum uverbs_obj_access access,
bool commit);
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
+
void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
{
int ret;
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ return uverbs_copy_to_struct_or_zero(
+ attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
+
if (copy_to_user(attrs->ucore.outbuf, resp,
min(attrs->ucore.outlen, resp_len)))
return -EFAULT;
goto out_put;
}
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
+
ret = 0;
out_put:
return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp)
+ if (!qp) {
+ ret = -EINVAL;
goto out;
+ }
is_ud = qp->qp_type == IB_QPT_UD;
sg_ind = 0;
0, uattr->len - len);
}
+static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
+ const struct uverbs_attr *attr)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ u16 flags;
+
+ flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
+ UVERBS_ATTR_F_VALID_OUTPUT;
+ if (put_user(flags,
+ &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
+ return -EFAULT;
+ return 0;
+}
+
static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
const struct uverbs_api_attr *attr_uapi,
struct uverbs_objs_arr_attr *attr,
}
/*
+ * Until the drivers are revised to use the bundle directly we have to
+ * assume that the driver wrote to its UHW_OUT and flag userspace
+ * appropriately.
+ */
+ if (!ret && pbundle->method_elm->has_udata) {
+ const struct uverbs_attr *attr =
+ uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
+
+ if (!IS_ERR(attr))
+ ret = uverbs_set_output(&pbundle->bundle, attr);
+ }
+
+ /*
* EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
* not invoke the method because the request is not supported. No
* other cases should return this code.
int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
const void *from, size_t size)
{
- struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
- u16 flags;
size_t min_size;
if (IS_ERR(attr))
if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
return -EFAULT;
- flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
- UVERBS_ATTR_F_VALID_OUTPUT;
- if (put_user(flags,
- &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
- return -EFAULT;
-
- return 0;
+ return uverbs_set_output(bundle, attr);
}
EXPORT_SYMBOL(uverbs_copy_to);
+
+/*
+ * This is only used if the caller has directly used copy_to_user() to write the
+ * data. It signals to user space that the buffer is filled in.
+ */
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ return uverbs_set_output(bundle, attr);
+}
+
int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, s64 lower_bound, u64 upper_bound,
s64 *def_val)
{
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
- if (clear_user(u64_to_user_ptr(attr->ptr_attr.data),
- attr->ptr_attr.len))
- return -EFAULT;
+ if (size < attr->ptr_attr.len) {
+ if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
+ attr->ptr_attr.len - size))
+ return -EFAULT;
+ }
return uverbs_copy_to(bundle, idx, from, size);
}
buf += sizeof(hdr);
+ memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
bundle.ufile = file;
if (!method_elm->is_ex) {
size_t in_len = hdr.in_words * 4 - sizeof(hdr);
return NULL;
sbuf->size = size;
- sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
- &sbuf->dma_addr, GFP_ATOMIC);
+ sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
+ &sbuf->dma_addr, GFP_ATOMIC);
if (!sbuf->sb)
goto bail;
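The dma_zalloc_coherent() → dma_alloc_coherent() substitutions here and in the hunks that follow rely on dma_alloc_coherent() itself returning zeroed memory, which made the zalloc wrapper redundant. A minimal sketch (foo_alloc() is hypothetical):

#include <linux/dma-mapping.h>

static void *foo_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
	/* The returned buffer is already zero-filled; no memset() needed. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}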
if (!sghead) {
for (i = 0; i < pages; i++) {
- pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
- pbl->pg_size,
- &pbl->pg_map_arr[i],
- GFP_KERNEL);
+ pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
pbl->pg_count++;
req.cos0 = cpu_to_le16(cids[0]);
req.cos1 = cpu_to_le16(cids[1]);
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
- 0);
- return 0;
+ return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 0);
}
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
if (!wq->sq)
goto err3;
- wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
+ wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+ depth * sizeof(union t3_wr),
+ &(wq->dma_addr), GFP_KERNEL);
if (!wq->queue)
goto err4;
wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
T4_RQT_ENTRY_SHIFT;
- wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev,
- wq->memsize, &wq->dma_addr,
- GFP_KERNEL);
+ wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+ &wq->dma_addr, GFP_KERNEL);
if (!wq->queue)
goto err_free_rqtpool;
goto done;
/* allocate dummy tail memory for all receive contexts */
- dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, sizeof(u64),
- &dd->rcvhdrtail_dummy_dma,
- GFP_KERNEL);
+ dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+ sizeof(u64),
+ &dd->rcvhdrtail_dummy_dma,
+ GFP_KERNEL);
if (!dd->rcvhdrtail_dummy_kvaddr) {
dd_dev_err(dd, "cannot allocate dummy tail memory\n");
gfp_flags = GFP_KERNEL;
else
gfp_flags = GFP_USER;
- rcd->rcvhdrq = dma_zalloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
- gfp_flags | __GFP_COMP);
+ rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+ &rcd->rcvhdrq_dma,
+ gfp_flags | __GFP_COMP);
if (!rcd->rcvhdrq) {
dd_dev_err(dd,
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
- rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE,
- &rcd->rcvhdrqtailaddr_dma, gfp_flags);
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+ PAGE_SIZE,
+ &rcd->rcvhdrqtailaddr_dma,
+ gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
}
while (alloced_bytes < rcd->egrbufs.size &&
rcd->egrbufs.alloced < rcd->egrbufs.count) {
rcd->egrbufs.buffers[idx].addr =
- dma_zalloc_coherent(&dd->pcidev->dev,
- rcd->egrbufs.rcvtid_size,
- &rcd->egrbufs.buffers[idx].dma,
- gfp_flags);
+ dma_alloc_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.rcvtid_size,
+ &rcd->egrbufs.buffers[idx].dma,
+ gfp_flags);
if (rcd->egrbufs.buffers[idx].addr) {
rcd->egrbufs.buffers[idx].len =
rcd->egrbufs.rcvtid_size;
int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
set_dev_node(&dd->pcidev->dev, i);
- dd->cr_base[i].va = dma_zalloc_coherent(
- &dd->pcidev->dev,
- bytes,
- &dd->cr_base[i].dma,
- GFP_KERNEL);
+ dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+ bytes,
+ &dd->cr_base[i].dma,
+ GFP_KERNEL);
if (!dd->cr_base[i].va) {
set_dev_node(&dd->pcidev->dev, dd->node);
dd_dev_err(dd,
timer_setup(&sde->err_progress_check_timer,
sdma_err_progress_check, 0);
- sde->descq = dma_zalloc_coherent(
- &dd->pcidev->dev,
- descq_cnt * sizeof(u64[2]),
- &sde->descq_phys,
- GFP_KERNEL
- );
+ sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+ descq_cnt * sizeof(u64[2]),
+ &sde->descq_phys, GFP_KERNEL);
if (!sde->descq)
goto bail;
sde->tx_ring =
dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
/* Allocate memory for DMA of head registers to memory */
- dd->sdma_heads_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- dd->sdma_heads_size,
- &dd->sdma_heads_phys,
- GFP_KERNEL
- );
+ dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+ dd->sdma_heads_size,
+ &dd->sdma_heads_phys,
+ GFP_KERNEL);
if (!dd->sdma_heads_dma) {
dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
goto bail;
}
/* Allocate memory for pad */
- dd->sdma_pad_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- sizeof(u32),
- &dd->sdma_pad_phys,
- GFP_KERNEL
- );
+ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+ &dd->sdma_pad_phys, GFP_KERNEL);
if (!dd->sdma_pad_dma) {
dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
goto bail;
buf->npages = 1 << order;
buf->page_shift = page_shift;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_zalloc_coherent(dev,
- size, &t, GFP_KERNEL);
+ buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+ GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_zalloc_coherent(dev,
- page_size, &t,
- GFP_KERNEL);
+ buf->page_list[i].buf = dma_alloc_coherent(dev,
+ page_size,
+ &t,
+ GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
size = (eq->entries - eqe_alloc) * eq->eqe_size;
}
- eq->buf[i] = dma_zalloc_coherent(dev, size,
+ eq->buf[i] = dma_alloc_coherent(dev, size,
&(eq->buf_dma[i]),
GFP_KERNEL);
if (!eq->buf[i])
size = (eq->entries - eqe_alloc)
* eq->eqe_size;
}
- eq->buf[idx] = dma_zalloc_coherent(dev, size,
- &(eq->buf_dma[idx]),
- GFP_KERNEL);
+ eq->buf[idx] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[idx]),
+ GFP_KERNEL);
if (!eq->buf[idx])
goto err_dma_alloc_buf;
goto free_cmd_mbox;
}
- eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz,
+ eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
&(eq->buf_list->map),
GFP_KERNEL);
if (!eq->buf_list->buf) {
if (!mem)
return I40IW_ERR_PARAM;
mem->size = ALIGN(size, alignment);
- mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
- (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
if (!mem->va)
return I40IW_ERR_NO_MEMORY;
return 0;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Wait until all page fault handlers using the mr complete. */
- if (mr->umem && mr->umem->is_odp)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
return err;
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- bool odp_mkey_exist = false;
-#endif
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr;
LIST_HEAD(del_list);
break;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (mr->umem && mr->umem->is_odp)
- odp_mkey_exist = true;
-#endif
list_move(&mr->list, &del_list);
ent->cur--;
ent->size--;
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (odp_mkey_exist)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- bool odp_mkey_exist = false;
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr;
LIST_HEAD(del_list);
break;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- if (mr->umem && mr->umem->is_odp)
- odp_mkey_exist = true;
list_move(&mr->list, &del_list);
ent->cur--;
ent->size--;
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (odp_mkey_exist)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
goto err_umem;
}
- uid = (attr->qp_type != IB_QPT_XRC_TGT) ? to_mpd(pd)->uid : 0;
+ uid = (attr->qp_type != IB_QPT_XRC_TGT &&
+ attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
MLX5_SET(create_qp_in, *in, uid, uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+ MTHCA_ICM_PAGE_SIZE, &page->mapping,
+ GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
{
struct mthca_ucontext *context;
- qp = kmalloc(sizeof *qp, GFP_KERNEL);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
if (udata)
return ERR_PTR(-EINVAL);
- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
+ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+ GFP_KERNEL);
if (!q->va)
return -ENOMEM;
return 0;
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
qp->sq.len = len;
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
qp->rq.pa = pa;
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
- GFP_KERNEL);
+ qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
int i;
qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
+ if (unlikely(!qp))
+ return -EINVAL;
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
return ERR_PTR(-ENOMEM);
for (i = 0; i < pbl_info->num_pbls; i++) {
- va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
- &pa, flags);
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+ flags);
if (!va)
goto err;
static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
- return (enum pvrdma_wr_opcode)op;
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ return PVRDMA_WR_RDMA_WRITE;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+ case IB_WR_SEND:
+ return PVRDMA_WR_SEND;
+ case IB_WR_SEND_WITH_IMM:
+ return PVRDMA_WR_SEND_WITH_IMM;
+ case IB_WR_RDMA_READ:
+ return PVRDMA_WR_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_LSO:
+ return PVRDMA_WR_LSO;
+ case IB_WR_SEND_WITH_INV:
+ return PVRDMA_WR_SEND_WITH_INV;
+ case IB_WR_RDMA_READ_WITH_INV:
+ return PVRDMA_WR_RDMA_READ_WITH_INV;
+ case IB_WR_LOCAL_INV:
+ return PVRDMA_WR_LOCAL_INV;
+ case IB_WR_REG_MR:
+ return PVRDMA_WR_FAST_REG_MR;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_REG_SIG_MR:
+ return PVRDMA_WR_REG_SIG_MR;
+ default:
+ return PVRDMA_WR_ERROR;
+ }
}
static inline enum ib_wc_status pvrdma_wc_status_to_ib(
dev_info(&pdev->dev, "device version %d, driver version %d\n",
dev->dsr_version, PVRDMA_VERSION);
- dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
- &dev->dsrbase, GFP_KERNEL);
+ dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
if (!dev->dsr) {
dev_err(&pdev->dev, "failed to allocate shared region\n");
ret = -ENOMEM;
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
wqe_hdr->ex.imm_data = wr->ex.imm_data;
+ if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (qp->ibqp.qp_type) {
case IB_QPT_GSI:
case IB_QPT_UD:
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_ah *ah, *tah;
- LIST_HEAD(remove_list);
unsigned long flags;
netif_tx_lock_bh(dev);
return -ENOMEM;
ts->pdev = pdev;
- ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
- GFP_KERNEL);
+ ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
+ GFP_KERNEL);
if (!ts->fw_regs_va) {
dev_err(dev, "failed to dma_alloc_coherent\n");
return -ENOMEM;
spin_lock_init(&dom->pgtlock);
- dom->pgt_va = dma_zalloc_coherent(data->dev,
- M2701_IOMMU_PGT_SIZE,
- &dom->pgt_pa, GFP_KERNEL);
+ dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
+ &dom->pgt_pa, GFP_KERNEL);
if (!dom->pgt_va)
return -ENOMEM;
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
*/
- if (dev->bus && !device_iommu_mapped(dev))
+ if (!err && dev->bus && !device_iommu_mapped(dev))
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
/* Setup 64 channel slots */
for (i = 0; i < INTC_IRQS; i += 4)
- writel_relaxed(build_channel_val(i, magic), reg_addr + i);
+ writel(build_channel_val(i, magic), reg_addr + i);
}
static int __init
static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
u32 irq_base)
{
- u32 irq;
-
if (hwirq == 0)
return 0;
- while (hwirq) {
- irq = __ffs(hwirq);
- hwirq &= ~BIT(irq);
- handle_domain_irq(root_domain, irq_base + irq, regs);
- }
+ handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
return 1;
}
{
bool ret;
- do {
- ret = handle_irq_perbit(regs,
- readl_relaxed(reg_base + GX_INTC_PEN31_00), 0);
- ret |= handle_irq_perbit(regs,
- readl_relaxed(reg_base + GX_INTC_PEN63_32), 32);
- } while (ret);
+retry:
+ ret = handle_irq_perbit(regs,
+ readl(reg_base + GX_INTC_PEN63_32), 32);
+ if (ret)
+ goto retry;
+
+ ret = handle_irq_perbit(regs,
+ readl(reg_base + GX_INTC_PEN31_00), 0);
+ if (ret)
+ goto retry;
}
static int __init
/*
* Initial enable reg to disable all interrupts
*/
- writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00);
- writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32);
+ writel(0x0, reg_base + GX_INTC_NEN31_00);
+ writel(0x0, reg_base + GX_INTC_NEN63_32);
/*
* Initial mask reg with all unmasked, because we only use enable reg
*/
- writel_relaxed(0x0, reg_base + GX_INTC_NMASK31_00);
- writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32);
+ writel(0x0, reg_base + GX_INTC_NMASK31_00);
+ writel(0x0, reg_base + GX_INTC_NMASK63_32);
setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);
void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;
- do {
- /* handle 0 - 31 irqs */
- ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0);
- ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32);
+retry:
+ /* handle 0 - 63 irqs */
+ ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
+ if (ret)
+ goto retry;
- if (nr_irq == INTC_IRQS)
- continue;
+ ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
+ if (ret)
+ goto retry;
+
+ if (nr_irq == INTC_IRQS)
+ return;
- /* handle 64 - 127 irqs */
- ret |= handle_irq_perbit(regs,
- readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
- ret |= handle_irq_perbit(regs,
- readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
- } while (ret);
+ /* handle 64 - 127 irqs */
+ ret = handle_irq_perbit(regs,
+ readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
+ if (ret)
+ goto retry;
+
+ ret = handle_irq_perbit(regs,
+ readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
+ if (ret)
+ goto retry;
}
static int __init
return ret;
/* Initial enable reg to disable all interrupts */
- writel_relaxed(0, reg_base + CK_INTC_NEN31_00);
- writel_relaxed(0, reg_base + CK_INTC_NEN63_32);
+ writel(0, reg_base + CK_INTC_NEN31_00);
+ writel(0, reg_base + CK_INTC_NEN63_32);
/* Enable irq intc */
- writel_relaxed(BIT(31), reg_base + CK_INTC_ICR);
+ writel(BIT(31), reg_base + CK_INTC_ICR);
ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
return ret;
/* Initial enable reg to disable all interrupts */
- writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
- writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
+ writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
+ writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
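The csky INTC rework above stops draining a single snapshot of the pending register bit by bit; each dispatched interrupt is followed by a fresh register read, and __fls() picks the highest-numbered pending bit from that read. A minimal sketch of the per-read helper, assuming the IRQ domain is passed in explicitly (foo_handle_one() is hypothetical):

#include <linux/bitops.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

static bool foo_handle_one(struct irq_domain *domain, struct pt_regs *regs,
			   u32 pend, u32 irq_base)
{
	if (!pend)
		return false;

	/* __fls() returns the index of the highest set bit in pend. */
	handle_domain_irq(domain, irq_base + __fls(pend), regs);
	return true;
}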
int i, j;
for (j = 0; j < AVM_MAXVERSION; j++)
- cinfo->version[j] = "\0\0" + 1;
+ cinfo->version[j] = "";
for (i = 0, j = 0;
j < AVM_MAXVERSION && i < cinfo->versionlen;
j++, i += cinfo->versionbuf[i] + 1)
struct dchannel *dch = &hw->dch;
int i;
- phi = kzalloc(sizeof(struct ph_info) +
- dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
+ phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
phi->dch.ch.protocol = hw->protocol;
phi->dch.ch.Flags = dch->Flags;
phi->dch.state = dch->state;
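The mISDN hunk above switches an open-coded trailing-array size computation to struct_size(), which also saturates on overflow. A minimal sketch with hypothetical foo_info/foo_ch types:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_ch {
	u32 state;
};

struct foo_info {
	int nrbchan;
	struct foo_ch bch[];	/* flexible array member */
};

static struct foo_info *foo_info_alloc(int nrbchan)
{
	struct foo_info *phi;

	/* sizeof(*phi) + nrbchan * sizeof(phi->bch[0]), overflow-checked */
	phi = kzalloc(struct_size(phi, bch, nrbchan), GFP_ATOMIC);
	if (phi)
		phi->nrbchan = nrbchan;
	return phi;
}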
{
modem_info *info = (modem_info *) tty->driver_data;
+ mutex_lock(&modem_info_mutex);
if (!old_termios)
isdn_tty_change_speed(info);
else {
if (tty->termios.c_cflag == old_termios->c_cflag &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
- tty->termios.c_ospeed == old_termios->c_ospeed)
+ tty->termios.c_ospeed == old_termios->c_ospeed) {
+ mutex_unlock(&modem_info_mutex);
return;
+ }
isdn_tty_change_speed(info);
}
+ mutex_unlock(&modem_info_mutex);
}
/*
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ if (ret)
+ return ret;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
- struct bio *b;
-
if (!mddev || !bioset_initialized(&mddev->bio_set))
return bio_alloc(gfp_mask, nr_iovecs);
- b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
- if (!b)
- return NULL;
- return b;
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
*/
int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
- struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
char name[BDEVNAME_SIZE];
if (!mddev->gendisk)
return 0;
- bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
return 0;
abort:
- if (mddev->flush_bio_pool) {
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- }
- if (mddev->flush_pool){
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
- }
+ mempool_destroy(mddev->flush_bio_pool);
+ mddev->flush_bio_pool = NULL;
+ mempool_destroy(mddev->flush_pool);
+ mddev->flush_pool = NULL;
return err;
}
kfree(plug);
}
+/*
+ * 1. Register the new request and wait if the reconstruction thread has put
+ * up a bar for new requests. Continue immediately if no resync is active
+ * currently.
+ * 2. If the IO spans the reshape position, wait for the reshape to pass.
+ */
+static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+ struct bio *bio, sector_t sectors)
+{
+ wait_barrier(conf);
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+ raid10_log(conf->mddev, "wait reshape");
+ allow_barrier(conf);
+ wait_event(conf->wait_barrier,
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
+ wait_barrier(conf);
+ }
+}
+
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct r10bio *r10_bio)
{
const int op = bio_op(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
int max_sectors;
- sector_t sectors;
struct md_rdev *rdev;
char b[BDEVNAME_SIZE];
int slot = r10_bio->read_slot;
}
rcu_read_unlock();
}
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
- sectors = r10_bio->sectors;
- while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_iter.bi_sector < conf->reshape_progress &&
- bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- /*
- * IO spans the reshape position. Need to wait for reshape to
- * pass
- */
- raid10_log(conf->mddev, "wait reshape");
- allow_barrier(conf);
- wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_iter.bi_sector ||
- conf->reshape_progress >= bio->bi_iter.bi_sector +
- sectors);
- wait_barrier(conf);
- }
+ regular_request_wait(mddev, conf, bio, r10_bio->sectors);
rdev = read_balance(conf, r10_bio, &max_sectors);
if (!rdev) {
if (err_rdev) {
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
bio_chain(split, bio);
+ allow_barrier(conf);
generic_make_request(bio);
+ wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
finish_wait(&conf->wait_barrier, &w);
}
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
sectors = r10_bio->sectors;
- while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_iter.bi_sector < conf->reshape_progress &&
- bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- /*
- * IO spans the reshape position. Need to wait for reshape to
- * pass
- */
- raid10_log(conf->mddev, "wait reshape");
- allow_barrier(conf);
- wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_iter.bi_sector ||
- conf->reshape_progress >= bio->bi_iter.bi_sector +
- sectors);
- wait_barrier(conf);
- }
-
+ regular_request_wait(mddev, conf, bio, sectors);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
struct bio *split = bio_split(bio, r10_bio->sectors,
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
+ allow_barrier(conf);
generic_make_request(bio);
+ wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
}
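In both raid10 hunks above the resync barrier is released across the recursive generic_make_request() call for the tail of a split bio and re-taken afterwards, presumably so the requeued remainder cannot sit behind a raised barrier while the submitter still holds it. Sketched in the context of drivers/md/raid10.c (allow_barrier()/wait_barrier() are that file's internal helpers; foo_submit_remainder() is hypothetical):

static void foo_submit_remainder(struct r10conf *conf, struct bio *remainder)
{
	allow_barrier(conf);			/* don't hold the barrier across submission */
	generic_make_request(remainder);	/* may recurse back into the raid10 make_request path */
	wait_barrier(conf);			/* re-acquire before handling the split head */
}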
{
struct device *dev = &cio2->pci_dev->dev;
- q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
- GFP_KERNEL);
+ q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
+ GFP_KERNEL);
if (!q->fbpt)
return -ENOMEM;
struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
struct device *dev = &ctx->dev->plat_dev->dev;
- mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
if (!mem->va) {
mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
size);
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
- cancel_delayed_work_sync(&dev->work_run);
+ if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
+ cancel_delayed_work_sync(&dev->work_run);
+
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
const struct v4l2_window *win;
const struct v4l2_sdr_format *sdr;
const struct v4l2_meta_format *meta;
+ u32 planes;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
prt_names(mp->field, v4l2_field_names),
mp->colorspace, mp->num_planes, mp->flags,
mp->ycbcr_enc, mp->quantization, mp->xfer_func);
- for (i = 0; i < mp->num_planes; i++)
+ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+ for (i = 0; i < planes; i++)
printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
mp->plane_fmt[i].bytesperline,
mp->plane_fmt[i].sizeimage);
if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
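Each of the v4l2-ioctl hunks above bounds the user-supplied num_planes before the per-plane CLEAR_AFTER_FIELD() loop indexes the fixed-size plane_fmt[] array. A minimal sketch of the same bound check (foo_clear_planes() is hypothetical):

#include <linux/string.h>
#include <linux/videodev2.h>

static int foo_clear_planes(struct v4l2_pix_format_mplane *mp)
{
	unsigned int i;

	if (mp->num_planes > VIDEO_MAX_PLANES)
		return -EINVAL;		/* would index past plane_fmt[VIDEO_MAX_PLANES] */

	for (i = 0; i < mp->num_planes; i++)
		memset(mp->plane_fmt[i].reserved, 0,
		       sizeof(mp->plane_fmt[i].reserved));
	return 0;
}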
config MFD_AT91_USART
tristate "AT91 USART Driver"
select MFD_CORE
+ depends on ARCH_AT91 || COMPILE_TEST
help
Select this to get support for AT91 USART IP. This is a wrapper
over at91-usart-serial driver and usart-spi-driver. Only one function
mutex_unlock(&ab8500->lock);
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
- return ret;
+ return (ret < 0) ? ret : 0;
}
static int ab8500_get_register(struct device *dev, u8 bank,
static const struct mfd_cell axp223_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp22x_pek_resources),
- .resources = axp22x_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp22x_pek_resources),
+ .resources = axp22x_pek_resources,
}, {
.name = "axp22x-adc",
.of_compatible = "x-powers,axp221-adc",
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp221-battery-power-supply",
}, {
- .name = "axp20x-regulator",
+ .name = "axp20x-regulator",
}, {
.name = "axp20x-ac-power-supply",
.of_compatible = "x-powers,axp221-ac-power-supply",
static const struct mfd_cell axp152_cells[] = {
{
- .name = "axp20x-pek",
- .num_resources = ARRAY_SIZE(axp152_pek_resources),
- .resources = axp152_pek_resources,
+ .name = "axp20x-pek",
+ .num_resources = ARRAY_SIZE(axp152_pek_resources),
+ .resources = axp152_pek_resources,
},
};
static const struct mfd_cell axp288_cells[] = {
{
- .name = "axp288_adc",
- .num_resources = ARRAY_SIZE(axp288_adc_resources),
- .resources = axp288_adc_resources,
- },
- {
- .name = "axp288_extcon",
- .num_resources = ARRAY_SIZE(axp288_extcon_resources),
- .resources = axp288_extcon_resources,
- },
- {
- .name = "axp288_charger",
- .num_resources = ARRAY_SIZE(axp288_charger_resources),
- .resources = axp288_charger_resources,
- },
- {
- .name = "axp288_fuel_gauge",
- .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
- .resources = axp288_fuel_gauge_resources,
- },
- {
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp288_power_button_resources),
- .resources = axp288_power_button_resources,
- },
- {
- .name = "axp288_pmic_acpi",
+ .name = "axp288_adc",
+ .num_resources = ARRAY_SIZE(axp288_adc_resources),
+ .resources = axp288_adc_resources,
+ }, {
+ .name = "axp288_extcon",
+ .num_resources = ARRAY_SIZE(axp288_extcon_resources),
+ .resources = axp288_extcon_resources,
+ }, {
+ .name = "axp288_charger",
+ .num_resources = ARRAY_SIZE(axp288_charger_resources),
+ .resources = axp288_charger_resources,
+ }, {
+ .name = "axp288_fuel_gauge",
+ .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
+ .resources = axp288_fuel_gauge_resources,
+ }, {
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp288_power_button_resources),
+ .resources = axp288_power_button_resources,
+ }, {
+ .name = "axp288_pmic_acpi",
},
};
static const struct mfd_cell axp803_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
- .resources = axp803_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
+ .resources = axp803_pek_resources,
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
+ }, {
+ .name = "axp813-adc",
+ .of_compatible = "x-powers,axp813-adc",
+ }, {
+ .name = "axp20x-battery-power-supply",
+ .of_compatible = "x-powers,axp813-battery-power-supply",
+ }, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp813-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
+ .resources = axp20x_ac_power_supply_resources,
},
- { .name = "axp20x-regulator" },
+ { .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_self_working_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp806_pek_resources),
- .resources = axp806_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp806_pek_resources),
+ .resources = axp806_pek_resources,
},
- { .name = "axp20x-regulator" },
+ { .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_cells[] = {
{
- .id = 2,
- .name = "axp20x-regulator",
+ .id = 2,
+ .name = "axp20x-regulator",
},
};
static const struct mfd_cell axp809_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp809_pek_resources),
- .resources = axp809_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp809_pek_resources),
+ .resources = axp809_pek_resources,
}, {
- .id = 1,
- .name = "axp20x-regulator",
+ .id = 1,
+ .name = "axp20x-regulator",
},
};
static const struct mfd_cell axp813_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
- .resources = axp803_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
+ .resources = axp803_pek_resources,
}, {
- .name = "axp20x-regulator",
+ .name = "axp20x-regulator",
}, {
- .name = "axp20x-gpio",
- .of_compatible = "x-powers,axp813-gpio",
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}, {
- .name = "axp813-adc",
- .of_compatible = "x-powers,axp813-adc",
+ .name = "axp813-adc",
+ .of_compatible = "x-powers,axp813-adc",
}, {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp813-battery-power-supply",
+ }, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp813-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
+ .resources = axp20x_ac_power_supply_resources,
},
};
};
static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
+ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
cros_ec_debugfs_remove(ec);
+ mfd_remove_devices(ec->dev);
cdev_del(&ec->cdev);
device_unregister(&ec->class_dev);
return 0;
.irq_unmask = prcmu_irq_unmask,
};
-static __init char *fw_project_name(u32 project)
+static char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
-static void __init init_prcm_registers(void)
+static void init_prcm_registers(void)
{
u32 val;
LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK,
- LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
+ LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S |
+ LPASS_INTR_UART);
exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET);
exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET);
exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET);
+ exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET);
}
static void exynos_lpass_disable(struct exynos_lpass *lpass)
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
MADERA_BOOT_POLL_INTERVAL_USEC);
regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
- };
+ }
if (!(val & MADERA_BOOT_DONE_STS1)) {
dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
dev_set_drvdata(madera->dev, madera);
BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier);
+ mutex_init(&madera->dapm_ptr_lock);
+
madera_set_micbias_info(madera);
/*
for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
sprintf(fps_name, "fps%d", fps_id);
- if (!strcmp(fps_np->name, fps_name))
+ if (of_node_name_eq(fps_np, fps_name))
break;
}
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ if (ret)
+ goto out;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
MC13XXX_ADC0_CHRGRAWDIV;
default:
dev_err(&pdev->dev, "unsupported chip: %d\n", id);
- ret = -ENODEV;
- break;
+ return -ENODEV;
}
if (ret) {
return -EFAULT;
}
+ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
+ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
+ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
+
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
fw_version[1],
fw_version[2]);
/**
* struct rave_sp_checksum - Variant specific checksum implementation details
*
- * @length: Caculated checksum length
+ * @length: Calculated checksum length
* @subroutine: Utilized checksum algorithm implementation
*/
struct rave_sp_checksum {
pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
for_each_child_of_node(np, child) {
- if (!strcmp(child->name, "stmpe_gpio")) {
+ if (of_node_name_eq(child, "stmpe_gpio")) {
pdata->blocks |= STMPE_BLOCK_GPIO;
- } else if (!strcmp(child->name, "stmpe_keypad")) {
+ } else if (of_node_name_eq(child, "stmpe_keypad")) {
pdata->blocks |= STMPE_BLOCK_KEYPAD;
- } else if (!strcmp(child->name, "stmpe_touchscreen")) {
+ } else if (of_node_name_eq(child, "stmpe_touchscreen")) {
pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
- } else if (!strcmp(child->name, "stmpe_adc")) {
+ } else if (of_node_name_eq(child, "stmpe_adc")) {
pdata->blocks |= STMPE_BLOCK_ADC;
- } else if (!strcmp(child->name, "stmpe_pwm")) {
+ } else if (of_node_name_eq(child, "stmpe_pwm")) {
pdata->blocks |= STMPE_BLOCK_PWM;
- } else if (!strcmp(child->name, "stmpe_rotator")) {
+ } else if (of_node_name_eq(child, "stmpe_rotator")) {
pdata->blocks |= STMPE_BLOCK_ROTATOR;
}
}
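The max77620 and stmpe hunks above replace strcmp() on the ->name field with of_node_name_eq(), which matches the node name while ignoring any '@unit-address' suffix and does not rely on the ->name pointer. A minimal sketch (foo_is_gpio_child() is hypothetical):

#include <linux/of.h>

static bool foo_is_gpio_child(const struct device_node *child)
{
	/* Matches "stmpe_gpio" and "stmpe_gpio@0" alike. */
	return of_node_name_eq(child, "stmpe_gpio");
}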
cell->pdata_size = sizeof(tscadc);
}
- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
- tscadc->used_cells, NULL, 0, NULL);
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ tscadc->cells, tscadc->used_cells, NULL,
+ 0, NULL);
if (err < 0)
goto err_disable_clk;
mutex_init(&tps->tps_lock);
- ret = regmap_add_irq_chip(tps->regmap, tps->irq,
- IRQF_ONESHOT, 0, &tps65218_irq_chip,
- &tps->irq_data);
+ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
+ IRQF_ONESHOT, 0, &tps65218_irq_chip,
+ &tps->irq_data);
if (ret < 0)
return ret;
ARRAY_SIZE(tps65218_cells), NULL, 0,
regmap_irq_get_domain(tps->irq_data));
- if (ret < 0)
- goto err_irq;
-
- return 0;
-
-err_irq:
- regmap_del_irq_chip(tps->irq, tps->irq_data);
-
return ret;
}
-static int tps65218_remove(struct i2c_client *client)
-{
- struct tps65218 *tps = i2c_get_clientdata(client);
-
- regmap_del_irq_chip(tps->irq, tps->irq_data);
-
- return 0;
-}
-
static const struct i2c_device_id tps65218_id_table[] = {
{ "tps65218", TPS65218 },
{ },
.of_match_table = of_tps65218_match_table,
},
.probe = tps65218_probe,
- .remove = tps65218_remove,
.id_table = tps65218_id_table,
};
return 0;
}
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ disable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ enable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
+ tps6586x_i2c_resume);
+
static const struct i2c_device_id tps6586x_id_table[] = {
{ "tps6586x", 0 },
{ },
.driver = {
.name = "tps6586x",
.of_match_table = of_match_ptr(tps6586x_of_match),
+ .pm = &tps6586x_pm_ops,
},
.probe = tps6586x_i2c_probe,
.remove = tps6586x_i2c_remove,
* letting it generate the right frequencies for USB, MADC, and
* other purposes.
*/
-static inline int __init protect_pm_master(void)
+static inline int protect_pm_master(void)
{
int e = 0;
return e;
}
-static inline int __init unprotect_pm_master(void)
+static inline int unprotect_pm_master(void)
{
int e = 0;
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
case ARIZONA_ASRC_ENABLE:
case ARIZONA_ASRC_STATUS:
case ARIZONA_ASRC_RATE1:
+ case ARIZONA_ASRC_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:
ones like at24c64, 24lc02 or fm24c04:
24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
- 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
+ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
Unless you like data loss puzzles, always be sure that any chip
you configure as a 24c32 (32 kbit) or larger is NOT really a
AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
+AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
/* identical to 24c08 ? */
AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
{ "24c512", (kernel_ulong_t)&at24_data_24c512 },
{ "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
+ { "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
{ "at24", 0 },
{ /* END OF LIST */ }
};
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
{ .compatible = "atmel,24c512", .data = &at24_data_24c512 },
{ .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
+ { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
{ /* END OF LIST */ },
};
MODULE_DEVICE_TABLE(of, at24_of_match);
if (get_order(size) >= MAX_ORDER)
return NULL;
- return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
- GFP_KERNEL);
+ return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+ GFP_KERNEL);
}
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
struct _vop_vdev *vdev = to_vopvdev(dev);
struct vop_device *vpdev = vdev->vpdev;
struct mic_device_ctrl __iomem *dc = vdev->dc;
- int i, err, retry;
+ int i, err, retry, queue_idx = 0;
/* We must have this many virtqueues. */
if (nvqs > ioread8(&vdev->desc->num_vq))
return -ENOENT;
for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
__func__, i, names[i]);
- vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
+ vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
if (device_property_read_bool(dev, "broken-cd"))
host->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+ ret = mmc_gpiod_request_cd(host, "cd", 0, false,
cd_debounce_delay_ms * 1000,
&cd_gpio_invert);
if (!ret)
* Use zalloc to zero the reserved high 32-bits of 128-bit
* descriptors so that they never need to be written.
*/
- buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
- host->adma_table_sz, &dma, GFP_KERNEL);
+ buf = dma_alloc_coherent(mmc_dev(mmc),
+ host->align_buffer_sz + host->adma_table_sz,
+ &dma, GFP_KERNEL);
if (!buf) {
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
mtd->nvmem = nvmem_register(&config);
if (IS_ERR(mtd->nvmem)) {
/* Just ignore if there is no NVMEM support in the kernel */
- if (PTR_ERR(mtd->nvmem) == -ENOSYS) {
+ if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
mtd->nvmem = NULL;
} else {
dev_err(&mtd->dev, "Failed to register NVMEM device\n");
extern struct mutex mtd_table_mutex;
struct mtd_info *__mtd_next_device(int i);
-int add_mtd_device(struct mtd_info *mtd);
+int __must_check add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
- add_mtd_device(&new->mtd);
+ ret = add_mtd_device(&new->mtd);
+ if (ret)
+ goto err_remove_part;
mtd_add_partition_attrs(new);
+ return 0;
+
+err_remove_part:
+ mutex_lock(&mtd_partitions_mutex);
+ list_del(&new->list);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ free_partition(new);
+
return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
{
struct mtd_part *slave;
uint64_t cur_offset = 0;
- int i;
+ int i, ret;
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
slave = allocate_partition(master, parts + i, i, cur_offset);
if (IS_ERR(slave)) {
- del_mtd_partitions(master);
- return PTR_ERR(slave);
+ ret = PTR_ERR(slave);
+ goto err_del_partitions;
}
mutex_lock(&mtd_partitions_mutex);
list_add(&slave->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
- add_mtd_device(&slave->mtd);
+ ret = add_mtd_device(&slave->mtd);
+ if (ret) {
+ mutex_lock(&mtd_partitions_mutex);
+ list_del(&slave->list);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ free_partition(slave);
+ goto err_del_partitions;
+ }
+
mtd_add_partition_attrs(slave);
/* Look for subpartitions */
parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
}
return 0;
+
+err_del_partitions:
+ del_mtd_partitions(master);
+
+ return ret;
}
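The two hunks above follow the usual register-then-unwind shape: if add_mtd_device() fails, the partition is taken back off the list and freed, and a failure part-way through add_mtd_partitions() rolls back everything registered so far. A self-contained sketch of that shape, using hypothetical register_item()/unregister_item() helpers rather than the MTD API:

	#include <stdio.h>

	static int register_item(int i)
	{
		return (i == 2) ? -1 : 0;	/* pretend the third item fails */
	}

	static void unregister_item(int i)
	{
		printf("rolled back item %d\n", i);
	}

	int main(void)
	{
		int i, ret = 0;

		for (i = 0; i < 4; i++) {
			ret = register_item(i);
			if (ret)
				goto err_unwind;
		}
		return 0;

	err_unwind:
		while (--i >= 0)	/* undo in reverse order of registration */
			unregister_item(i);
		return ret;
	}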
static DEFINE_SPINLOCK(part_parser_lock);
}
/* clk rate info is needed for setup_data_interface */
- if (denali->clk_rate && denali->clk_x_rate)
+ if (!denali->clk_rate || !denali->clk_x_rate)
chip->options |= NAND_KEEP_TIMINGS;
chip->legacy.dummy_controller.ops = &denali_controller_ops;
dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
-/* fsmc_select_chip - assert or deassert nCE */
-static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
-{
- u32 pc = readl(host->regs_va + FSMC_PC);
-
- if (!assert)
- writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
- else
- writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
-
- /*
- * nCE line changes must be applied before returning from this
- * function.
- */
- mb();
-}
-
/*
* fsmc_exec_op - hook called by the core to execute NAND operations
*
pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
- fsmc_ce_ctrl(host, true);
-
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
}
}
- fsmc_ce_ctrl(host, false);
-
return ret;
}
}
static int jz_nand_ioremap_resource(struct platform_device *pdev,
- const char *name, struct resource **res, void *__iomem *base)
+ const char *name, struct resource **res, void __iomem **base)
{
int ret;
if (ret)
return ret;
+ if (nandc->props->is_bam) {
+ free_bam_transaction(nandc);
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ return -ENOMEM;
+ }
+ }
+
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
nand_cleanup(chip);
struct qcom_nand_host *host;
int ret;
- if (nandc->props->is_bam) {
- free_bam_transaction(nandc);
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- return -ENOMEM;
- }
- }
-
for_each_available_child_of_node(dn, child) {
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
and destroy a failover master netdev and manages a primary and
standby slave netdevs that get registered via the generic failover
infrastructure. This can be used by paravirtual drivers to enable
- an alternate low latency datapath. It alsoenables live migration of
+ an alternate low latency datapath. It also enables live migration of
a VM with direct attached VF by failing over to the paravirtual
datapath when the VF is unplugged.
if (!bond_has_slaves(bond)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
+ bond->nest_level = SINGLE_DEPTH_NESTING;
+ } else {
+ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
}
unblock_netpoll_tx();
#include <linux/delay.h>
#include <linux/export.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
return mv88e6xxx_g1_stats_clear(chip);
}
+/* The mv88e6390 has some hidden registers used for debug and
+ * development. The errata workaround also makes use of them.
+ */
+static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
+ int reg, u16 val)
+{
+ u16 ctrl;
+ int err;
+
+ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
+ PORT_RESERVED_1A, val);
+ if (err)
+ return err;
+
+ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
+ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+ PORT_RESERVED_1A, ctrl);
+}
+
+static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
+ PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
+}
+
+static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
+ int reg, u16 *val)
+{
+ u16 ctrl;
+ int err;
+
+ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
+ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+ PORT_RESERVED_1A, ctrl);
+ if (err)
+ return err;
+
+ err = mv88e6390_hidden_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
+ PORT_RESERVED_1A, val);
+}
+
+/* Check if the errata has already been applied. */
+static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+ u16 val;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6390_hidden_read(chip, port, 0, &val);
+ if (err) {
+ dev_err(chip->dev,
+ "Error reading hidden register: %d\n", err);
+ return false;
+ }
+ if (val != 0x01c0)
+ return false;
+ }
+
+ return true;
+}
+
+/* The 6390 copper ports have an erratum which requires poking magic
+ * values into undocumented hidden registers and then performing a
+ * software reset.
+ */
+static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ if (mv88e6390_setup_errata_applied(chip))
+ return 0;
+
+ /* Set the ports into blocking mode */
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
+ if (err)
+ return err;
+ }
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
+ if (err)
+ return err;
+ }
+
+ return mv88e6xxx_software_reset(chip);
+}
+
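For reference, the control word written to the hidden-register block packs the busy flag, the operation, the block selector, the port and the register number into one 16-bit value; the PORT_RESERVED_1A_* definitions appear later in this series. A standalone sketch of that packing (values mirrored from those defines, port and reg chosen arbitrarily):

	#include <stdio.h>
	#include <stdint.h>

	#define BUSY		(1u << 15)
	#define WRITE		(1u << 14)
	#define BLOCK		(0xfu << 10)
	#define PORT_SHIFT	5

	int main(void)
	{
		unsigned int port = 2, reg = 0;
		uint16_t ctrl = BUSY | WRITE | BLOCK | (port << PORT_SHIFT) | reg;

		printf("write ctrl word for port %u, reg %u: 0x%04x\n",
		       port, reg, (unsigned int)ctrl);
		return 0;
	}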
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
mutex_lock(&chip->reg_lock);
+ if (chip->info->ops->setup_errata) {
+ err = chip->info->ops->setup_errata(chip);
+ if (err)
+ goto unlock;
+ }
+
/* Cache the cmode of each port. */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (chip->info->ops->port_get_cmode) {
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
};
struct mv88e6xxx_ops {
+ /* Switch Setup Errata, called early in the switch setup to
+	 * allow any errata workarounds to be applied
+ */
+ int (*setup_errata)(struct mv88e6xxx_chip *chip);
+
int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
+/* Offset 0x1a: Magic undocumented errata register */
+#define PORT_RESERVED_1A 0x1a
+#define PORT_RESERVED_1A_BUSY BIT(15)
+#define PORT_RESERVED_1A_WRITE BIT(14)
+#define PORT_RESERVED_1A_READ 0
+#define PORT_RESERVED_1A_PORT_SHIFT 5
+#define PORT_RESERVED_1A_BLOCK (0xf << 10)
+#define PORT_RESERVED_1A_CTRL_PORT 4
+#define PORT_RESERVED_1A_DATA_PORT 5
+
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val);
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
struct device_node *mdio_np;
int ret;
- mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
- "realtek,smi-mdio");
+ mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
if (!mdio_np) {
dev_err(smi->dev, "no MDIO bus node\n");
return -ENODEV;
}
smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus)
- return -ENOMEM;
+ if (!smi->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
smi->slave_mii_bus->priv = smi;
smi->slave_mii_bus->name = "SMI slave MII";
smi->slave_mii_bus->read = realtek_smi_mdio_read;
if (ret) {
dev_err(smi->dev, "unable to register MDIO bus %s\n",
smi->slave_mii_bus->id);
- of_node_put(mdio_np);
+ goto err_put_node;
}
return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
}
static int realtek_smi_probe(struct platform_device *pdev)
struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
dsa_unregister_switch(smi->ds);
+ if (smi->slave_mii_bus)
+ of_node_put(smi->slave_mii_bus->dev.of_node);
gpiod_set_value(smi->reset, 1);
return 0;
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL);
+ greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->tx_bd_base) {
err = -ENOMEM;
goto error3;
}
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL);
+ greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->rx_bd_base) {
err = -ENOMEM;
goto error4;
size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
- descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr,
- GFP_KERNEL);
+ descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
+ GFP_KERNEL);
if (!descs) {
netdev_err(sdev->netdev,
"failed to allocate status descriptors\n");
struct slic_shmem_data *sm_data;
dma_addr_t paddr;
- sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
- &paddr, GFP_KERNEL);
+ sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
+ &paddr, GFP_KERNEL);
if (!sm_data) {
dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
return -ENOMEM;
int err = 0;
u8 *mac[2];
- eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
- &paddr, GFP_KERNEL);
+ eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
+ &paddr, GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
struct ena_com_admin_sq *sq = &queue->sq;
u16 size = ADMIN_SQ_SIZE(queue->q_depth);
- sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
- GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+ GFP_KERNEL);
if (!sq->entries) {
pr_err("memory allocation failed");
struct ena_com_admin_cq *cq = &queue->cq;
u16 size = ADMIN_CQ_SIZE(queue->q_depth);
- cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
- GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+ GFP_KERNEL);
if (!cq->entries) {
pr_err("memory allocation failed");
dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
- GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
if (!aenq->entries) {
pr_err("memory allocation failed");
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr,
+ GFP_KERNEL);
}
if (!io_cq->cdesc_addr.virt_addr) {
struct ena_rss *rss = &ena_dev->rss;
rss->hash_key =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
struct ena_rss *rss = &ena_dev->rss;
rss->hash_ctrl =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
sizeof(struct ena_admin_rss_ind_table_entry);
rss->rss_ind_tbl =
- dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+ &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
spin_lock_init(&mmio_read->lock);
mmio_read->read_resp =
- dma_zalloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
host_attr->host_info =
- dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
host_attr->debug_area_virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
- &host_attr->debug_area_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
+ &host_attr->debug_area_dma_addr,
+ GFP_KERNEL);
if (unlikely(!host_attr->debug_area_virt_addr)) {
host_attr->debug_area_size = 0;
return -ENOMEM;
#define MAC_MDIOSCAR_PA_WIDTH 5
#define MAC_MDIOSCAR_RA_INDEX 0
#define MAC_MDIOSCAR_RA_WIDTH 16
-#define MAC_MDIOSCAR_REG_INDEX 0
-#define MAC_MDIOSCAR_REG_WIDTH 21
#define MAC_MDIOSCCDR_BUSY_INDEX 22
#define MAC_MDIOSCCDR_BUSY_WIDTH 1
#define MAC_MDIOSCCDR_CMD_INDEX 16
}
}
+static unsigned int xgbe_create_mdio_sca(int port, int reg)
+{
+ unsigned int mdio_sca, da;
+
+ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+ return mdio_sca;
+}
+
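The helper above leans on the kernel's clause-45 MDIO convention: a C45 access sets the MII_ADDR_C45 bit (bit 30) in the register argument and carries the MMD device address above the 16-bit register number, so reg >> 16 recovers the DA while the low half stays the RA. A standalone illustration (the devad/regnum values are arbitrary):

	#include <stdio.h>

	#define MII_ADDR_C45	(1 << 30)

	int main(void)
	{
		int devad = 1, regnum = 0x0007;
		int reg = MII_ADDR_C45 | (devad << 16) | regnum;

		printf("da = %d, ra = 0x%04x\n",
		       (reg & MII_ADDR_C45) ? (reg >> 16) & 0x1f : 0,
		       (unsigned int)(reg & 0xffff));
		return 0;
	}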
static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
int reg, u16 val)
{
reinit_completion(&pdata->mdio_complete);
- mdio_sca = 0;
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = 0;
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
}
/* Packet buffers should be 64B aligned */
- pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
- GFP_ATOMIC);
+ pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+ GFP_ATOMIC);
if (unlikely(!pkt_buf)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
ring->ndev = ndev;
size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
- ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
- GFP_KERNEL);
+ ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
+ GFP_KERNEL);
if (!ring->desc_addr)
goto err;
alx->num_txq +
sizeof(struct alx_rrd) * alx->rx_ringsz +
sizeof(struct alx_rfd) * alx->rx_ringsz;
- alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
- alx->descmem.size,
- &alx->descmem.dma,
- GFP_KERNEL);
+ alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ &alx->descmem.dma, GFP_KERNEL);
if (!alx->descmem.virt)
return -ENOMEM;
sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
8 * 4;
- ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
- &ring_header->dma, GFP_KERNEL);
+ ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
goto err_nomem;
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_freeirq_tx;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_free_rx_ring;
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
/* We just need one DMA descriptor which is DMA-able, since writing to
* the port will allocate a new descriptor in its internal linked-list
*/
- p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
- GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+ GFP_KERNEL);
if (!p) {
netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
return -ENOMEM;
/* Alloc ring of descriptors */
size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
- ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
- &ring->dma_base,
- GFP_KERNEL);
+ ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
if (!ring->cpu_base) {
dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
ring->mmio_base);
/* Alloc ring of descriptors */
size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
- ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
- &ring->dma_base,
- GFP_KERNEL);
+ ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
if (!ring->cpu_base) {
dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
ring->mmio_base);
BNX2_SBLK_MSIX_ALIGN_SIZE);
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
if (!status_blk)
return -ENOMEM;
bool is_pf);
#define BNX2X_ILT_ZALLOC(x, y, size) \
- x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
#define BNX2X_PCI_ALLOC(y, size) \
({ \
- void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x) \
DP(NETIF_MSG_HW, \
"BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
goto alloc_tx_ext_stats;
bp->hw_rx_port_stats_ext =
- dma_zalloc_coherent(&pdev->dev,
- sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map,
- GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct rx_port_stats_ext),
+ &bp->hw_rx_port_stats_ext_map,
+ GFP_KERNEL);
if (!bp->hw_rx_port_stats_ext)
return 0;
if (bp->hwrm_spec_code >= 0x10902) {
bp->hw_tx_port_stats_ext =
- dma_zalloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
else
flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
}
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (i == (nr_tbls - 1))
- rmem->nr_pages = ctx_pg->nr_pages %
- MAX_CTX_PAGES;
+ if (i == (nr_tbls - 1)) {
+ int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
+
+ if (rem)
+ rmem->nr_pages = rem;
+ }
rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
if (rc)
break;
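The remainder check above matters when ctx_pg->nr_pages is an exact multiple of MAX_CTX_PAGES: the old code then set the last table to zero pages instead of a full one. A standalone illustration of the arithmetic (the MAX_CTX_PAGES value is assumed here just for the example):

	#include <stdio.h>

	#define MAX_CTX_PAGES	512	/* assumed for illustration only */

	int main(void)
	{
		const int nr_pages[] = { 700, 1024 };

		for (int i = 0; i < 2; i++) {
			int last = MAX_CTX_PAGES;
			int rem = nr_pages[i] % MAX_CTX_PAGES;

			if (rem)	/* the old code used rem unconditionally */
				last = rem;
			printf("nr_pages=%d -> last table holds %d pages\n",
			       nr_pages[i], last);
		}
		return 0;
	}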
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
- data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
- GFP_KERNEL);
+ data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
return -EFAULT;
}
- data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
- &data_dma_addr, GFP_KERNEL);
+ data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
+ &data_dma_addr, GFP_KERNEL);
if (!data_addr)
return -ENOMEM;
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 33
-#define HWRM_VERSION_STR "1.10.0.33"
+#define HWRM_VERSION_RSVD 35
+#define HWRM_VERSION_STR "1.10.0.35"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
- tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
}
{
int i;
- tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping, GFP_KERNEL);
+ tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping, GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping,
- GFP_KERNEL);
+ tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
*skb = nskb;
}
- if (padlen) {
- if (padlen >= ETH_FCS_LEN)
- skb_put_zero(*skb, padlen - ETH_FCS_LEN);
- else
- skb_trim(*skb, ETH_FCS_LEN - padlen);
- }
+ if (padlen > ETH_FCS_LEN)
+ skb_put_zero(*skb, padlen - ETH_FCS_LEN);
add_fcs:
/* set FCS to packet */
dmem->q_len = q_len;
dmem->size = (desc_size * q_len) + align_bytes;
/* Save address, need it while freeing */
- dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
&dmem->dma, GFP_KERNEL);
if (!dmem->unalign_base)
return -ENOMEM;
{
size_t len = nelem * elem_size;
void *s = NULL;
- void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+ void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
lro_add_page(adap, qs, fl,
G_RSPD_LEN(len),
flags & F_RSPD_EOP);
- goto next_fl;
+ goto next_fl;
}
skb = get_packet_pg(adap, fl, q,
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *q = &adap->sge.qs[i];
- if (q->tx_reclaim_timer.function)
- mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+ if (q->tx_reclaim_timer.function)
+ mod_timer(&q->tx_reclaim_timer,
+ jiffies + TX_RECLAIM_PERIOD);
- if (q->rx_reclaim_timer.function)
- mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+ if (q->rx_reclaim_timer.function)
+ mod_timer(&q->rx_reclaim_timer,
+ jiffies + RX_RECLAIM_PERIOD);
}
}
CH_WARN(adapter, "found newer FW version(%u.%u), "
"driver compiled for version %u.%u\n", major, minor,
FW_VERSION_MAJOR, FW_VERSION_MINOR);
- return 0;
+ return 0;
}
return -EINVAL;
}
static int init_parity(struct adapter *adap)
{
- int i, err, addr;
+ int i, err, addr;
if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
p->phy.ops->power_down(&p->phy, 1);
}
-return 0;
+ return 0;
}
int err;
memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F |
- FW_PTP_CMD_PORTID_V(0));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.scmd.sc = FW_PTP_SC_INIT_TIMER;
unsigned long flags;
spin_lock_irqsave(&bmap->lock, flags);
- __clear_bit(msix_idx, bmap->msix_bmap);
+ __clear_bit(msix_idx, bmap->msix_bmap);
spin_unlock_irqrestore(&bmap->lock, flags);
}
{
size_t len = nelem * elem_size + stat_size;
void *s = NULL;
- void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL);
+ void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
/* If we have version number support, then check to see if the adapter
* already has up-to-date PHY firmware loaded.
*/
- if (phy_fw_version) {
+ if (phy_fw_version) {
new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
if (ret < 0)
* Allocate the hardware ring and PCI DMA bus address space for said.
*/
size_t hwlen = nelem * hwsize + stat_size;
- void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+ void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
if (!hwring)
return NULL;
total_size = buf_len;
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
- get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- get_fat_cmd.size,
- &get_fat_cmd.dma, GFP_ATOMIC);
+ get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma, GFP_ATOMIC);
if (!get_fat_cmd.va)
return -ENOMEM;
return -EINVAL;
cmd.size = sizeof(struct be_cmd_resp_port_type);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
return -ENOMEM;
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
}
flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
- GFP_KERNEL);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
- attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- attribs_cmd.size,
- &attribs_cmd.dma, GFP_ATOMIC);
+ attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ attribs_cmd.size,
+ &attribs_cmd.dma, GFP_ATOMIC);
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
- get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma,
- GFP_ATOMIC);
+ get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma,
+ GFP_ATOMIC);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- extfat_cmd.size, &extfat_cmd.dma,
- GFP_ATOMIC);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va)
return -ENOMEM;
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- extfat_cmd.size, &extfat_cmd.dma,
- GFP_ATOMIC);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va) {
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_profile_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
int status = 0;
read_cmd.size = LANCER_READ_FILE_CHUNK;
- read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
- &read_cmd.dma, GFP_ATOMIC);
+ read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
+ &read_cmd.dma, GFP_ATOMIC);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
}
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- ddrdma_cmd.size, &ddrdma_cmd.dma,
- GFP_KERNEL);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ ddrdma_cmd.size, &ddrdma_cmd.dma,
+ GFP_KERNEL);
if (!ddrdma_cmd.va)
return -ENOMEM;
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- eeprom_cmd.size, &eeprom_cmd.dma,
- GFP_KERNEL);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ eeprom_cmd.size, &eeprom_cmd.dma,
+ GFP_KERNEL);
if (!eeprom_cmd.va)
return -ENOMEM;
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ &mem->dma, GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
int status = 0;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
- &mbox_mem_alloc->dma,
- GFP_KERNEL);
+ mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va)
return -ENOMEM;
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
- &rx_filter->dma, GFP_KERNEL);
+ rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
if (!rx_filter->va) {
status = -ENOMEM;
goto free_mbox;
stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
else
stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
- stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
- &stats_cmd->dma, GFP_KERNEL);
+ stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+ &stats_cmd->dma, GFP_KERNEL);
if (!stats_cmd->va) {
status = -ENOMEM;
goto free_rx_filter;
return -ENOMEM;
/* Allocate descriptors */
- priv->rxdes = dma_zalloc_coherent(priv->dev,
- MAX_RX_QUEUE_ENTRIES *
- sizeof(struct ftgmac100_rxdes),
- &priv->rxdes_dma, GFP_KERNEL);
+ priv->rxdes = dma_alloc_coherent(priv->dev,
+ MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
+ &priv->rxdes_dma, GFP_KERNEL);
if (!priv->rxdes)
return -ENOMEM;
- priv->txdes = dma_zalloc_coherent(priv->dev,
- MAX_TX_QUEUE_ENTRIES *
- sizeof(struct ftgmac100_txdes),
- &priv->txdes_dma, GFP_KERNEL);
+ priv->txdes = dma_alloc_coherent(priv->dev,
+ MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
+ &priv->txdes_dma, GFP_KERNEL);
if (!priv->txdes)
return -ENOMEM;
{
int i;
- priv->descs = dma_zalloc_coherent(priv->dev,
- sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL);
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
int offset = 0;
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+	/* LLTX requires us to do our own update of trans_start */
+ txq->trans_start = jiffies;
+
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
for (i = 0; i < QUEUE_NUMS; i++) {
size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
- virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
- GFP_KERNEL);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
if (virt_addr == NULL)
goto error_free_pool;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
int i;
- vf_cb->mac_cb = NULL;
-
- kfree(vf_cb);
-
for (i = 0; i < handle->q_num; i++)
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+ kfree(vf_cb);
}
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
if (!h->phy_dev)
return 0;
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+ phy_dev->autoneg = false;
+
if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
phy_dev->dev_flags = 0;
if (unlikely(ret))
return -ENODEV;
- ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, supported);
- linkmode_copy(phy_dev->advertising, phy_dev->supported);
-
- if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
- phy_dev->autoneg = false;
-
- if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
- phy_stop(phy_dev);
-
return 0;
}
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
- &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
- size, &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
- size, &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
u8 *cmd_vaddr;
int err = 0;
- cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- &cmd_paddr, GFP_KERNEL);
+ cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
+ &cmd_paddr, GFP_KERNEL);
if (!cmd_vaddr) {
dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
return -ENOMEM;
dma_addr_t node_paddr;
int err;
- node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
- &node_paddr, GFP_KERNEL);
+ node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
+ GFP_KERNEL);
if (!node) {
dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
return -ENOMEM;
if (!chain->cell_ctxt)
return -ENOMEM;
- chain->wb_status = dma_zalloc_coherent(&pdev->dev,
- sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
+ chain->wb_status = dma_alloc_coherent(&pdev->dev,
+ sizeof(*chain->wb_status),
+ &chain->wb_status_paddr,
+ GFP_KERNEL);
if (!chain->wb_status) {
dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
return -ENOMEM;
}
for (pg = 0; pg < eq->num_pages; pg++) {
- eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
- eq->page_size,
- &eq->dma_addr[pg],
- GFP_KERNEL);
+ eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
+ eq->page_size,
+ &eq->dma_addr[pg],
+ GFP_KERNEL);
if (!eq->virt_addr[pg]) {
err = -ENOMEM;
goto err_dma_alloc;
goto err_sq_db;
}
- ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- &func_to_io->ci_dma_base,
- GFP_KERNEL);
+ ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
+ &func_to_io->ci_dma_base,
+ GFP_KERNEL);
if (!ci_addr_base) {
dev_err(&pdev->dev, "Failed to allocate CI area\n");
err = -ENOMEM;
goto err_cqe_dma_arr_alloc;
for (i = 0; i < wq->q_depth; i++) {
- rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
- sizeof(*rq->cqe[i]),
- &rq->cqe_dma[i], GFP_KERNEL);
+ rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
+ sizeof(*rq->cqe[i]),
+ &rq->cqe_dma[i], GFP_KERNEL);
if (!rq->cqe[i])
goto err_cqe_alloc;
}
/* HW requirements: Must be at least 32 bit */
pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
- &rq->pi_dma_addr, GFP_KERNEL);
+ rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
+ &rq->pi_dma_addr, GFP_KERNEL);
if (!rq->pi_virt_addr) {
dev_err(&pdev->dev, "Failed to allocate PI address\n");
err = -ENOMEM;
struct pci_dev *pdev = hwif->pdev;
dma_addr_t dma_addr;
- *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
- GFP_KERNEL);
+ *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
+ GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
return -ENOMEM;
u64 *paddr = &wq->block_vaddr[i];
dma_addr_t dma_addr;
- *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
- &dma_addr, GFP_KERNEL);
+ *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
+ &dma_addr, GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate wq page\n");
goto err_alloc_wq_pages;
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
goto fail_unmap;
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
- select MDIO_DEVICE
+ select PHYLIB
imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 6;
goto err_nomem;
{
struct pci_dev *pdev = adapter->pdev;
- ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
struct i40e_pf *pf = (struct i40e_pf *)hw->back;
mem->size = ALIGN(size, alignment);
- mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
/* OS defined structs */
struct pci_dev *pdev;
- struct mutex stats64_lock;
+ spinlock_t stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in e1000_hw.h */
int i, j;
char *p;
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
}
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
del_timer_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
adapter->link_duplex = 0;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
- mutex_init(&adapter->stats64_lock);
+ spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
}
}
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = adapter->tx_ring[i];
{
struct igb_adapter *adapter = netdev_priv(netdev);
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
memcpy(stats, &adapter->stats64, sizeof(*stats));
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
}
/**
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
return -ENOMEM;
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
u32 txq_dma;
/* Allocate memory for TX descriptors */
- aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_dma, GFP_KERNEL);
+ aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
if (!cgx->cgx_cmd_workq) {
dev_err(dev, "alloc workqueue failed for cgx cmd");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_free_irq_vectors;
}
list_add(&cgx->cgx_list, &cgx_list);
err_release_lmac:
cgx_lmac_exit(cgx);
list_del(&cgx->cgx_list);
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
qmem->entry_sz = entry_sz;
qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
- qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
+ qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
&qmem->iova, GFP_KERNEL);
if (!qmem->base)
return -ENOMEM;
* table is full.
*/
if (!pep->htpr) {
- pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma, GFP_KERNEL);
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
if (!pep->htpr)
return -ENOMEM;
} else {
pep->rx_desc_count = 0;
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
- pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma,
- GFP_KERNEL);
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_rx_desc_area)
goto out;
pep->tx_desc_count = 0;
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
- pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma,
- GFP_KERNEL);
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_tx_desc_area)
goto out;
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
- if (dev->phydev->link)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
-
if (!of_phy_is_fixed_link(mac->of_node))
phy_print_status(dev->phydev);
}
if (mtk_phy_connect_node(eth, mac, np))
goto err_phy;
- dev->phydev->autoneg = AUTONEG_ENABLE;
- dev->phydev->speed = 0;
- dev->phydev->duplex = 0;
-
- phy_set_max_speed(dev->phydev, SPEED_1000);
- phy_support_asym_pause(dev->phydev);
- linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- dev->phydev->advertising);
- phy_start_aneg(dev->phydev);
-
of_node_put(np);
return 0;
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_zalloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- ð->phy_scratch_ring,
- GFP_ATOMIC);
+ eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ ð->phy_scratch_ring,
+ GFP_ATOMIC);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
return -ENOMEM;
}
- ring->dma = dma_zalloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf =
- dma_zalloc_coherent(&dev->persist->pdev->dev,
- size, &t, GFP_KERNEL);
+ dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
+ GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE, &t, GFP_KERNEL);
+ dma_alloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE, &t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
int i;
if (chunk->nsg > 0)
- pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
+ DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
- __free_pages(sg_page(&chunk->mem[i]),
- get_order(chunk->mem[i].length));
+ __free_pages(sg_page(&chunk->sg[i]),
+ get_order(chunk->sg[i].length));
}
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->persist->pdev->dev,
- chunk->mem[i].length,
- lowmem_page_address(sg_page(&chunk->mem[i])),
- sg_dma_address(&chunk->mem[i]));
+ chunk->buf[i].size,
+ chunk->buf[i].addr,
+ chunk->buf[i].dma_addr);
}
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
return 0;
}
-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
- int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
+ int order, gfp_t gfp_mask)
{
- void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
- &sg_dma_address(mem), gfp_mask);
- if (!buf)
+ buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
+ &buf->dma_addr, gfp_mask);
+ if (!buf->addr)
return -ENOMEM;
- if (offset_in_page(buf)) {
- dma_free_coherent(dev, PAGE_SIZE << order,
- buf, sg_dma_address(mem));
+ if (offset_in_page(buf->addr)) {
+ dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
+ buf->dma_addr);
return -ENOMEM;
}
- sg_set_buf(mem, buf, PAGE_SIZE << order);
- sg_dma_len(mem) = PAGE_SIZE << order;
+ buf->size = PAGE_SIZE << order;
return 0;
}
while (npages > 0) {
if (!chunk) {
- chunk = kmalloc_node(sizeof(*chunk),
+ chunk = kzalloc_node(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN),
dev->numa_node);
if (!chunk) {
- chunk = kmalloc(sizeof(*chunk),
+ chunk = kzalloc(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN));
if (!chunk)
goto fail;
}
+ chunk->coherent = coherent;
- sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
- chunk->npages = 0;
- chunk->nsg = 0;
+ if (!coherent)
+ sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
list_add_tail(&chunk->list, &icm->chunk_list);
}
if (coherent)
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
- &chunk->mem[chunk->npages],
- cur_order, mask);
+ &chunk->buf[chunk->npages],
+ cur_order, mask);
else
- ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+ ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
cur_order, mask,
dev->numa_node);
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
+ chunk->sg, chunk->npages,
+ DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
}
if (!coherent && chunk) {
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
+ chunk->npages, DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
u64 idx;
struct mlx4_icm_chunk *chunk;
struct mlx4_icm *icm;
- struct page *page = NULL;
+ void *addr = NULL;
if (!table->lowmem)
return NULL;
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
+ dma_addr_t dma_addr;
+ size_t len;
+
+ if (table->coherent) {
+ len = chunk->buf[i].size;
+ dma_addr = chunk->buf[i].dma_addr;
+ addr = chunk->buf[i].addr;
+ } else {
+ struct page *page;
+
+ len = sg_dma_len(&chunk->sg[i]);
+ dma_addr = sg_dma_address(&chunk->sg[i]);
+
+ /* XXX: we should never do this for highmem
+ * allocation. This function either needs
+ * to be split, or the kernel virtual address
+ * return needs to be made optional.
+ */
+ page = sg_page(&chunk->sg[i]);
+ addr = lowmem_page_address(page);
+ }
+
if (dma_handle && dma_offset >= 0) {
- if (sg_dma_len(&chunk->mem[i]) > dma_offset)
- *dma_handle = sg_dma_address(&chunk->mem[i]) +
- dma_offset;
- dma_offset -= sg_dma_len(&chunk->mem[i]);
+ if (len > dma_offset)
+ *dma_handle = dma_addr + dma_offset;
+ dma_offset -= len;
}
+
/*
* DMA mapping can merge pages but not split them,
* so if we found the page, dma_handle has already
* been assigned to.
*/
- if (chunk->mem[i].length > offset) {
- page = sg_page(&chunk->mem[i]);
+ if (len > offset)
goto out;
- }
- offset -= chunk->mem[i].length;
+ offset -= len;
}
}
+ addr = NULL;
out:
mutex_unlock(&table->mutex);
- return page ? lowmem_page_address(page) + offset : NULL;
+ return addr ? addr + offset : NULL;
}
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
};
+struct mlx4_icm_buf {
+ void *addr;
+ size_t size;
+ dma_addr_t dma_addr;
+};
+
struct mlx4_icm_chunk {
struct list_head list;
int npages;
int nsg;
- struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
+ bool coherent;
+ union {
+ struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
+ struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
+ };
};
struct mlx4_icm {
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
{
- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+ if (iter->chunk->coherent)
+ return iter->chunk->buf[iter->page_idx].dma_addr;
+ else
+ return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
}
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
{
- return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+ if (iter->chunk->coherent)
+ return iter->chunk->buf[iter->page_idx].size;
+ else
+ return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
}
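The reworked chunk is effectively a tagged union: chunk->coherent selects whether the per-page entries are scatterlist elements or plain coherent buffers, and every accessor (length, DMA address, kernel virtual address) branches on that flag, as the two iterator helpers above do. A self-contained sketch of the same idea; the field names are illustrative and not the mlx4 structures:

	#include <stdio.h>
	#include <stddef.h>

	struct chunk {
		int coherent;				/* discriminant */
		union {					/* only one arm is valid at a time */
			struct { size_t dma_len; } sg;	/* stands in for the scatterlist entry */
			struct { size_t size; } buf;	/* stands in for the coherent buffer */
		};
	};

	static size_t chunk_len(const struct chunk *c)
	{
		return c->coherent ? c->buf.size : c->sg.dma_len;
	}

	int main(void)
	{
		struct chunk a = { .coherent = 1, .buf = { .size = 4096 } };
		struct chunk b = { .coherent = 0, .sg = { .dma_len = 8192 } };

		printf("%zu %zu\n", chunk_len(&a), chunk_len(&b));
		return 0;
	}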
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
mutex_lock(&priv->alloc_mutex);
original_node = dev_to_node(&dev->pdev->dev);
set_dev_node(&dev->pdev->dev, node);
- cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
- dma_handle, GFP_KERNEL);
+ cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+ GFP_KERNEL);
set_dev_node(&dev->pdev->dev, original_node);
mutex_unlock(&priv->alloc_mutex);
return cpu_handle;
{
struct device *ddev = &dev->pdev->dev;
- cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
- &cmd->alloc_dma, GFP_KERNEL);
+ cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+ &cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
cmd->alloc_dma);
- cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
- 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
- &cmd->alloc_dma, GFP_KERNEL);
+ cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+ 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+ &cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
- if (get_fec_supported_advertised(mdev, link_ksettings))
+ err = get_fec_supported_advertised(mdev, link_ksettings);
+ if (err) {
netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
__func__, err);
+ err = 0; /* don't fail caps query because of FEC error */
+ }
if (!an_disable_admin)
ethtool_link_ksettings_add_link_mode(link_ksettings,
struct list_head list;
};
-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev);
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
+ s->tx_queue_dropped += sq_stats->dropped;
}
}
}
struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
list_for_each_entry_safe(cb_priv, temp, head, list) {
- mlx5e_rep_indr_unregister_block(cb_priv->netdev);
+ mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
kfree(cb_priv);
}
}
err = tcf_block_cb_register(f->block,
mlx5e_rep_indr_setup_block_cb,
- netdev, indr_priv, f->extack);
+ indr_priv, indr_priv, f->extack);
if (err) {
list_del(&indr_priv->list);
kfree(indr_priv);
return err;
case TC_BLOCK_UNBIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (!indr_priv)
+ return -ENOENT;
+
tcf_block_cb_unregister(f->block,
mlx5e_rep_indr_setup_block_cb,
- netdev);
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
- if (indr_priv) {
- list_del(&indr_priv->list);
- kfree(indr_priv);
- }
+ indr_priv);
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
return 0;
default:
err = __tc_indr_block_cb_register(netdev, rpriv,
mlx5e_rep_indr_setup_tc_cb,
- netdev);
+ rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
return err;
}
-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
{
__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
- netdev);
+ rpriv);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
mlx5e_rep_indr_register_block(rpriv, netdev);
break;
case NETDEV_UNREGISTER:
- mlx5e_rep_indr_unregister_block(netdev);
+ mlx5e_rep_indr_unregister_block(rpriv, netdev);
break;
}
return NOTIFY_OK;
((struct ipv6hdr *)ip_p)->nexthdr;
}
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
goto csum_unnecessary;
+ /* CQE csum doesn't cover padding octets in short Ethernet
+ * frames, and the pad field is appended prior to calculating
+ * and appending the FCS field.
+ *
+ * Detecting these padded frames requires verifying and parsing
+ * IP headers, so we simply force all those small frames to be
+ * CHECKSUM_UNNECESSARY even if they are not padded.
+ */
+ if (short_frame(skb->len))
+ goto csum_unnecessary;
+
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
goto csum_unnecessary;
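For reference, the threshold used by the short_frame() test above works out as follows (constants as defined in the mainline Ethernet headers; this is a worked example, not new driver code):

#define ETH_ZLEN	60	/* minimum frame length, without FCS */
#define ETH_FCS_LEN	4	/* frame check sequence length */
#define short_frame(size)	((size) <= ETH_ZLEN + ETH_FCS_LEN)

/* short_frame(64) is true, short_frame(65) is false: only frames of at
 * most 64 bytes are forced to CHECKSUM_UNNECESSARY by the hunk above.
 */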
depends on IPV6 || IPV6=n
depends on NET_IPGRE || NET_IPGRE=n
depends on IPV6_GRE || IPV6_GRE=n
+ depends on VXLAN || VXLAN=n
select GENERIC_ALLOCATOR
select PARMAN
select OBJAGG
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+ memcpy(ncqe, cqe, q->elem_size);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
if (sendq) {
struct mlxsw_pci_queue *sdq;
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, cqe);
+ wqe_counter, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, q->u.cq.v, cqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_rdq_count++;
}
if (++items == credits)
break;
}
- if (items) {
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ if (items)
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- }
}
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
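The change above snapshots each completion entry before the consumer doorbell hands the ring element back to the device, so later parsing never races with the hardware reusing it. A condensed sketch of the ordering (handle_completion() is a placeholder, not a real mlxsw symbol):

char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

memcpy(ncqe, cqe, q->elem_size);			/* copy while we still own it */
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);	/* return the element */
handle_completion(ncqe);				/* parse the private copy only */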
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
- break;
+ return 0;
cond_resched();
} while (time_before(jiffies, end));
- return 0;
+ return -EBUSY;
}
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
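The fix above makes the ready poll report success from inside the loop and a timeout only at the end, instead of unconditionally returning 0. The general shape, with the device-specific register read reduced to a placeholder predicate:

unsigned long end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);

do {
	if (device_reports_ready())	/* placeholder for the FW_READY read */
		return 0;		/* success is decided inside the loop */
	cond_resched();
} while (time_before(jiffies, end));

return -EBUSY;				/* reached only when the wait timed out */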
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
lower_dev,
upper_dev);
} else if (netif_is_lag_master(upper_dev)) {
- if (info->linking)
+ if (info->linking) {
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
upper_dev);
- else
+ } else {
+ mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
+ false);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
+ }
} else if (netif_is_ovs_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
act_set = mlxsw_afa_block_first_set(rulei->act_block);
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ goto err_ptce2_write;
+
+ return 0;
+
+err_ptce2_write:
+ cregion->ops->entry_remove(cregion, centry);
+ return err;
}
static void
{
struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
- ASSERT_RTNL();
objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
}
const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
unsigned int erp_bank;
- ASSERT_RTNL();
if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
return;
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE - 1,
- .end_index = MLXSW_SP_RFID_BASE - 1,
+ .start_index = VLAN_N_VID - 1,
+ .end_index = VLAN_N_VID - 1,
.ops = &mlxsw_sp_fid_dummy_ops,
};
ops = nve->nve_ops_arr[params->type];
if (!ops->can_offload(nve, params->dev, extack))
- return -EOPNOTSUPP;
+ return -EINVAL;
memset(&config, 0, sizeof(config));
ops->nve_config(nve, params->dev, &config);
if (nve->num_nve_tunnels &&
memcmp(&config, &nve->config, sizeof(config))) {
NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
- return -EOPNOTSUPP;
+ return -EINVAL;
}
err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
u16 vid, bool is_untagged, bool is_pvid,
- struct netlink_ext_ack *extack,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan->bridge_port != bridge_port)
return -EEXIST;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (!mlxsw_sp_port_vlan) {
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
vid);
return err;
}
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
vid, flag_untagged,
- flag_pvid, extack, trans);
+ flag_pvid, extack);
if (err)
return err;
}
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
- bool dynamic)
+ enum mlxsw_reg_sfd_rec_policy policy)
{
char *sfd_pl;
u8 num_rec;
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+ MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
- false);
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
- u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
struct mlxsw_sp_bridge_device *bridge_device,
const struct net_device *vxlan_dev, u16 vid,
bool flag_untagged, bool flag_pvid,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
return -EINVAL;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (!netif_running(vxlan_dev))
return 0;
port_obj_info->handled = true;
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
return -EINVAL;
err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
vxlan_dev, vid,
flag_untagged,
- flag_pvid, trans,
- extack);
+ flag_pvid, extack);
if (err)
return err;
}
memset(&ksettings, 0, sizeof(ksettings));
phy_ethtool_get_link_ksettings(netdev, &ksettings);
- local_advertisement = phy_read(phydev, MII_ADVERTISE);
- if (local_advertisement < 0)
- return;
-
- remote_advertisement = phy_read(phydev, MII_LPA);
- if (remote_advertisement < 0)
- return;
+ local_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->advertising);
+ remote_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
lan743x_phy_update_flowcontrol(adapter,
ksettings.base.duplex,
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
- ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
- &ss->rx_done.bus,
- GFP_KERNEL);
+ ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+ &ss->rx_done.bus,
+ GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
bytes = sizeof(*ss->fw_stats);
tx_ring->cnt = dp->txd_cnt;
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
- tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
+ tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
if (!tx_ring->txds) {
netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
tx_ring->cnt);
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
- rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
+ rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
if (!rx_ring->rxds) {
netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
rx_ring->cnt);
priv->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*priv->tx_bd_v) * TX_BD_NUM,
- &priv->tx_bd_p, GFP_KERNEL);
+ priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+ &priv->tx_bd_p, GFP_KERNEL);
if (!priv->tx_bd_v)
goto out;
if (!priv->tx_skb)
goto out;
- priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*priv->rx_bd_v) * RX_BD_NUM,
- &priv->rx_bd_p, GFP_KERNEL);
+ priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+ &priv->rx_bd_p, GFP_KERNEL);
if (!priv->rx_bd_v)
goto out;
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool =
- dma_zalloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
if (!rx_ring->rx_buff_pool)
return -ENOMEM;
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
- tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
return -ENOMEM;
return -ENOMEM;
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
- rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
vfree(rx_ring->buffer_info);
return -ENOMEM;
if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
goto out_ring_desc;
- ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
- RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma, GFP_KERNEL);
+ ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+ RX_RING_SIZE * sizeof(u64),
+ &ring->buf_dma, GFP_KERNEL);
if (!ring->buffers)
goto out_ring_desc;
u32 size = min_t(u32, total_size, psz);
void **p_virt = &p_mngr->t2[i].p_virt;
- *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
- size, &p_mngr->t2[i].p_phys,
- GFP_KERNEL);
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_mngr->t2[i].p_phys,
+ GFP_KERNEL);
if (!p_mngr->t2[i].p_virt) {
rc = -ENOMEM;
goto t2_fail;
u32 size;
size = min_t(u32, sz_left, p_blk->real_size_in_page);
- p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
- &p_phys, GFP_KERNEL);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
goto out0;
}
- p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_blk->real_size_in_page, &p_phys,
- GFP_KERNEL);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page, &p_phys,
+ GFP_KERNEL);
if (!p_virt) {
rc = -ENOMEM;
goto out1;
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+
+ /* Make sure chain element is updated before ringing the doorbell */
+ dma_wmb();
+
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
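The dma_wmb() added above enforces the usual producer ordering: the ring element in coherent memory must be visible before the MMIO write that tells the device to consume it. A generic sketch of that pattern (the ring/hw names are hypothetical):

ring->prod_entry = cpu_to_le16(new_prod);	/* 1. publish the descriptor */
dma_wmb();					/* 2. order it before the doorbell */
writel(new_prod, hw->doorbell_reg);		/* 3. notify the device */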
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
struct qlcnic_cmd_args cmd;
size_t nic_size = sizeof(struct qlcnic_info_le);
- nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
void *pci_info_addr;
int err = 0, i;
- pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t, GFP_KERNEL);
+ pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
return -EIO;
}
- stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
if (mac_stats == NULL)
return -ENOMEM;
- stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
ring_header->used = 0;
- ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
+ ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
&ring_header->dma_addr,
GFP_KERNEL);
if (!ring_header->v_addr)
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
+ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
{ PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
{ PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
{ PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_SOFTDEP("pre: realtek");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
static bool rtl8169_update_counters(struct rtl8169_private *tp)
{
+ u8 val = RTL_R8(tp, ChipCmd);
+
/*
* Some chips are unable to dump tally counters when the receiver
- * is disabled.
+ * is disabled. If 0xff, the chip may be in a PCI power-save state.
*/
- if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
+ if (!(val & CmdRxEnb) || val == 0xff)
return true;
return rtl8169_do_counters(tp, CounterDump);
}
/* allocate memory for TX descriptors */
- tx_ring->dma_tx = dma_zalloc_coherent(dev,
- tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
- &tx_ring->dma_tx_phy, GFP_KERNEL);
+ tx_ring->dma_tx = dma_alloc_coherent(dev,
+ tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ &tx_ring->dma_tx_phy, GFP_KERNEL);
if (!tx_ring->dma_tx)
return -ENOMEM;
rx_ring->queue_no = queue_no;
/* allocate memory for RX descriptors */
- rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
- rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
- &rx_ring->dma_rx_phy, GFP_KERNEL);
+ rx_ring->dma_rx = dma_alloc_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ &rx_ring->dma_rx_phy, GFP_KERNEL);
if (rx_ring->dma_rx == NULL)
return -ENOMEM;
int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, gfp_flags);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, gfp_flags);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
- priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma, GFP_ATOMIC);
+ priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+ &priv->tx_ring_dma, GFP_ATOMIC);
if (!priv->tx_ring)
return -ENOMEM;
struct netsec_desc_ring *dring = &priv->desc_ring[id];
int i;
- dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
- &dring->desc_dma, GFP_KERNEL);
+ dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
if (!dring->vaddr)
goto err;
struct stmmac_extra_stats *x, u32 chan)
{
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
/* ABNORMAL interrupts */
x->normal_irq_n++;
if (likely(intr_status & XGMAC_RI)) {
- u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
- if (likely(value & XGMAC_RIE)) {
+ if (likely(intr_en & XGMAC_RIE)) {
x->rx_normal_irq_n++;
ret |= handle_rx;
}
}
/* Clear interrupts */
- writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
return ret;
}
goto err_dma;
if (priv->extend_desc) {
- rx_q->dma_erx = dma_zalloc_coherent(priv->device,
- DMA_RX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
if (!rx_q->dma_erx)
goto err_dma;
} else {
- rx_q->dma_rx = dma_zalloc_coherent(priv->device,
- DMA_RX_SIZE *
- sizeof(struct
- dma_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
if (!rx_q->dma_rx)
goto err_dma;
}
goto err_dma;
if (priv->extend_desc) {
- tx_q->dma_etx = dma_zalloc_coherent(priv->device,
- DMA_TX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
+ tx_q->dma_etx = dma_alloc_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_extended_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
if (!tx_q->dma_etx)
goto err_dma;
} else {
- tx_q->dma_tx = dma_zalloc_coherent(priv->device,
- DMA_TX_SIZE *
- sizeof(struct
- dma_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
+ tx_q->dma_tx = dma_alloc_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
if (!tx_q->dma_tx)
goto err_dma;
}
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, napi);
struct stmmac_priv *priv = ch->priv_data;
- int work_done = 0, work_rem = budget;
+ int work_done, rx_done = 0, tx_done = 0;
u32 chan = ch->index;
priv->xstats.napi_poll++;
- if (ch->has_tx) {
- int done = stmmac_tx_clean(priv, work_rem, chan);
-
- work_done += done;
- work_rem -= done;
- }
+ if (ch->has_tx)
+ tx_done = stmmac_tx_clean(priv, budget, chan);
+ if (ch->has_rx)
+ rx_done = stmmac_rx(priv, budget, chan);
- if (ch->has_rx) {
- int done = stmmac_rx(priv, work_rem, chan);
+ work_done = max(rx_done, tx_done);
+ work_done = min(work_done, budget);
- work_done += done;
- work_rem -= done;
- }
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ int stat;
- if (work_done < budget && napi_complete_done(napi, work_done))
stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan);
+ if (stat && napi_reschedule(napi))
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ }
return work_done;
}
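With the rework above, the per-channel poll reports min(max(rx_done, tx_done), budget) and, after re-enabling the DMA interrupt, re-reads the interrupt status so a completion that raced with napi_complete_done() reschedules the poll instead of being lost. A worked example with hypothetical counts:

/* budget = 64, rx_done = 10, tx_done = 48 (hypothetical values)          */
work_done = max(rx_done, tx_done);	/* 48                              */
work_done = min(work_done, budget);	/* 48 < 64, so NAPI may complete,  */
					/* the IRQ is re-armed, and any    */
					/* pending status re-schedules it. */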
return ret;
}
+ /* Rx Watchdog is available in the COREs newer than 3.40.
+ * In some cases, for example on buggy HW, this feature
+ * has to be disabled, and this can be done by passing the
+ * riwt_off field from the platform.
+ */
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
return 0;
}
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
- /* Rx Watchdog is available in the COREs newer than the 3.40.
- * In some case, for example on bugged HW this feature
- * has to be disable and this can be done by passing the
- * riwt_off field from the platform.
- */
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
- priv->use_riwt = 1;
- dev_info(priv->device,
- "Enable RX Mitigation via HW Watchdog Timer\n");
- }
-
/* Setup channels NAPI */
maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
+ int i;
+
stmmac_dvr_remove(&pdev->dev);
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+
pci_disable_device(pdev);
}
/* Queue 0 is not AVB capable */
if (queue <= 0 || queue >= tx_queues_count)
return -EINVAL;
+ if (!priv->dma_cap.av)
+ return -EOPNOTSUPP;
if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
return -EOPNOTSUPP;
data->id, dev->irq, dev->name);
}
- data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size,
- &data->rxdma, GFP_KERNEL);
+ data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
+ &data->rxdma, GFP_KERNEL);
if (!data->rxring)
return -ENOMEM;
- data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size,
- &data->txdma, GFP_KERNEL);
+ data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
+ &data->txdma, GFP_KERNEL);
if (!data->txring) {
dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
- bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
- &bp->kmalloced_dma,
- GFP_ATOMIC);
+ bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
+ &bp->kmalloced_dma,
+ GFP_ATOMIC);
if (top_v == NULL)
return DFX_K_FAILURE;
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
- bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev,
- bp->SharedMemSize,
- &bp->SharedMemDMA,
- GFP_ATOMIC);
+ bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
+ bp->SharedMemSize,
+ &bp->SharedMemDMA,
+ GFP_ATOMIC);
if (!bp->SharedMemAddr) {
printk("could not allocate mem for ");
printk("hardware module: %ld byte\n",
if (src)
dev_put(src->dev);
- kfree_skb(skb);
+ consume_skb(skb);
}
}
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
+ .features = PHY_10GBIT_FEC_FEATURES,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
+ .features = PHY_10GBIT_FEC_FEATURES,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
.phy_id = PHY_ID_CS4340,
.phy_id_mask = 0xffffffff,
.name = "Cortina CS4340",
+ .features = PHY_10GBIT_FEATURES,
.config_init = gen10g_config_init,
.config_aneg = gen10g_config_aneg,
.read_status = cortina_read_status,
return 0;
}
+/* The VOD can be out of specification on link up. Poke an
+ * undocumented register, in an undocumented page, with a magic value
+ * to fix this.
+ */
+static int m88e6390_errata(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_BMCR,
+ BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (err)
+ return err;
+
+ usleep_range(300, 400);
+
+ err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
+ if (err)
+ return err;
+
+ return genphy_soft_reset(phydev);
+}
+
+static int m88e6390_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ err = m88e6390_errata(phydev);
+ if (err)
+ return err;
+
+ return m88e1510_config_aneg(phydev);
+}
+
/**
* fiber_lpa_mod_linkmode_lpa_t
* @advertising: the linkmode advertisement settings
* before enabling it if !phy_interrupt_is_valid()
*/
if (!phy_interrupt_is_valid(phydev))
- phy_read(phydev, MII_M1011_IEVENT);
+ __phy_read(phydev, MII_M1011_IEVENT);
/* Enable the WOL interrupt */
err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
.features = PHY_GBIT_FEATURES,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
- .config_aneg = &m88e1510_config_aneg,
+ .config_aneg = &m88e6390_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
if (IS_ERR(gpiod)) {
dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
bus->id);
+ device_del(&bus->dev);
return PTR_ERR(gpiod);
} else if (gpiod) {
bus->reset_gpiod = gpiod;
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
.flags = PHY_IS_INTERNAL,
+ .soft_reset = genphy_soft_reset,
.config_init = meson_gxl_config_init,
.aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
+ .soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
+ .features = PHY_BASIC_FEATURES,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
mutex_lock(&phydev->lock);
- if (!__phy_is_started(phydev)) {
- WARN(1, "called from state %s\n",
- phy_state_to_str(phydev->state));
- err = -EBUSY;
- goto out_unlock;
- }
-
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
if (err < 0)
goto out_unlock;
- if (phydev->autoneg == AUTONEG_ENABLE) {
- err = phy_check_link_status(phydev);
- } else {
- phydev->state = PHY_FORCING;
- phydev->link_timeout = PHY_FORCE_TIMEOUT;
+ if (__phy_is_started(phydev)) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ err = phy_check_link_status(phydev);
+ } else {
+ phydev->state = PHY_FORCING;
+ phydev->link_timeout = PHY_FORCE_TIMEOUT;
+ }
}
out_unlock:
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_features);
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
+
static const int phy_basic_ports_array[] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
};
EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
+const int phy_10gbit_fec_features_array[1] = {
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+};
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
linkmode_set_bit_array(phy_10gbit_full_features_array,
ARRAY_SIZE(phy_10gbit_full_features_array),
phy_10gbit_full_features);
+ /* 10G FEC only */
+ linkmode_set_bit_array(phy_10gbit_fec_features_array,
+ ARRAY_SIZE(phy_10gbit_fec_features_array),
+ phy_10gbit_fec_features);
}
void phy_device_free(struct phy_device *phydev)
{
int retval;
+ if (WARN_ON(!new_driver->features)) {
+ pr_err("%s: Driver features are missing\n", new_driver->name);
+ return -EINVAL;
+ }
+
new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
new_driver->mdiodrv.driver.name = new_driver->name;
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
.phy_id = PHY_ID_TN2020,
.phy_id_mask = 0xffffffff,
.name = "Teranetics TN2020",
+ .features = PHY_10GBIT_FEATURES,
.soft_reset = gen10g_no_soft_reset,
.aneg_done = teranetics_aneg_done,
.config_init = gen10g_config_init,
if (pskb_trim_rcsum(skb, len))
goto drop;
+ ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
/* Note that get_item does a sock_hold(), so sk_pppox(po)
err = 0;
}
- rcu_assign_pointer(tfile->tun, tun);
- rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
- tun->numqueues++;
-
if (tfile->detached) {
tun_enable_queue(tfile);
} else {
* refcnt.
*/
+ /* Publish tfile->tun and tun->tfiles only after we've fully
+ * initialized tfile; otherwise we risk using half-initialized
+ * object.
+ */
+ rcu_assign_pointer(tfile->tun, tun);
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ tun->numqueues++;
out:
return err;
}
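Moving the rcu_assign_pointer() calls to the end of tun_attach() follows the standard RCU publish rule: finish initializing the object, then make it reachable, so lock-free readers never observe a half-built tfile. A generic sketch of the rule (names are illustrative):

obj->field_a = compute_a();		/* complete all initialization ... */
obj->field_b = compute_b();
rcu_assign_pointer(global_ptr, obj);	/* ... then publish; the barrier in
					 * rcu_assign_pointer() orders the
					 * stores above before the pointer
					 * becomes visible to readers.
					 */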
#undef ASIX112_DESC
+static const struct driver_info trendnet_info = {
+ .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter",
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
{AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
+ {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
{ },/* END */
};
MODULE_DEVICE_TABLE(usb, products);
* probed with) and a slave/data interface; union
* descriptors sort this all out.
*/
- info->control = usb_ifnum_to_if(dev->udev,
- info->u->bMasterInterface0);
- info->data = usb_ifnum_to_if(dev->udev,
- info->u->bSlaveInterface0);
+ info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0);
+ info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0);
if (!info->control || !info->data) {
dev_dbg(&intf->dev,
"master #%u/%p slave #%u/%p\n",
/* a data interface altsetting does the real i/o */
d = &info->data->cur_altsetting->desc;
if (d->bInterfaceClass != USB_CLASS_CDC_DATA) {
- dev_dbg(&intf->dev, "slave class %u\n",
- d->bInterfaceClass);
+ dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass);
goto bad_desc;
}
skip:
- if ( rndis &&
- header.usb_cdc_acm_descriptor &&
- header.usb_cdc_acm_descriptor->bmCapabilities) {
- dev_dbg(&intf->dev,
- "ACM capabilities %02x, not really RNDIS?\n",
- header.usb_cdc_acm_descriptor->bmCapabilities);
- goto bad_desc;
+ if (rndis && header.usb_cdc_acm_descriptor &&
+ header.usb_cdc_acm_descriptor->bmCapabilities) {
+ dev_dbg(&intf->dev,
+ "ACM capabilities %02x, not really RNDIS?\n",
+ header.usb_cdc_acm_descriptor->bmCapabilities);
+ goto bad_desc;
}
if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
}
if (header.usb_cdc_mdlm_desc &&
- memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) {
+ memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) {
dev_dbg(&intf->dev, "GUID doesn't match\n");
goto bad_desc;
}
if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
struct usb_endpoint_descriptor *desc;
- dev->status = &info->control->cur_altsetting->endpoint [0];
+ dev->status = &info->control->cur_altsetting->endpoint[0];
desc = &dev->status->desc;
if (!usb_endpoint_is_int_in(desc) ||
(le16_to_cpu(desc->wMaxPacketSize)
.driver_info = 0,
},
+/* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
dev->addr_len = 0;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->netdev_ops = &qmimux_netdev_ops;
+ dev->mtu = 1500;
dev->needs_free_netdev = true;
}
return stats.packets;
}
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
struct sk_buff *skb;
unsigned int len;
bytes += skb->len;
packets++;
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, in_napi);
}
/* Avoid overhead when no packets have been processed
return;
if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
}
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
__netif_tx_lock(txq, raw_smp_processor_id());
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
virtqueue_napi_complete(napi, sq->vq, 0);
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);
if (use_napi && kick)
virtqueue_enable_cb_delayed(sq->vq);
if (!use_napi &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
}
sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
- tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
- &tq->buf_info_pa, GFP_KERNEL);
+ tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &tq->buf_info_pa, GFP_KERNEL);
if (!tq->buf_info)
goto err;
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
- GFP_KERNEL);
+ bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+ GFP_KERNEL);
if (!bi)
goto err;
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
/* Get BD buffer */
- bd_buffer = dma_zalloc_coherent(priv->dev,
- (RX_BD_RING_LEN + TX_BD_RING_LEN) *
- MAX_RX_BUF_LENGTH,
- &bd_dma_addr, GFP_KERNEL);
+ bd_buffer = dma_alloc_coherent(priv->dev,
+ (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
+ &bd_dma_addr, GFP_KERNEL);
if (!bd_buffer) {
dev_err(priv->dev, "Could not allocate buffer descriptors\n");
.ndo_tx_timeout = uhdlc_tx_timeout,
};
+static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+ struct resource *res;
+ static int siram_init_flag;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, name);
+ if (!np)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ pr_err("%pOFn: failed to lookup pdev\n", np);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ of_node_put(np);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto error_put_device;
+ }
+ *ptr = ioremap(res->start, resource_size(res));
+ if (!*ptr) {
+ ret = -ENOMEM;
+ goto error_put_device;
+ }
+
+ /* We've remapped the addresses, and we don't need the device any
+ * more, so we should release it.
+ */
+ put_device(&pdev->dev);
+
+ if (init_flag && siram_init_flag == 0) {
+ memset_io(*ptr, 0, resource_size(res));
+ siram_init_flag = 1;
+ }
+ return 0;
+
+error_put_device:
+ put_device(&pdev->dev);
+
+ return ret;
+}
+
static int ucc_hdlc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
ret = ucc_of_parse_tdm(np, utdm, ut_info);
if (ret)
goto free_utdm;
+
+ ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
+ (void __iomem **)&utdm->si_regs);
+ if (ret)
+ goto free_utdm;
+ ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
+ (void __iomem **)&utdm->siram);
+ if (ret)
+ goto unmap_si_regs;
}
if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
ret = uhdlc_init(uhdlc_priv);
if (ret) {
dev_err(&pdev->dev, "Failed to init uhdlc\n");
- goto free_utdm;
+ goto undo_uhdlc_init;
}
dev = alloc_hdlcdev(uhdlc_priv);
free_dev:
free_netdev(dev);
undo_uhdlc_init:
+ iounmap(utdm->siram);
+unmap_si_regs:
+ iounmap(utdm->si_regs);
free_utdm:
if (uhdlc_priv->tsa)
kfree(utdm);
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- dma_zalloc_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr, GFP_KERNEL);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
return ERR_PTR(-ENOMEM);
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_AP) {
- arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
- IEEE80211_MAX_FRAME_LEN,
- &arvif->beacon_paddr,
- GFP_ATOMIC);
+ arvif->beacon_buf = dma_alloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
if (!arvif->beacon_buf) {
ret = -ENOMEM;
ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
*/
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
- data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
- alloc_nbytes,
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
void *vaddr;
pool_size = num_units * round_up(unit_len, 4);
- vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+ vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
int i;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
- wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size,
- &wcn_ch->dma_addr,
- GFP_KERNEL);
+ wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
if (!wcn_ch->cpu_addr)
return -ENOMEM;
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
- cpu_addr = dma_zalloc_coherent(wcn->dev, s,
- &wcn->mgmt_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(wcn->dev, s,
+ &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
- cpu_addr = dma_zalloc_coherent(wcn->dev, s,
- &wcn->data_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(wcn->dev, s,
+ &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
/* Status messages are allocated and initialized to 0. This is necessary
* since DR bit should be initialized to 0.
*/
- sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+ sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
if (!sring->va)
return -ENOMEM;
if (!ring->ctx)
goto err;
- ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+ ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
if (!ring->va)
goto err_free_ctx;
if (ring->is_rx) {
sz = sizeof(*ring->edma_rx_swtail.va);
ring->edma_rx_swtail.va =
- dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
- GFP_KERNEL);
+ dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+ GFP_KERNEL);
if (!ring->edma_rx_swtail.va)
goto err_free_va;
}
u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
- ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
- ring_mem_size, &(ring->dmabase),
- GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ ring_mem_size, &(ring->dmabase),
+ GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
- ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
- B43legacy_DMA_RINGMEMSIZE,
- &(ring->dmabase), GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ B43legacy_DMA_RINGMEMSIZE,
+ &(ring->dmabase), GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
u32 addr;
devinfo->shared.scratch =
- dma_zalloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
- &devinfo->shared.scratch_dmahandle,
- GFP_KERNEL);
+ dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+ &devinfo->shared.scratch_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.scratch)
goto fail;
brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
devinfo->shared.ringupd =
- dma_zalloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
- &devinfo->shared.ringupd_dmahandle,
- GFP_KERNEL);
+ dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+ &devinfo->shared.ringupd_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.ringupd)
goto fail;
* Allocate the circular buffer of Read Buffer Descriptors
* (RBDs)
*/
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
+ rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
if (!rxq->bd)
goto err;
if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- (use_rx_td ?
- sizeof(*rxq->cd) :
- sizeof(__le32)) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
+ rxq->used_bd = dma_alloc_coherent(dev,
+ (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ &rxq->used_bd_dma,
+ GFP_KERNEL);
if (!rxq->used_bd)
goto err;
}
/* Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
- sizeof(__le16) :
- sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma,
- GFP_KERNEL);
+ rxq->rb_stts = dma_alloc_coherent(dev,
+ use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err;
return 0;
/* Allocate the driver's pointer to TR tail */
- rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->tr_tail_dma,
- GFP_KERNEL);
+ rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma, GFP_KERNEL);
if (!rxq->tr_tail)
goto err;
/* Allocate the driver's pointer to CR tail */
- rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->cr_tail_dma,
- GFP_KERNEL);
+ rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
/*
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_zalloc_coherent(trans->dev, ICT_SIZE,
- &trans_pcie->ict_tbl_dma,
- GFP_KERNEL);
+ dma_alloc_coherent(trans->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma, GFP_KERNEL);
if (!trans_pcie->ict_tbl)
return -ENOMEM;
/*
* Allocate DMA memory for descriptor and buffer.
*/
- addr = dma_zalloc_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size, &dma,
- GFP_KERNEL);
+ addr = dma_alloc_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size, &dma,
+ GFP_KERNEL);
if (!addr)
return -ENOMEM;
int rc;
sndev->nr_rsvd_luts++;
- sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
- LUT_SIZE,
- &sndev->self_shared_dma,
- GFP_KERNEL);
+ sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
+ LUT_SIZE,
+ &sndev->self_shared_dma,
+ GFP_KERNEL);
if (!sndev->self_shared) {
dev_err(&sndev->stdev->dev,
"unable to allocate memory for shared mw\n");
};
static inline enum nvdimm_security_state nvdimm_security_state(
- struct nvdimm *nvdimm, bool master)
+ struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
if (!nvdimm->sec.ops)
return -ENXIO;
- return nvdimm->sec.ops->state(nvdimm, master);
+ return nvdimm->sec.ops->state(nvdimm, ptype);
}
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
size_t nqnlen;
int off;
- nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
- if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
- return;
- }
+ if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
+ nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
+ if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
+ strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ return;
+ }
- if (ctrl->vs >= NVME_VS(1, 2, 1))
- dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
+ if (ctrl->vs >= NVME_VS(1, 2, 1))
+ dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
+ }
/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
- "nqn.2014.08.org.nvmexpress:%4x%4x",
+ "nqn.2014.08.org.nvmexpress:%04x%04x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
off += sizeof(id->sn);
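The format-string change above matters because "%4x" pads with spaces while "%04x" pads with zeros, and the fake NQN is meant to be a fixed-width hex field. A quick userspace illustration (the vendor/subsystem-vendor IDs are made up):

printf("nqn.2014.08.org.nvmexpress:%4x%4x\n", 0x1b4b, 0x25);
/* -> nqn.2014.08.org.nvmexpress:1b4b  25   (embedded spaces)  */
printf("nqn.2014.08.org.nvmexpress:%04x%04x\n", 0x1b4b, 0x25);
/* -> nqn.2014.08.org.nvmexpress:1b4b0025   (zero padded)      */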
ctrl->oaes = le32_to_cpu(id->oaes);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
- ctrl->cntlid = le16_to_cpup(&id->cntlid);
if (id->mdts)
max_hw_sectors = 1 << (id->mdts + page_shift - 9);
else
if (opts->discovery_nqn) {
opts->kato = 0;
opts->nr_io_queues = 0;
+ opts->nr_write_queues = 0;
+ opts->nr_poll_queues = 0;
opts->duplicate_connect = true;
}
if (ctrl_loss_tmo < 0)
return 0;
out_free_ana_log_buf:
kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = NULL;
out:
return error;
}
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = NULL;
}
* Set MEDIUM priority on SQ creation
*/
NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
+
+ /*
+ * Ignore device provided subnqn.
+ */
+ NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
};
/*
struct nvme_queue;
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- if (++nvmeq->cq_head == nvmeq->q_depth) {
+ if (nvmeq->cq_head == nvmeq->q_depth - 1) {
nvmeq->cq_head = 0;
nvmeq->cq_phase = !nvmeq->cq_phase;
+ } else {
+ nvmeq->cq_head++;
}
}
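The rewritten helper above never stores an out-of-range head: it either wraps to 0 (flipping the phase bit) or increments, whereas the old pre-increment could briefly leave cq_head equal to q_depth. A minimal model of the same ring arithmetic:

/* q_depth = 4: head follows 0, 1, 2, 3, 0, ... and phase flips on wrap */
if (head == q_depth - 1) {
	head = 0;
	phase = !phase;
} else {
	head++;
}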
return 0;
}
+static void nvme_suspend_io_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(&dev->queues[i]);
+}
+
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
struct nvme_queue *nvmeq = &dev->queues[0];
if (dev->ctrl.queue_count > qid)
return 0;
- nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
- &nvmeq->cq_dma_addr, GFP_KERNEL);
+ nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
if (!nvmeq->cqes)
goto free_nvmeq;
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
- dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
- le64_to_cpu(desc->addr));
+ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+ le64_to_cpu(desc->addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}
kfree(dev->host_mem_desc_bufs);
if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
max_entries = dev->ctrl.hmmaxd;
- descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
- &descs_dma, GFP_KERNEL);
+ descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
if (!descs)
goto out;
while (--i >= 0) {
size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
- dma_free_coherent(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr));
+ dma_free_attrs(dev->dev, size, bufs[i],
+ le64_to_cpu(descs[i].addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}
kfree(bufs);
return ret;
}
+/* irq_queues covers admin queue */
static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
{
unsigned int this_w_queues = write_queues;
+ WARN_ON(!irq_queues);
+
/*
- * Setup read/write queue split
+ * Set up the read/write queue split, and assign the admin queue one
+ * independent irq vector if irq_queues is > 1.
*/
- if (irq_queues == 1) {
+ if (irq_queues <= 2) {
dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
return;
/*
* If 'write_queues' is set, ensure it leaves room for at least
- * one read queue
+ * one read queue and one admin queue
*/
if (this_w_queues >= irq_queues)
- this_w_queues = irq_queues - 1;
+ this_w_queues = irq_queues - 2;
/*
* If 'write_queues' is set to zero, reads and writes will share
* a queue set.
*/
if (!this_w_queues) {
- dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues;
+ dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
} else {
dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
- dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues;
+ dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
}
}
this_p_queues = nr_io_queues - 1;
irq_queues = 1;
} else {
- irq_queues = nr_io_queues - this_p_queues;
+ irq_queues = nr_io_queues - this_p_queues + 1;
}
dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
* If we got a failure and we're down to asking for just
* 1 + 1 queues, just ask for a single vector. We'll share
* that between the single IO queue and the admin queue.
+ * Otherwise, we assign one independent vector to the admin queue.
*/
- if (result >= 0 && irq_queues > 1)
+ if (irq_queues > 1)
irq_queues = irq_sets[0] + irq_sets[1] + 1;
result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
return result;
}
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
+ __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
+}
+
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = &dev->queues[0];
} while (1);
adminq->q_db = dev->dbs;
+ retry:
/* Deregister the admin queue's interrupt */
pci_free_irq(pdev, 0, adminq);
result = max(result - 1, 1);
dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
- dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
- dev->io_queues[HCTX_TYPE_DEFAULT],
- dev->io_queues[HCTX_TYPE_READ],
- dev->io_queues[HCTX_TYPE_POLL]);
-
/*
* Should investigate if there's a performance win from allocating
* more queues than interrupt vectors; it might allow the submission
* path to scale better, even if the receive path is limited by the
* number of interrupts.
*/
-
result = queue_request_irq(adminq);
if (result) {
adminq->cq_vector = -1;
return result;
}
set_bit(NVMEQ_ENABLED, &adminq->flags);
- return nvme_create_io_queues(dev);
+
+ result = nvme_create_io_queues(dev);
+ if (result || dev->online_queues < 2)
+ return result;
+
+ if (dev->online_queues - 1 < dev->max_qid) {
+ nr_io_queues = dev->online_queues - 1;
+ nvme_disable_io_queues(dev);
+ nvme_suspend_io_queues(dev);
+ goto retry;
+ }
+ dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
+ dev->io_queues[HCTX_TYPE_DEFAULT],
+ dev->io_queues[HCTX_TYPE_READ],
+ dev->io_queues[HCTX_TYPE_POLL]);
+ return 0;
}
static void nvme_del_queue_end(struct request *req, blk_status_t error)
return 0;
}
-static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
{
int nr_queues = dev->online_queues - 1, sent = 0;
unsigned long timeout;
dev->tagset.nr_maps = 2; /* default + read */
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
- dev->tagset.nr_maps = HCTX_MAX_TYPES;
dev->tagset.timeout = NVME_IO_TIMEOUT;
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i;
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
nvme_stop_queues(&dev->ctrl);
if (!dead && dev->ctrl.queue_count > 0) {
- if (nvme_disable_io_queues(dev, nvme_admin_delete_sq))
- nvme_disable_io_queues(dev, nvme_admin_delete_cq);
+ nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
- for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
- nvme_suspend_queue(&dev->queues[i]);
-
+ nvme_suspend_io_queues(dev);
+ nvme_suspend_queue(&dev->queues[0]);
nvme_pci_disable(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_MEDIUM_PRIO_SQ },
+ { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
{
nvme_tcp_stop_io_queues(ctrl);
if (remove) {
- if (ctrl->ops->flags & NVME_F_FABRICS)
- blk_cleanup_queue(ctrl->connect_q);
+ blk_cleanup_queue(ctrl->connect_q);
blk_mq_free_tag_set(ctrl->tagset);
}
nvme_tcp_free_io_queues(ctrl);
goto out_free_io_queues;
}
- if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ctrl->connect_q)) {
- ret = PTR_ERR(ctrl->connect_q);
- goto out_free_tag_set;
- }
+ ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+ if (IS_ERR(ctrl->connect_q)) {
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
}
} else {
blk_mq_update_nr_hw_queues(ctrl->tagset,
return 0;
out_cleanup_connect_q:
- if (new && (ctrl->ops->flags & NVME_F_FABRICS))
+ if (new)
blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
if (new)
{
nvme_tcp_stop_queue(ctrl, 0);
if (remove) {
- free_opal_dev(ctrl->opal_dev);
blk_cleanup_queue(ctrl->admin_q);
blk_mq_free_tag_set(ctrl->admin_tagset);
}
static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
- int result;
+ int result = 0;
if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
return 0;
if (!of_node_check_flag(np, OF_OVERLAY)) {
np->name = __of_get_property(np, "name", NULL);
- np->type = __of_get_property(np, "device_type", NULL);
if (!np->name)
np->name = "<NULL>";
- if (!np->type)
- np->type = "<NULL>";
phandle = __of_get_property(np, "phandle", &sz);
if (!phandle)
populate_properties(blob, offset, mem, np, pathp, dryrun);
if (!dryrun) {
np->name = of_get_property(np, "name", NULL);
- np->type = of_get_property(np, "device_type", NULL);
-
if (!np->name)
np->name = "<NULL>";
- if (!np->type)
- np->type = "<NULL>";
}
*pnp = np;
tchild->parent = target->np;
tchild->name = __of_get_property(node, "name", NULL);
- tchild->type = __of_get_property(node, "device_type", NULL);
if (!tchild->name)
tchild->name = "<NULL>";
- if (!tchild->type)
- tchild->type = "<NULL>";
/* ignore obsolete "linux,phandle" */
phandle = __of_get_property(node, "phandle", &size);
dp->parent = parent;
dp->name = of_pdt_get_one_property(node, "name");
- dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
dp->properties = of_pdt_build_prop_list(node);
if (!of_device_is_available(remote)) {
pr_debug("not available for remote node\n");
+ of_node_put(remote);
return NULL;
}
kfree(opp);
}
-static void _opp_kref_release(struct kref *kref)
+static void _opp_kref_release(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
{
- struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
- struct opp_table *opp_table = opp->opp_table;
-
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
opp_debug_remove_one(opp);
list_del(&opp->node);
kfree(opp);
+}
+static void _opp_kref_release_unlocked(struct kref *kref)
+{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+
+ _opp_kref_release(opp, opp_table);
+}
+
+static void _opp_kref_release_locked(struct kref *kref)
+{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+
+ _opp_kref_release(opp, opp_table);
mutex_unlock(&opp_table->lock);
}
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
- kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+ kref_put_mutex(&opp->kref, _opp_kref_release_locked,
+ &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
+{
+ kref_put(&opp->kref, _opp_kref_release_unlocked);
+}
+
/**
* dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+/**
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
+ * @dev: device for which we do this operation
+ *
+ * This function removes all dynamically created OPPs from the opp table.
+ */
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp, *temp;
+ int count = 0;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return;
+
+ mutex_lock(&opp_table->lock);
+ list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
+ if (opp->dynamic) {
+ dev_pm_opp_put_unlocked(opp);
+ count++;
+ }
+ }
+ mutex_unlock(&opp_table->lock);
+
+ /* Drop the references taken by dev_pm_opp_add() */
+ while (count--)
+ dev_pm_opp_put_opp_table(opp_table);
+
+ /* Drop the reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
+
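A minimal usage sketch of the new helper, assuming a driver that registered its OPPs dynamically with dev_pm_opp_add(); the device pointer, frequencies and voltages are hypothetical:

#include <linux/pm_opp.h>

static int example_register_opps(struct device *dev)
{
	int ret;

	/* made-up operating points: 1 GHz at 975 mV, 1.2 GHz at 1075 mV */
	ret = dev_pm_opp_add(dev, 1000000000, 975000);
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1200000000, 1075000);
}

static void example_unregister_opps(struct device *dev)
{
	/* Drops every OPP added via dev_pm_opp_add() for this device */
	dev_pm_opp_remove_all_dynamic(dev);
}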
struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
struct dev_pm_opp *opp;
support for PCI-X and the foundations for PCI Express support.
Say 'Y' here unless you know what you are doing.
+if PCI
+
config PCI_DOMAINS
bool
depends on PCI
config PCI_DOMAINS_GENERIC
bool
- depends on PCI
select PCI_DOMAINS
config PCI_SYSCALL
config PCI_MSI
bool "Message Signaled Interrupts (MSI and MSI-X)"
- depends on PCI
select GENERIC_MSI_IRQ
help
This allows device drivers to enable MSI (Message Signaled
config PCI_QUIRKS
default y
bool "Enable PCI quirk workarounds" if EXPERT
- depends on PCI
help
This enables workarounds for various PCI chipset bugs/quirks.
Disable this only if your target machine is unaffected by PCI
config PCI_DEBUG
bool "PCI Debugging"
- depends on PCI && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
help
Say Y here if you want the PCI core to produce a bunch of debug
messages to the system log. Select this if you are having a
config PCI_REALLOC_ENABLE_AUTO
bool "Enable PCI resource re-allocation detection"
- depends on PCI
depends on PCI_IOV
help
Say Y here if you want the PCI core to detect if PCI resource
config PCI_STUB
tristate "PCI Stub driver"
- depends on PCI
help
Say Y or M here if you want be able to reserve a PCI device
when it is going to be assigned to a guest operating system.
config PCI_PF_STUB
tristate "PCI PF Stub driver"
- depends on PCI
depends on PCI_IOV
help
Say Y or M here if you want to enable support for devices that
- require SR-IOV support, while at the same time the PF itself is
- not providing any actual services on the host itself such as
- storage or networking.
+ require SR-IOV support, while at the same time the PF (Physical
+ Function) itself does not provide any actual services on the
+ host, such as storage or networking.
When in doubt, say N.
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
- depends on PCI && X86 && XEN
+ depends on X86 && XEN
select PCI_XEN
select XEN_XENBUS_FRONTEND
default y
config PCI_IOV
bool "PCI IOV support"
- depends on PCI
select PCI_ATS
help
I/O Virtualization is a PCI feature supported by some devices
config PCI_PRI
bool "PCI PRI support"
- depends on PCI
select PCI_ATS
help
PRI is the PCI Page Request Interface. It allows PCI devices that are
config PCI_PASID
bool "PCI PASID support"
- depends on PCI
select PCI_ATS
help
Process Address Space Identifiers (PASIDs) can be used by PCI devices
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
- depends on PCI && ZONE_DEVICE
+ depends on ZONE_DEVICE
select GENERIC_ALLOCATOR
help
Enables drivers to do PCI peer-to-peer transactions to and from
config PCI_LABEL
def_bool y if (DMI || ACPI)
- depends on PCI
select NLS
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
- depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
help
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
source "drivers/pci/switch/Kconfig"
+
+endif
select PCIE_DW_HOST
config PCI_IMX6
- bool "Freescale i.MX6 PCIe controller"
- depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
+ bool "Freescale i.MX6/7 PCIe controller"
+ depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+config PCI_MESON
+ bool "MESON PCIe controller"
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on Amlogic
+ SoCs. The PCIe controller on Amlogic SoCs is based on DesignWare
+ hardware, so the driver re-uses the DesignWare core functions.
+
+config PCIE_UNIPHIER
+ bool "Socionext UniPhier PCIe controllers"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on UniPhier SoCs.
+ This driver supports LD20 and PXs3 SoCs.
+
endmenu
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include "pcie-designware.h"
u32 tx_swing_low;
int link_gen;
struct regulator *vpcie;
+
+ /* power domain for pcie */
+ struct device *pd_pcie;
+ /* power domain for pcie phy */
+ struct device *pd_pcie_phy;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
/* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_IMX6_MSI_CAP 0x50
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
return 1;
}
+static int imx6_pcie_attach_pd(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct device_link *link;
+
+ /* Do nothing when in a single power domain */
+ if (dev->pm_domain)
+ return 0;
+
+ imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx6_pcie->pd_pcie))
+ return PTR_ERR(imx6_pcie->pd_pcie);
+ link = device_link_add(dev, imx6_pcie->pd_pcie,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ return -EINVAL;
+ }
+
+ imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pd_pcie_phy))
+ return PTR_ERR(imx6_pcie->pd_pcie_phy);
+
+ link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
+ struct device *dev = imx6_pcie->pci->dev;
+
+ /* Some variants have a turnoff reset in DT */
+ if (imx6_pcie->turnoff_reset) {
+ reset_control_assert(imx6_pcie->turnoff_reset);
+ reset_control_deassert(imx6_pcie->turnoff_reset);
+ goto pm_turnoff_sleep;
+ }
+
+ /* Others poke directly at IOMUXC registers */
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+ break;
+ default:
+ dev_err(dev, "PME_Turn_Off not implemented\n");
+ return;
+ }
/*
* Components with an upstream port must respond to
* The standard recommends a 1-10ms timeout after which to
* proceed anyway as if acks were received.
*/
+pm_turnoff_sleep:
usleep_range(1000, 10000);
}
clk_disable_unprepare(imx6_pcie->pcie_phy);
clk_disable_unprepare(imx6_pcie->pcie_bus);
- if (imx6_pcie->variant == IMX7D) {
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX7D:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ default:
+ break;
}
}
+static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
+{
+ return (imx6_pcie->variant == IMX7D ||
+ imx6_pcie->variant == IMX6SX);
+}
+
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- if (imx6_pcie->variant != IMX7D)
+ if (!imx6_pcie_supports_suspend(imx6_pcie))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct pcie_port *pp = &imx6_pcie->pci->pp;
- if (imx6_pcie->variant != IMX7D)
+ if (!imx6_pcie_supports_suspend(imx6_pcie))
return 0;
imx6_pcie_assert_core_reset(imx6_pcie);
struct resource *dbi_base;
struct device_node *node = dev->of_node;
int ret;
+ u16 val;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
platform_set_drvdata(pdev, imx6_pcie);
+ ret = imx6_pcie_attach_pd(dev);
+ if (ret)
+ return ret;
+
ret = imx6_add_pcie_port(imx6_pcie, pdev);
if (ret < 0)
return ret;
+ if (pci_msi_enabled()) {
+ val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
+ PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
+ val);
+ }
+
return 0;
}
.link_up = ls_pcie_link_up,
};
-static struct ls_pcie_drvdata ls1021_drvdata = {
+static const struct ls_pcie_drvdata ls1021_drvdata = {
.ops = &ls1021_pcie_host_ops,
.dw_pcie_ops = &dw_ls1021_pcie_ops,
};
-static struct ls_pcie_drvdata ls1043_drvdata = {
+static const struct ls_pcie_drvdata ls1043_drvdata = {
.lut_offset = 0x10000,
.ltssm_shift = 24,
.lut_dbg = 0x7fc,
.dw_pcie_ops = &dw_ls_pcie_ops,
};
-static struct ls_pcie_drvdata ls1046_drvdata = {
+static const struct ls_pcie_drvdata ls1046_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 24,
.lut_dbg = 0x407fc,
.dw_pcie_ops = &dw_ls_pcie_ops,
};
-static struct ls_pcie_drvdata ls2080_drvdata = {
+static const struct ls_pcie_drvdata ls2080_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
.lut_dbg = 0x7fc,
.dw_pcie_ops = &dw_ls_pcie_ops,
};
-static struct ls_pcie_drvdata ls2088_drvdata = {
+static const struct ls_pcie_drvdata ls2088_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
.lut_dbg = 0x407fc,
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amlogic MESON SoCs
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Yue Wang <yue.wang@amlogic.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
+
+/* External local bus interface registers */
+#define PLR_OFFSET 0x700
+#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10)
+#define FAST_LINK_MODE BIT(7)
+#define LINK_CAPABLE_MASK GENMASK(21, 16)
+#define LINK_CAPABLE_X1 BIT(16)
+
+#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c)
+#define NUM_OF_LANES_MASK GENMASK(12, 8)
+#define NUM_OF_LANES_X1 BIT(8)
+#define DIRECT_SPEED_CHANGE BIT(17)
+
+#define TYPE1_HDR_OFFSET 0x0
+#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04)
+#define PCI_IO_EN BIT(0)
+#define PCI_MEM_SPACE_EN BIT(1)
+#define PCI_BUS_MASTER_EN BIT(2)
+
+#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10)
+#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14)
+
+#define PCIE_CAP_OFFSET 0x70
+#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08)
+#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5)
+#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
+#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12)
+#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
+
+/* PCIe specific config registers */
+#define PCIE_CFG0 0x0
+#define APP_LTSSM_ENABLE BIT(7)
+
+#define PCIE_CFG_STATUS12 0x30
+#define IS_SMLH_LINK_UP(x) ((x) & (1 << 6))
+#define IS_RDLH_LINK_UP(x) ((x) & (1 << 16))
+#define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11)
+
+#define PCIE_CFG_STATUS17 0x44
+#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
+
+#define WAIT_LINKUP_TIMEOUT 4000
+#define PORT_CLK_RATE 100000000UL
+#define MAX_PAYLOAD_SIZE 256
+#define MAX_READ_REQ_SIZE 256
+#define MESON_PCIE_PHY_POWERUP 0x1c
+#define PCIE_RESET_DELAY 500
+#define PCIE_SHARED_RESET 1
+#define PCIE_NORMAL_RESET 0
+
+enum pcie_data_rate {
+ PCIE_GEN1,
+ PCIE_GEN2,
+ PCIE_GEN3,
+ PCIE_GEN4
+};
+
+struct meson_pcie_mem_res {
+ void __iomem *elbi_base;
+ void __iomem *cfg_base;
+ void __iomem *phy_base;
+};
+
+struct meson_pcie_clk_res {
+ struct clk *clk;
+ struct clk *mipi_gate;
+ struct clk *port_clk;
+ struct clk *general_clk;
+};
+
+struct meson_pcie_rc_reset {
+ struct reset_control *phy;
+ struct reset_control *port;
+ struct reset_control *apb;
+};
+
+struct meson_pcie {
+ struct dw_pcie pci;
+ struct meson_pcie_mem_res mem_res;
+ struct meson_pcie_clk_res clk_res;
+ struct meson_pcie_rc_reset mrst;
+ struct gpio_desc *reset_gpio;
+};
+
+static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
+ const char *id,
+ u32 reset_type)
+{
+ struct device *dev = mp->pci.dev;
+ struct reset_control *reset;
+
+ if (reset_type == PCIE_SHARED_RESET)
+ reset = devm_reset_control_get_shared(dev, id);
+ else
+ reset = devm_reset_control_get(dev, id);
+
+ return reset;
+}
+
+static int meson_pcie_get_resets(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+ mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->phy))
+ return PTR_ERR(mrst->phy);
+ reset_control_deassert(mrst->phy);
+
+ mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
+ if (IS_ERR(mrst->port))
+ return PTR_ERR(mrst->port);
+ reset_control_deassert(mrst->port);
+
+ mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->apb))
+ return PTR_ERR(mrst->apb);
+ reset_control_deassert(mrst->apb);
+
+ return 0;
+}
+
+static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
+ struct meson_pcie *mp,
+ const char *id)
+{
+ struct device *dev = mp->pci.dev;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+
+ return devm_ioremap_resource(dev, res);
+}
+
+static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
+ struct meson_pcie *mp,
+ const char *id)
+{
+ struct device *dev = mp->pci.dev;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+ if (!res) {
+ dev_err(dev, "No REG resource %s\n", id);
+ return ERR_PTR(-ENXIO);
+ }
+
+ return devm_ioremap(dev, res->start, resource_size(res));
+}
+
+static int meson_pcie_get_mems(struct platform_device *pdev,
+ struct meson_pcie *mp)
+{
+ mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
+ if (IS_ERR(mp->mem_res.elbi_base))
+ return PTR_ERR(mp->mem_res.elbi_base);
+
+ mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
+ if (IS_ERR(mp->mem_res.cfg_base))
+ return PTR_ERR(mp->mem_res.cfg_base);
+
+ /* The Meson SoC has two PCIe controllers that share the same PHY register region */
+ mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
+ if (IS_ERR(mp->mem_res.phy_base))
+ return PTR_ERR(mp->mem_res.phy_base);
+
+ return 0;
+}
+
+static void meson_pcie_power_on(struct meson_pcie *mp)
+{
+ writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+}
+
+static void meson_pcie_reset(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+ reset_control_assert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+
+ reset_control_assert(mrst->port);
+ reset_control_assert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->port);
+ reset_control_deassert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+}
+
+static inline struct clk *meson_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "set clk rate failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk\n");
+ return ERR_PTR(ret);
+ }
+
+ devm_add_action_or_reset(dev,
+ (void (*) (void *))clk_disable_unprepare,
+ clk);
+
+ return clk;
+}
+
+static int meson_pcie_probe_clocks(struct meson_pcie *mp)
+{
+ struct device *dev = mp->pci.dev;
+ struct meson_pcie_clk_res *res = &mp->clk_res;
+
+ res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
+ if (IS_ERR(res->port_clk))
+ return PTR_ERR(res->port_clk);
+
+ res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
+ if (IS_ERR(res->mipi_gate))
+ return PTR_ERR(res->mipi_gate);
+
+ res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+ if (IS_ERR(res->general_clk))
+ return PTR_ERR(res->general_clk);
+
+ res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+ if (IS_ERR(res->clk))
+ return PTR_ERR(res->clk);
+
+ return 0;
+}
+
+static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+ writel(val, mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
+{
+ return readl(mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
+{
+ return readl(mp->mem_res.cfg_base + reg);
+}
+
+static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+ writel(val, mp->mem_res.cfg_base + reg);
+}
+
+static void meson_pcie_assert_reset(struct meson_pcie *mp)
+{
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 1);
+}
+
+static void meson_pcie_init_dw(struct meson_pcie *mp)
+{
+ u32 val;
+
+ val = meson_cfg_readl(mp, PCIE_CFG0);
+ val |= APP_LTSSM_ENABLE;
+ meson_cfg_writel(mp, val, PCIE_CFG0);
+
+ val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+ val &= ~LINK_CAPABLE_MASK;
+ meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+ val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+ val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
+ meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+ val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+ val &= ~NUM_OF_LANES_MASK;
+ meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+ val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+ val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
+ meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+ meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
+ meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
+}
+
+static int meson_size_to_payload(struct meson_pcie *mp, int size)
+{
+ struct device *dev = mp->pci.dev;
+
+ /*
+ * The DWC core supports a payload size of 2^(val + 7) bytes, where val
+ * ranges from 0 to 5 and defaults to 1. If the requested size is not a
+ * power of two, or is below 2^7 or above 2^12, fall back to the default
+ * encoding of 1, i.e. 2^(1 + 7) = 256 bytes.
+ */
+ if (!is_power_of_2(size) || size < 128 || size > 4096) {
+ dev_warn(dev, "payload size %d, set to default 256\n", size);
+ return 1;
+ }
+
+ return fls(size) - 8;
+}
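A quick worked example of the 2^(val + 7) encoding computed above (sizes chosen for illustration):

/*
 *   size = 256 -> fls(256) = 9  -> 9 - 8 = 1  (2^(1 + 7) = 256)
 *   size = 512 -> fls(512) = 10 -> 10 - 8 = 2 (2^(2 + 7) = 512)
 *   size = 100 -> not a power of two, so the default encoding of 1 is used
 */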
+
+static void meson_set_max_payload(struct meson_pcie *mp, int size)
+{
+ u32 val;
+ int max_payload_size = meson_size_to_payload(mp, size);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
+{
+ u32 val;
+ int max_rd_req_size = meson_size_to_payload(mp, size);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static inline void meson_enable_memory_space(struct meson_pcie *mp)
+{
+ /* Set the RC Bus Master, Memory Space and I/O Space enables */
+ meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
+ PCIE_STATUS_COMMAND);
+}
+
+static int meson_pcie_establish_link(struct meson_pcie *mp)
+{
+ struct dw_pcie *pci = &mp->pci;
+ struct pcie_port *pp = &pci->pp;
+
+ meson_pcie_init_dw(mp);
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
+
+ dw_pcie_setup_rc(pp);
+ meson_enable_memory_space(mp);
+
+ meson_pcie_assert_reset(mp);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(&mp->pci.pp);
+}
+
+static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /*
+ * There is a bug in the MESON AXG PCIe controller whereby software
+ * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
+ * the return value in the config accessors.
+ */
+ if (where == PCI_CLASS_REVISION && size == 4)
+ *val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
+ else if (where == PCI_CLASS_DEVICE && size == 2)
+ *val = PCI_CLASS_BRIDGE_PCI;
+ else if (where == PCI_CLASS_DEVICE && size == 1)
+ *val = PCI_CLASS_BRIDGE_PCI & 0xff;
+ else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
+ *val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
+ int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+static int meson_pcie_link_up(struct dw_pcie *pci)
+{
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ struct device *dev = pci->dev;
+ u32 speed_okay = 0;
+ u32 cnt = 0;
+ u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
+
+ do {
+ state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
+ state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
+ smlh_up = IS_SMLH_LINK_UP(state12);
+ rdlh_up = IS_RDLH_LINK_UP(state12);
+ ltssm_up = IS_LTSSM_UP(state12);
+
+ if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
+ speed_okay = 1;
+
+ if (smlh_up)
+ dev_dbg(dev, "smlh_link_up is on\n");
+ if (rdlh_up)
+ dev_dbg(dev, "rdlh_link_up is on\n");
+ if (ltssm_up)
+ dev_dbg(dev, "ltssm_up is on\n");
+ if (speed_okay)
+ dev_dbg(dev, "speed_okay\n");
+
+ if (smlh_up && rdlh_up && ltssm_up && speed_okay)
+ return 1;
+
+ cnt++;
+
+ udelay(10);
+ } while (cnt < WAIT_LINKUP_TIMEOUT);
+
+ dev_err(dev, "error: wait linkup timeout\n");
+ return 0;
+}
+
+static int meson_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ int ret;
+
+ ret = meson_pcie_establish_link(mp);
+ if (ret)
+ return ret;
+
+ meson_pcie_enable_interrupts(mp);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops meson_pcie_host_ops = {
+ .rd_own_conf = meson_pcie_rd_own_conf,
+ .wr_own_conf = meson_pcie_wr_own_conf,
+ .host_init = meson_pcie_host_init,
+};
+
+static int meson_add_pcie_port(struct meson_pcie *mp,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &mp->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0) {
+ dev_err(dev, "failed to get MSI IRQ\n");
+ return pp->msi_irq;
+ }
+ }
+
+ pp->ops = &meson_pcie_host_ops;
+ pci->dbi_base = mp->mem_res.elbi_base;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = meson_pcie_link_up,
+};
+
+static int meson_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct meson_pcie *mp;
+ int ret;
+
+ mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ pci = &mp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mp->reset_gpio)) {
+ dev_err(dev, "get reset gpio failed\n");
+ return PTR_ERR(mp->reset_gpio);
+ }
+
+ ret = meson_pcie_get_resets(mp);
+ if (ret) {
+ dev_err(dev, "get reset resource failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_get_mems(pdev, mp);
+ if (ret) {
+ dev_err(dev, "get memory resource failed, %d\n", ret);
+ return ret;
+ }
+
+ meson_pcie_power_on(mp);
+ meson_pcie_reset(mp);
+
+ ret = meson_pcie_probe_clocks(mp);
+ if (ret) {
+ dev_err(dev, "init clock resources failed, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mp);
+
+ ret = meson_add_pcie_port(mp, pdev);
+ if (ret < 0) {
+ dev_err(dev, "Add PCIe port failed, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id meson_pcie_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pcie",
+ },
+ {},
+};
+
+static struct platform_driver meson_pcie_driver = {
+ .probe = meson_pcie_probe,
+ .driver = {
+ .name = "meson-pcie",
+ .of_match_table = meson_pcie_of_match,
+ },
+};
+
+builtin_platform_driver(meson_pcie_driver);
#include <linux/resource.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
+#include <linux/gpio/consumer.h>
#include "pcie-designware.h"
struct dw_pcie *pci;
struct clk *clk;
struct clk *clk_reg;
+ struct gpio_desc *reset_gpio;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
+ if (pcie->reset_gpio) {
+ /* assert and then deassert the reset signal */
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ msleep(100);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ }
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pcie);
goto fail_clkreg;
}
+ /* Get reset gpio signal and hold asserted (logically high) */
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = PTR_ERR(pcie->reset_gpio);
+ goto fail_clkreg;
+ }
+
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
return -EINVAL;
}
+ if (pci->iatu_unroll_enabled && !pci->atu_base) {
+ dev_err(dev, "atu_base is not populated\n");
+ return -EINVAL;
+ }
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
(i * MAX_MSI_IRQS_PER_CTRL) +
pos);
generic_handle_irq(irq);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- (i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, 1 << pos);
pos++;
}
}
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] &= ~(1 << bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
- pp->irq_status[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ ~pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] |= 1 << bit;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
- pp->irq_status[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ ~pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
static void dw_pci_bottom_ack(struct irq_data *d)
{
- struct msi_desc *msi = irq_data_get_msi_desc(d);
- struct pcie_port *pp;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- pp = msi_desc_to_pci_sysdata(msi);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
if (pp->ops->msi_irq_ack)
pp->ops->msi_irq_ack(d->hwirq, pp);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, ~0);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &pp->irq_status[ctrl]);
+ 4, ~0);
+ pp->irq_status[ctrl] = 0;
+ }
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dev_dbg(pci->dev, "iATU unroll: %s\n",
pci->iatu_unroll_enabled ? "enabled" : "disabled");
+ if (pci->iatu_unroll_enabled && !pci->atu_base)
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
- return dw_pcie_readl_dbi(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, offset + reg);
}
static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
- dw_pcie_writel_dbi(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, offset + reg, val);
}
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
- return dw_pcie_readl_dbi(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, offset + reg);
}
static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
- dw_pcie_writel_dbi(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, offset + reg, val);
}
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+/*
+ * The default address offset between dbi_base and atu_base. Root controller
+ * drivers are not required to initialize atu_base if the offset matches this
+ * default; the driver core automatically derives atu_base from dbi_base using
+ * this offset if atu_base is not set.
+ */
+#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+
/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
- ((0x3 << 20) | ((region) << 9))
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
+ ((region) << 9)
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+ (((region) << 9) | (0x1 << 8))
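A worked example of the resulting register addresses, assuming atu_base was left unset and therefore defaulted to dbi_base + DEFAULT_DBI_ATU_OFFSET:

/*
 *   outbound region 0, PCIE_ATU_UNR_LOWER_TARGET:
 *       atu_base + (0 << 9) + 0x14            = dbi_base + 0x300014
 *   inbound region 2, PCIE_ATU_UNR_LOWER_TARGET:
 *       atu_base + (2 << 9) + (1 << 8) + 0x14 = dbi_base + 0x300514
 */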
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
struct device *dev;
void __iomem *dbi_base;
void __iomem *dbi_base2;
+ /* Used when iatu_unroll_enabled is true */
+ void __iomem *atu_base;
u32 num_viewport;
u8 iatu_unroll_enabled;
struct pcie_port pp;
return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
}
+static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4);
+}
+
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
return 0;
}
-static struct dw_pcie_host_ops histb_pcie_host_ops = {
+static const struct dw_pcie_host_ops histb_pcie_host_ops = {
.rd_own_conf = histb_pcie_rd_own_conf,
.wr_own_conf = histb_pcie_wr_own_conf,
.host_init = histb_pcie_host_init,
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define PCL_PINCTRL0 0x002c
+#define PCL_PERST_PLDN_REGEN BIT(12)
+#define PCL_PERST_NOE_REGEN BIT(11)
+#define PCL_PERST_OUT_REGEN BIT(8)
+#define PCL_PERST_PLDN_REGVAL BIT(4)
+#define PCL_PERST_NOE_REGVAL BIT(3)
+#define PCL_PERST_OUT_REGVAL BIT(0)
+
+#define PCL_PIPEMON 0x0044
+#define PCL_PCLK_ALIVE BIT(15)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_PM0 0x8078
+#define PCL_SYS_AUX_PWR_DET BIT(8)
+
+#define PCL_RCV_INT 0x8108
+#define PCL_RCV_INT_ALL_ENABLE GENMASK(20, 17)
+#define PCL_CFG_BW_MGT_STATUS BIT(4)
+#define PCL_CFG_LINK_AUTO_BW_STATUS BIT(3)
+#define PCL_CFG_AER_RC_ERR_MSI_STATUS BIT(2)
+#define PCL_CFG_PME_MSI_STATUS BIT(1)
+
+#define PCL_RCV_INTX 0x810c
+#define PCL_RCV_INTX_ALL_ENABLE GENMASK(19, 16)
+#define PCL_RCV_INTX_ALL_MASK GENMASK(11, 8)
+#define PCL_RCV_INTX_MASK_SHIFT 8
+#define PCL_RCV_INTX_ALL_STATUS GENMASK(3, 0)
+#define PCL_RCV_INTX_STATUS_SHIFT 0
+
+#define PCL_STATUS_LINK 0x8140
+#define PCL_RDLH_LINK_UP BIT(1)
+#define PCL_XMLH_LINK_UP BIT(0)
+
+struct uniphier_pcie_priv {
+ void __iomem *base;
+ struct dw_pcie pci;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct irq_domain *legacy_irq_domain;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
+{
+ u32 val;
+
+ /* use auxiliary power detection */
+ val = readl(priv->base + PCL_APP_PM0);
+ val |= PCL_SYS_AUX_PWR_DET;
+ writel(val, priv->base + PCL_APP_PM0);
+
+ /* assert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+ | PCL_PERST_PLDN_REGVAL);
+ val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+ | PCL_PERST_PLDN_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ usleep_range(100000, 200000);
+
+ /* deassert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
+{
+ u32 status;
+ int ret;
+
+ /* wait for the PIPE clock */
+ ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ status & PCL_PCLK_ALIVE, 100000, 1000000);
+ if (ret) {
+ dev_err(priv->pci.dev,
+ "Failed to initialize controller in RC mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pcie_link_up(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ u32 val, mask;
+
+ val = readl(priv->base + PCL_STATUS_LINK);
+ mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
+
+ return (val & mask) == mask;
+}
+
+static int uniphier_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+
+ if (dw_pcie_link_up(pci))
+ return 0;
+
+ uniphier_pcie_ltssm_enable(priv, true);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
+{
+ writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
+ writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
+{
+ writel(0, priv->base + PCL_RCV_INT);
+ writel(0, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_ack(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ val = readl(priv->base + PCL_RCV_INTX);
+ val &= ~PCL_RCV_INTX_ALL_STATUS;
+ val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
+ writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_mask(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ val = readl(priv->base + PCL_RCV_INTX);
+ val &= ~PCL_RCV_INTX_ALL_MASK;
+ val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+ writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_unmask(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ val = readl(priv->base + PCL_RCV_INTX);
+ val &= ~PCL_RCV_INTX_ALL_MASK;
+ val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+ writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static struct irq_chip uniphier_pcie_irq_chip = {
+ .name = "PCI",
+ .irq_ack = uniphier_pcie_irq_ack,
+ .irq_mask = uniphier_pcie_irq_mask,
+ .irq_unmask = uniphier_pcie_irq_unmask,
+};
+
+static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops uniphier_intx_domain_ops = {
+ .map = uniphier_pcie_intx_map,
+};
+
+static void uniphier_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct pcie_port *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long reg;
+ u32 val, bit, virq;
+
+ /* INT for debug */
+ val = readl(priv->base + PCL_RCV_INT);
+
+ if (val & PCL_CFG_BW_MGT_STATUS)
+ dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
+ if (val & PCL_CFG_LINK_AUTO_BW_STATUS)
+ dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n");
+ if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS)
+ dev_dbg(pci->dev, "Root Error\n");
+ if (val & PCL_CFG_PME_MSI_STATUS)
+ dev_dbg(pci->dev, "PME Interrupt\n");
+
+ writel(val, priv->base + PCL_RCV_INT);
+
+ /* INTx */
+ chained_irq_enter(chip, desc);
+
+ val = readl(priv->base + PCL_RCV_INTX);
+ reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
+
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
+ virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct device_node *np = pci->dev->of_node;
+ struct device_node *np_intc;
+
+ np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!np_intc) {
+ dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ pp->irq = irq_of_parse_and_map(np_intc, 0);
+ if (!pp->irq) {
+ dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
+ return -EINVAL;
+ }
+
+ priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ &uniphier_intx_domain_ops, pp);
+ if (!priv->legacy_irq_domain) {
+ dev_err(pci->dev, "Failed to get INTx domain\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
+ pp);
+
+ return 0;
+}
+
+static int uniphier_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ int ret;
+
+ ret = uniphier_pcie_config_legacy_irq(pp);
+ if (ret)
+ return ret;
+
+ uniphier_pcie_irq_enable(priv);
+
+ dw_pcie_setup_rc(pp);
+ ret = uniphier_pcie_establish_link(pci);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
+ .host_init = uniphier_pcie_host_init,
+};
+
+static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &priv->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->ops = &uniphier_pcie_host_ops;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ uniphier_pcie_init_rc(priv);
+
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto out_rst_assert;
+
+ ret = uniphier_pcie_wait_rc(priv);
+ if (ret)
+ goto out_phy_exit;
+
+ return 0;
+
+out_phy_exit:
+ phy_exit(priv->phy);
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
+{
+ uniphier_pcie_irq_disable(priv);
+ phy_exit(priv->phy);
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_establish_link,
+ .stop_link = uniphier_pcie_stop_link,
+ .link_up = uniphier_pcie_link_up,
+};
+
+static int uniphier_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie_priv *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pci.dev = dev;
+ priv->pci.ops = &dw_pcie_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(priv->pci.dbi_base))
+ return PTR_ERR(priv->pci.dbi_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(priv->phy))
+ return PTR_ERR(priv->phy);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = uniphier_pcie_host_enable(priv);
+ if (ret)
+ return ret;
+
+ return uniphier_add_pcie_port(priv, pdev);
+}
+
+static int uniphier_pcie_remove(struct platform_device *pdev)
+{
+ struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
+
+ uniphier_pcie_host_disable(priv);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_pcie_match[] = {
+ { .compatible = "socionext,uniphier-pcie", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
+
+static struct platform_driver uniphier_pcie_driver = {
+ .probe = uniphier_pcie_probe,
+ .remove = uniphier_pcie_remove,
+ .driver = {
+ .name = "uniphier-pcie",
+ .of_match_table = uniphier_pcie_match,
+ },
+};
+builtin_platform_driver(uniphier_pcie_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
}
/* Reserve memory for event queue and make sure memories are zeroed */
- msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
- msi->nr_eq_region * EQ_MEM_REGION_SIZE,
- &msi->eq_dma, GFP_KERNEL);
+ msi->eq_cpu = dma_alloc_coherent(pcie->dev,
+ msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+ &msi->eq_dma, GFP_KERNEL);
if (!msi->eq_cpu) {
ret = -ENOMEM;
goto free_irqs;
* @obff_ck: pointer to OBFF functional block operating clock
* @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
* @phy: pointer to PHY control block
- * @lane: lane count
* @slot: port slot
* @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
struct clk *obff_ck;
struct clk *pipe_ck;
struct phy *phy;
- u32 lane;
u32 slot;
int irq;
struct irq_domain *irq_domain;
* @dev: pointer to PCIe device
* @base: IO mapped register base
* @free_ck: free-run reference clock
- * @io: IO resource
- * @pio: PIO resource
* @mem: non-prefetchable memory resource
- * @busn: bus range
- * @offset: IO / Memory offset
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
+ * @busnr: root bus number
*/
struct mtk_pcie {
struct device *dev;
void __iomem *base;
struct clk *free_ck;
- struct resource io;
- struct resource pio;
struct resource mem;
- struct resource busn;
- struct {
- resource_size_t mem;
- resource_size_t io;
- } offset;
struct list_head ports;
const struct mtk_pcie_soc *soc;
+ unsigned int busnr;
};
static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
if (!port)
return -ENOMEM;
- err = of_property_read_u32(node, "num-lanes", &port->lane);
- if (err) {
- dev_err(dev, "missing num-lanes property\n");
- return err;
- }
-
snprintf(name, sizeof(name), "port%d", slot);
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
port->base = devm_ioremap_resource(dev, regs);
{
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node, *child;
- struct of_pci_range_parser parser;
- struct of_pci_range range;
- struct resource res;
struct mtk_pcie_port *port, *tmp;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
+ struct resource_entry *win, *tmp_win;
+ resource_size_t io_base;
int err;
- if (of_pci_range_parser_init(&parser, node)) {
- dev_err(dev, "missing \"ranges\" property\n");
- return -EINVAL;
- }
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ windows, &io_base);
+ if (err)
+ return err;
- for_each_of_pci_range(&parser, &range) {
- err = of_pci_range_to_resource(&range, node, &res);
- if (err < 0)
- return err;
+ err = devm_request_pci_bus_resources(dev, windows);
+ if (err < 0)
+ return err;
- switch (res.flags & IORESOURCE_TYPE_BITS) {
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp_win, windows) {
+ switch (resource_type(win->res)) {
case IORESOURCE_IO:
- pcie->offset.io = res.start - range.pci_addr;
-
- memcpy(&pcie->pio, &res, sizeof(res));
- pcie->pio.name = node->full_name;
-
- pcie->io.start = range.cpu_addr;
- pcie->io.end = range.cpu_addr + range.size - 1;
- pcie->io.flags = IORESOURCE_MEM;
- pcie->io.name = "I/O";
-
- memcpy(&res, &pcie->io, sizeof(res));
+ err = devm_pci_remap_iospace(dev, win->res, io_base);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, win->res);
+ resource_list_destroy_entry(win);
+ }
break;
-
case IORESOURCE_MEM:
- pcie->offset.mem = res.start - range.pci_addr;
-
- memcpy(&pcie->mem, &res, sizeof(res));
+ memcpy(&pcie->mem, win->res, sizeof(*win->res));
pcie->mem.name = "non-prefetchable";
break;
+ case IORESOURCE_BUS:
+ pcie->busnr = win->res->start;
+ break;
}
}
- err = of_pci_parse_bus_range(node, &pcie->busn);
- if (err < 0) {
- dev_err(dev, "failed to parse bus ranges property: %d\n", err);
- pcie->busn.name = node->name;
- pcie->busn.start = 0;
- pcie->busn.end = 0xff;
- pcie->busn.flags = IORESOURCE_BUS;
- }
-
for_each_available_child_of_node(node, child) {
int slot;
return 0;
}
-static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
-{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct list_head *windows = &host->windows;
- struct device *dev = pcie->dev;
- int err;
-
- pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
- pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
- pci_add_resource(windows, &pcie->busn);
-
- err = devm_request_pci_bus_resources(dev, windows);
- if (err < 0)
- return err;
-
- err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
- if (err)
- return err;
-
- return 0;
-}
-
static int mtk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (err)
return err;
- err = mtk_pcie_request_resources(pcie);
- if (err)
- goto put_resources;
-
- host->busnr = pcie->busn.start;
+ host->busnr = pcie->busnr;
host->dev.parent = pcie->dev;
host->ops = pcie->soc->ops;
host->map_irq = of_irq_parse_and_map_pci;
return 0;
}
+static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
+{
+ unsigned int i;
+ int rc;
+
+ if (dev->no_vf_scan)
+ return 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ rc = pci_iov_add_virtfn(dev, i);
+ if (rc)
+ goto failed;
+ }
+ return 0;
+failed:
+ while (i--)
+ pci_iov_remove_virtfn(dev, i);
+
+ return rc;
+}
+
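A minimal sketch of the call path that reaches sriov_add_vfs()/sriov_del_vfs(), assuming a PF driver with a hypothetical sriov_configure() callback:

#include <linux/pci.h>

static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (num_vfs == 0) {
		/* sriov_disable() -> sriov_del_vfs() */
		pci_disable_sriov(pdev);
		return 0;
	}

	/* sriov_enable() -> sriov_add_vfs(); skipped if pdev->no_vf_scan is set */
	ret = pci_enable_sriov(pdev, num_vfs);
	return ret ? ret : num_vfs;
}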
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
msleep(100);
pci_cfg_access_unlock(dev);
- for (i = 0; i < initial; i++) {
- rc = pci_iov_add_virtfn(dev, i);
- if (rc)
- goto failed;
- }
+ rc = sriov_add_vfs(dev, initial);
+ if (rc)
+ goto err_pcibios;
kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
iov->num_VFs = nr_virtfn;
return 0;
-failed:
- while (i--)
- pci_iov_remove_virtfn(dev, i);
-
err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
return rc;
}
-static void sriov_disable(struct pci_dev *dev)
+static void sriov_del_vfs(struct pci_dev *dev)
{
- int i;
struct pci_sriov *iov = dev->sriov;
+ int i;
- if (!iov->num_VFs)
+ if (dev->no_vf_scan)
return;
for (i = 0; i < iov->num_VFs; i++)
pci_iov_remove_virtfn(dev, i);
+}
+
+static void sriov_disable(struct pci_dev *dev)
+{
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!iov->num_VFs)
+ return;
+ sriov_del_vfs(dev);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
const struct irq_affinity *affd)
{
static const struct irq_affinity msi_default_affd;
- int vecs = -ENOSPC;
+ int msix_vecs = -ENOSPC;
+ int msi_vecs = -ENOSPC;
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
}
if (flags & PCI_IRQ_MSIX) {
- vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
- affd);
- if (vecs > 0)
- return vecs;
+ msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
+ max_vecs, affd);
+ if (msix_vecs > 0)
+ return msix_vecs;
}
if (flags & PCI_IRQ_MSI) {
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
- if (vecs > 0)
- return vecs;
+ msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
+ affd);
+ if (msi_vecs > 0)
+ return msi_vecs;
}
/* use legacy irq if allowed */
}
}
- return vecs;
+ if (msix_vecs == -ENOSPC)
+ return -ENOSPC;
+ return msi_vecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
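A minimal caller-side sketch via the pci_alloc_irq_vectors() wrapper, assuming a hypothetical driver that accepts 1 to 8 vectors; with the change above, an MSI-X allocation that fails with -ENOSPC is reported as -ENOSPC even if the subsequent MSI attempt fails with a different error:

#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev)
{
	int nvecs;

	nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvecs < 0)
		return nvecs;	/* e.g. -ENOSPC from the MSI-X attempt */

	/* pci_irq_vector(pdev, i) then maps vector i to a Linux IRQ number */
	return nvecs;
}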
*
* Returns -1 if any of the clients are not compatible (behind the same
* root port as the provider), otherwise returns a positive number where
- * a lower number is the preferrable choice. (If there's one client
+ * a lower number is the preferable choice. (If there's one client
* that's the same as the provider it will return 0, which is best choice).
*
* For now, "compatible" means the provider and the clients are all behind
* @num_clients: number of client devices in the list
*
* If multiple devices are behind the same switch, the one "closest" to the
- * client devices in use will be chosen first. (So if one of the providers are
+ * client devices in use will be chosen first. (So if one of the providers is
* the same as one of the clients, that provider will be used ahead of any
* other providers that are unrelated). If multiple providers are an equal
* distance away, one will be chosen at random.
* pci_free_p2pmem - free peer-to-peer DMA memory
* @pdev: the device the memory was allocated from
* @addr: address of the memory that was allocated
- * @size: number of bytes that was allocated
+ * @size: number of bytes that were allocated
*/
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
* @nents: the number of SG entries in the list
* @length: number of bytes to allocate
*
- * Returns 0 on success
+ * Return: %NULL on error or &struct scatterlist pointer and @nents on success
*/
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length)
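With the corrected Return: line, the expected calling pattern is NULL-check, use, free. A minimal sketch, assuming the companion pci_p2pmem_free_sgl() helper from the same API (the caller function itself is hypothetical):

static int my_alloc_p2p_sgl(struct pci_dev *pdev)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = pci_p2pmem_alloc_sgl(pdev, &nents, SZ_4K);
	if (!sgl)
		return -ENOMEM;

	/* ... hand sgl/nents to the DMA engine or transport here ... */

	pci_p2pmem_free_sgl(pdev, sgl);
	return 0;
}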
*
* Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
- * exlusive use of the device driver that registers the peer-to-peer
+ * exclusive use of the device driver that registers the peer-to-peer
* memory.
*/
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
* @use_p2pdma: returns whether to enable p2pdma or not
*
* Parses an attribute value to decide whether to enable p2pdma.
- * The value can select a PCI device (using it's full BDF device
+ * The value can select a PCI device (using its full BDF device
* name) or a boolean (in any format strtobool() accepts). A false
* value disables p2pdma, a true value expects the caller
* to automatically find a compatible device and specifying a PCI device
* whether p2pdma is enabled
* @page: contents of the stored value
* @p2p_dev: the selected p2p device (NULL if no device is selected)
- * @use_p2pdma: whether p2pdme has been enabled
+ * @use_p2pdma: whether p2pdma has been enabled
*
* Attributes that use pci_p2pdma_enable_store() should use this function
* to show the value of the attribute.
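The two helpers are meant to back a sysfs/configfs attribute pair. A hedged sketch of such a pair; only the pci_p2pdma_enable_store()/pci_p2pdma_enable_show() calls come from this file, while the my_ctrl container and attribute callbacks are hypothetical:

struct my_ctrl {				/* hypothetical container */
	struct pci_dev *p2p_dev;
	bool use_p2pdma;
};

static ssize_t my_p2pmem_store(struct device *dev, struct device_attribute *attr,
			       const char *page, size_t count)
{
	struct my_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = pci_p2pdma_enable_store(page, &ctrl->p2p_dev, &ctrl->use_p2pdma);

	return ret ? ret : count;
}

static ssize_t my_p2pmem_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct my_ctrl *ctrl = dev_get_drvdata(dev);

	return pci_p2pdma_enable_show(page, ctrl->p2p_dev, ctrl->use_p2pdma);
}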
return 0;
}
- if (!pm || !pm->runtime_suspend)
- return -ENOSYS;
-
pci_dev->state_saved = false;
- error = pm->runtime_suspend(dev);
- if (error) {
+ if (pm && pm->runtime_suspend) {
+ error = pm->runtime_suspend(dev);
/*
		 * -EBUSY and -EAGAIN are used to request the runtime PM core
* to schedule a new suspend, so log the event only with debug
* log level.
*/
- if (error == -EBUSY || error == -EAGAIN)
+ if (error == -EBUSY || error == -EAGAIN) {
dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
pm->runtime_suspend, error);
- else
+ return error;
+ } else if (error) {
dev_err(dev, "can't suspend (%pf returned %d)\n",
pm->runtime_suspend, error);
-
- return error;
+ return error;
+ }
}
pci_fixup_device(pci_fixup_suspend, pci_dev);
- if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ if (pm && pm->runtime_suspend
+ && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
"PCI PM: State of device not saved by %pF\n",
static int pci_pm_runtime_resume(struct device *dev)
{
- int rc;
+ int rc = 0;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (!pci_dev->driver)
return 0;
- if (!pm || !pm->runtime_resume)
- return -ENOSYS;
-
pci_fixup_device(pci_fixup_resume_early, pci_dev);
pci_enable_wake(pci_dev, PCI_D0, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
- rc = pm->runtime_resume(dev);
+ if (pm && pm->runtime_resume)
+ rc = pm->runtime_resume(dev);
pci_dev->runtime_d3cold = false;
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
- disable_acs_redir_param = str + 18;
+ disable_acs_redir_param =
+ kstrdup(str + 18, GFP_KERNEL);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
+#include <linux/pci.h>
+
#define PCI_FIND_CAP_TTL 48
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
struct pcie_link_state *root; /* pointer to the root port link */
struct pcie_link_state *parent; /* pointer to the parent Link state */
struct list_head sibling; /* node in link_list */
- struct list_head children; /* list of child link states */
- struct list_head link; /* node in parent's children list */
/* ASPM state */
u32 aspm_support:7; /* Supported ASPM state */
return NULL;
INIT_LIST_HEAD(&link->sibling);
- INIT_LIST_HEAD(&link->children);
- INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
link->downstream = pci_function_0(pdev->subordinate);
link->parent = parent;
link->root = link->parent->root;
- list_add(&link->link, &parent->children);
}
list_add(&link->sibling, &link_list);
/* All functions are removed, so just disable ASPM for the link */
pcie_config_aspm_link(link, 0);
list_del(&link->sibling);
- list_del(&link->link);
/* Clock PM is for endpoint device */
free_link_state(link);
struct pcie_port_service_driver {
const char *name;
- int (*probe) (struct pcie_device *dev);
- void (*remove) (struct pcie_device *dev);
- int (*suspend) (struct pcie_device *dev);
- int (*resume_noirq) (struct pcie_device *dev);
- int (*resume) (struct pcie_device *dev);
- int (*runtime_suspend) (struct pcie_device *dev);
- int (*runtime_resume) (struct pcie_device *dev);
+ int (*probe)(struct pcie_device *dev);
+ void (*remove)(struct pcie_device *dev);
+ int (*suspend)(struct pcie_device *dev);
+ int (*resume_noirq)(struct pcie_device *dev);
+ int (*resume)(struct pcie_device *dev);
+ int (*runtime_suspend)(struct pcie_device *dev);
+ int (*runtime_resume)(struct pcie_device *dev);
/* Device driver may resume normal operations */
void (*error_resume)(struct pci_dev *dev);
/* Link Reset Capability - AER service driver specific */
- pci_ers_result_t (*reset_link) (struct pci_dev *dev);
+ pci_ers_result_t (*reset_link)(struct pci_dev *dev);
int port_type; /* Type of the port this driver can handle */
u32 service; /* Port service this device represents */
quirk_amd_nl_class);
/*
+ * The Synopsys USB 3.x host HAPS platform has a class code of
+ * PCI_CLASS_SERIAL_USB_XHCI, so the xhci driver can claim it. However, these
+ * devices should use the dwc3-haps driver instead. Change their class code to
+ * PCI_CLASS_SERIAL_USB_DEVICE to prevent the xhci-pci driver from claiming
+ * them.
+ */
+static void quirk_synopsys_haps(struct pci_dev *pdev)
+{
+ u32 class = pdev->class;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
+ case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
+ case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
+ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
+ pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+ class, pdev->class);
+ break;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
+ quirk_synopsys_haps);
+
+/*
* Let's make the southbridge information explicit instead of having to
 * worry about people probing the ACPI areas, for example. (Yes, it
* happens, and if you read the wrong ACPI register it will put the machine
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
-
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
+static bool use_dma_mrpc = true;
+module_param(use_dma_mrpc, bool, 0644);
+MODULE_PARM_DESC(use_dma_mrpc,
+ "Enable the use of the DMA MRPC feature");
+
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);
static void mrpc_complete_cmd(struct switchtec_dev *stdev);
+static void flush_wc_buf(struct switchtec_dev *stdev)
+{
+ struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
+
+ /*
+	 * The odb (outbound doorbell) register is handled by low-latency
+	 * hardware and reading it has no side effects, so it is used here
+	 * to flush the write-combining buffer.
+ */
+ mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
+ SWITCHTEC_NTB_REG_DBMSG_OFFSET;
+ ioread32(&mmio_dbmsg->odb);
+}
+
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
+ if (stdev->dma_mrpc) {
+ stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
+ memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
+ }
+
stuser_set_state(stuser, MRPC_RUNNING);
stdev->mrpc_busy = 1;
memcpy_toio(&stdev->mmio_mrpc->input_data,
stuser->data, stuser->data_len);
+ flush_wc_buf(stdev);
iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
- stuser->status = ioread32(&stdev->mmio_mrpc->status);
- if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
- mrpc_complete_cmd(stdev);
-
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
}
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
- stuser->status = ioread32(&stdev->mmio_mrpc->status);
+ if (stdev->dma_mrpc)
+ stuser->status = stdev->dma_mrpc->status;
+ else
+ stuser->status = ioread32(&stdev->mmio_mrpc->status);
+
if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
return;
if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
goto out;
- stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
+ if (stdev->dma_mrpc)
+ stuser->return_code = stdev->dma_mrpc->rtn_code;
+ else
+ stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
if (stuser->return_code != 0)
goto out;
- memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
- stuser->read_len);
-
+ if (stdev->dma_mrpc)
+ memcpy(stuser->data, &stdev->dma_mrpc->data,
+ stuser->read_len);
+ else
+ memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
+ stuser->read_len);
out:
complete_all(&stuser->comp);
list_del_init(&stuser->list);
mutex_lock(&stdev->mrpc_mutex);
- status = ioread32(&stdev->mmio_mrpc->status);
+ if (stdev->dma_mrpc)
+ status = stdev->dma_mrpc->status;
+ else
+ status = ioread32(&stdev->mmio_mrpc->status);
if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
}
mrpc_complete_cmd(stdev);
-
out:
mutex_unlock(&stdev->mrpc_mutex);
}
{
int ret;
int nr_idxs;
+ unsigned int event_flags;
struct switchtec_ioctl_event_ctl ctl;
if (copy_from_user(&ctl, uctl, sizeof(ctl)))
else
return -EINVAL;
+ event_flags = ctl.flags;
for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
+ ctl.flags = event_flags;
ret = event_ctl(stdev, &ctl);
if (ret < 0)
return ret;
}
}
+static void enable_dma_mrpc(struct switchtec_dev *stdev)
+{
+ writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
+ flush_wc_buf(stdev);
+ iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
+}
+
static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
+ if (stdev->dma_mrpc) {
+ iowrite32(0, &stdev->mmio_mrpc->dma_en);
+ flush_wc_buf(stdev);
+ writeq(0, &stdev->mmio_mrpc->dma_addr);
+ dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
+ stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
+ }
kfree(stdev);
}
return ret;
}
+
+static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
+{
+ struct switchtec_dev *stdev = dev;
+ irqreturn_t ret = IRQ_NONE;
+
+ iowrite32(SWITCHTEC_EVENT_CLEAR |
+ SWITCHTEC_EVENT_EN_IRQ,
+ &stdev->mmio_part_cfg->mrpc_comp_hdr);
+ schedule_work(&stdev->mrpc_work);
+
+ ret = IRQ_HANDLED;
+ return ret;
+}
+
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
int nvecs;
int event_irq;
+ int dma_mrpc_irq;
+ int rc;
nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (event_irq < 0)
return event_irq;
- return devm_request_irq(&stdev->pdev->dev, event_irq,
+ rc = devm_request_irq(&stdev->pdev->dev, event_irq,
switchtec_event_isr, 0,
KBUILD_MODNAME, stdev);
+
+ if (rc)
+ return rc;
+
+ if (!stdev->dma_mrpc)
+ return rc;
+
+ dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
+ if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
+ return -EFAULT;
+
+ dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
+ if (dma_mrpc_irq < 0)
+ return dma_mrpc_irq;
+
+ rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
+ switchtec_dma_mrpc_isr, 0,
+ KBUILD_MODNAME, stdev);
+
+ return rc;
}
static void init_pff(struct switchtec_dev *stdev)
struct pci_dev *pdev)
{
int rc;
+ void __iomem *map;
+ unsigned long res_start, res_len;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
return rc;
pci_set_master(pdev);
- stdev->mmio = pcim_iomap_table(pdev)[0];
- stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
+ res_start = pci_resource_start(pdev, 0);
+ res_len = pci_resource_len(pdev, 0);
+
+ if (!devm_request_mem_region(&pdev->dev, res_start,
+ res_len, KBUILD_MODNAME))
+ return -EBUSY;
+
+ stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
+ SWITCHTEC_GAS_TOP_CFG_OFFSET);
+ if (!stdev->mmio_mrpc)
+ return -ENOMEM;
+
+ map = devm_ioremap(&pdev->dev,
+ res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
+ res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
+ if (!map)
+ return -ENOMEM;
+
+ stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
pci_set_drvdata(pdev, stdev);
+ if (!use_dma_mrpc)
+ return 0;
+
+ if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
+ return 0;
+
+ stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
+ sizeof(*stdev->dma_mrpc),
+ &stdev->dma_mrpc_dma_addr,
+ GFP_KERNEL);
+ if (stdev->dma_mrpc == NULL)
+ return -ENOMEM;
+
return 0;
}
&stdev->mmio_part_cfg->mrpc_comp_hdr);
enable_link_state_events(stdev);
+ if (stdev->dma_mrpc)
+ enable_dma_mrpc(stdev);
+
rc = cdev_device_add(&stdev->cdev, &stdev->dev);
if (rc)
goto err_devadd;
cdev_device_del(&stdev->cdev, &stdev->dev);
ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
-
stdev_kill(stdev);
put_device(&stdev->dev);
}
/* register 0x01 */
#define REF_FREF_SEL_25 BIT(0)
-#define PHY_MODE_SATA (0x0 << 5)
+#define PHY_BERLIN_MODE_SATA (0x0 << 5)
/* register 0x02 */
#define USE_MAX_PLL_RATE BIT(12)
/* set PHY mode and ref freq to 25 MHz */
phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01,
- 0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA);
+ 0x00ff,
+ REF_FREF_SEL_25 | PHY_BERLIN_MODE_SATA);
/* set PHY up to 6 Gbps */
phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25,
default y if TI_CPSW=y
depends on TI_CPSW || COMPILE_TEST
select GENERIC_PHY
+ select REGMAP
default m
help
This driver supports configuring of the TI CPSW Port mode depending on
int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
{
+ u8 event_type;
u32 host_event;
int ret;
if (!ec_dev->mkbp_event_supported) {
ret = get_keyboard_state_event(ec_dev);
- if (ret < 0)
+ if (ret <= 0)
return ret;
if (wake_event)
}
ret = get_next_event(ec_dev);
- if (ret < 0)
+ if (ret <= 0)
return ret;
if (wake_event) {
+ event_type = ec_dev->event_data.event_type;
host_event = cros_ec_get_host_event(ec_dev);
- /* Consider non-host_event as wake event */
- *wake_event = !host_event ||
- !!(host_event & ec_dev->host_event_wake_mask);
+ /*
+ * Sensor events need to be parsed by the sensor sub-device.
+ * Defer them, and don't report the wakeup here.
+ */
+ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
+ *wake_event = false;
+ /* Masked host-events should not count as wake events. */
+ else if (host_event &&
+ !(host_event & ec_dev->host_event_wake_mask))
+ *wake_event = false;
+ /* Consider all other events as wake events. */
+ else
+ *wake_event = true;
}
return ret;
config INTEL_IPS
tristate "Intel Intelligent Power Sharing"
- depends on ACPI
+ depends on ACPI && PCI
---help---
Intel Calpella platforms support dynamic power sharing between the
CPU and GPU, maximizing performance in a given TDP. This driver,
config APPLE_GMUX
tristate "Apple Gmux Driver"
- depends on ACPI
+ depends on ACPI && PCI
depends on PNP
depends on BACKLIGHT_CLASS_DEVICE
depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
config INTEL_PMC_IPC
tristate "Intel PMC IPC Driver"
- depends on ACPI
+ depends on ACPI && PCI
---help---
This driver provides support for PMC control on some Intel platforms.
The PMC is an ARC processor which defines IPC commands for communication
extoff = NULL;
break;
}
- if (extoff->n_samples > PTP_MAX_SAMPLES) {
+ if (extoff->n_samples > PTP_MAX_SAMPLES
+ || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
err = -EINVAL;
break;
}
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
- IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
- &priv->idb_dma, GFP_KERNEL);
+ priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+ &priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
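This and the many similar hunks below are a mechanical conversion: dma_alloc_coherent() already returns zeroed memory, so the dma_zalloc_coherent() wrapper adds nothing and is being removed. The before/after shape is simply (illustrative fragment):

	/* before */
	buf = dma_zalloc_coherent(dev, size, &dma_handle, GFP_KERNEL);

	/* after - same semantics, the returned buffer is still zeroed */
	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);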
regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
/* Allocate space for DMA descriptors */
- bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
- bd_num * sizeof(struct tsi721_dma_desc),
- &bd_phys, GFP_KERNEL);
+ bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
if (!bd_ptr)
return -ENOMEM;
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
- priv->omsg_ring[mbox].sts_size *
- sizeof(struct tsi721_dma_sts),
- &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
+ priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+ &priv->omsg_ring[mbox].sts_phys,
+ GFP_KERNEL);
if (priv->omsg_ring[mbox].sts_base == NULL) {
tsi_debug(OMSG, &priv->pdev->dev,
"ENOMEM for OB_MSG_%d status FIFO", mbox);
* Allocate space for DMA descriptors
* (add an extra element for link descriptor)
*/
- bd_ptr = dma_zalloc_coherent(dev,
- (bd_num + 1) * sizeof(struct tsi721_dma_desc),
- &bd_phys, GFP_ATOMIC);
+ bd_ptr = dma_alloc_coherent(dev,
+ (bd_num + 1) * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_ATOMIC);
if (!bd_ptr)
return -ENOMEM;
sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
(bd_num + 1) : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_zalloc_coherent(dev,
+ sts_ptr = dma_alloc_coherent(dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_ATOMIC);
if (!sts_ptr) {
const bool * ctx,
struct irq_affinity *desc)
{
- int i, ret;
+ int i, ret, queue_idx = 0;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
- default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
+ default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
help
 	  This enables a simple reset controller driver for reset lines that
 	  can be asserted and deasserted by toggling bits in a contiguous,
help
This enables the RCC reset controller driver for STM32 MPUs.
+config RESET_SOCFPGA
+ bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA
+ default ARCH_SOCFPGA
+ select RESET_SIMPLE
+ help
+ This enables the reset driver for the SoCFPGA ARMv7 platforms. This
+ driver gets initialized early during platform init calls.
+
config RESET_SUNXI
bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
default ARCH_SUNXI
Say Y if you want to control reset signals provided by System Control
block, Media I/O block, Peripheral Block.
-config RESET_UNIPHIER_USB3
- tristate "USB3 reset driver for UniPhier SoCs"
+config RESET_UNIPHIER_GLUE
+ tristate "Reset driver in glue layer for UniPhier SoCs"
depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
default ARCH_UNIPHIER
select RESET_SIMPLE
help
- Support for the USB3 core reset on UniPhier SoCs.
- Say Y if you want to control reset signals provided by
- USB3 glue layer.
+	  Support for the reset of peripheral cores through their glue layer
+	  on UniPhier SoCs. Say Y if you want to control reset signals
+	  provided by the glue layer.
config RESET_ZYNQ
bool "ZYNQ Reset Driver" if COMPILE_TEST
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
+obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
-obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o
+obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
+
+static int reset_control_get_count_from_lookup(struct device *dev)
+{
+ const struct reset_control_lookup *lookup;
+ const char *dev_id;
+ int count = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ dev_id = dev_name(dev);
+ mutex_lock(&reset_lookup_mutex);
+
+ list_for_each_entry(lookup, &reset_lookup_list, list) {
+ if (!strcmp(lookup->dev_id, dev_id))
+ count++;
+ }
+
+ mutex_unlock(&reset_lookup_mutex);
+
+ if (count == 0)
+ count = -ENOENT;
+
+ return count;
+}
+
+/**
+ * reset_control_get_count - Count number of resets available with a device
+ *
+ * @dev: device for which to return the number of resets
+ *
+ * Returns the positive reset count on success, or a negative error number
+ * on failure or when the count is zero.
+ */
+int reset_control_get_count(struct device *dev)
+{
+ if (dev->of_node)
+ return of_reset_control_get_count(dev->of_node);
+
+ return reset_control_get_count_from_lookup(dev);
+}
+EXPORT_SYMBOL_GPL(reset_control_get_count);
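A consumer can use the new helper to size its reset handling before requesting anything. A minimal sketch in a platform driver probe; everything except reset_control_get_count() is illustrative:

static int my_probe(struct platform_device *pdev)
{
	int nr_resets;

	nr_resets = reset_control_get_count(&pdev->dev);
	if (nr_resets < 0)
		return nr_resets;

	dev_info(&pdev->dev, "device exposes %d reset lines\n", nr_resets);
	return 0;
}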
static const struct reset_control_ops hsdk_reset_ops = {
.reset = hsdk_reset_reset,
+ .deassert = hsdk_reset_reset,
};
static int hsdk_reset_probe(struct platform_device *pdev)
#define SOCFPGA_NR_BANKS 8
static const struct reset_simple_devdata reset_simple_socfpga = {
- .reg_offset = 0x10,
+ .reg_offset = 0x20,
.nr_resets = SOCFPGA_NR_BANKS * 32,
.status_active_low = true,
};
};
static const struct of_device_id reset_simple_dt_ids[] = {
- { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga },
+ { .compatible = "altr,stratix10-rst-mgr",
+ .data = &reset_simple_socfpga },
{ .compatible = "st,stm32-rcc", },
{ .compatible = "allwinner,sun6i-a31-clock-reset",
.data = &reset_simple_active_low },
data->status_active_low = devdata->status_active_low;
}
- if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") &&
- of_property_read_u32(dev->of_node, "altr,modrst-offset",
- ®_offset)) {
- dev_warn(dev,
- "missing altr,modrst-offset property, assuming 0x%x!\n",
- reg_offset);
- }
-
data->membase += reg_offset;
return devm_reset_controller_register(dev, &data->rcdev);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Intel Corporation
+ * Copied from reset-sunxi.c
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "reset-simple.h"
+
+#define SOCFPGA_NR_BANKS 8
+void __init socfpga_reset_init(void);
+
+static int a10_reset_init(struct device_node *np)
+{
+ struct reset_simple_data *data;
+ struct resource res;
+ resource_size_t size;
+ int ret;
+ u32 reg_offset = 0x10;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ goto err_alloc;
+
+ size = resource_size(&res);
+ if (!request_mem_region(res.start, size, np->name)) {
+ ret = -EBUSY;
+ goto err_alloc;
+ }
+
+ data->membase = ioremap(res.start, size);
+ if (!data->membase) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ if (of_property_read_u32(np, "altr,modrst-offset", ®_offset))
+ pr_warn("missing altr,modrst-offset property, assuming 0x10\n");
+ data->membase += reg_offset;
+
+ spin_lock_init(&data->lock);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32;
+ data->rcdev.ops = &reset_simple_ops;
+ data->rcdev.of_node = np;
+ data->status_active_low = true;
+
+ return reset_controller_register(&data->rcdev);
+
+err_alloc:
+ kfree(data);
+ return ret;
+};
+
+/*
+ * These are the reset controllers we need to initialize early on in
+ * our system, before we can even think of using a regular device
+ * driver for them.
+ * The controllers that we can register through the regular device
+ * model are handled by the simple reset driver directly.
+ */
+static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = {
+ { .compatible = "altr,rst-mgr", },
+ { /* sentinel */ },
+};
+
+void __init socfpga_reset_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, socfpga_early_reset_dt_ids)
+ a10_reset_init(np);
+}
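socfpga_reset_init() is intended to be called from early platform code, before the driver model is available. A hedged sketch of such a call site (the surrounding init hook is hypothetical, not part of this patch):

static void __init my_socfpga_early_init(void)
{
	/* Register the "altr,rst-mgr" controllers before any consumer probes. */
	socfpga_reset_init();
}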
// SPDX-License-Identifier: GPL-2.0
//
-// reset-uniphier-usb3.c - USB3 reset driver for UniPhier
+// reset-uniphier-glue.c - Glue layer reset driver for UniPhier
// Copyright 2018 Socionext Inc.
// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
#define MAX_CLKS 2
#define MAX_RSTS 2
-struct uniphier_usb3_reset_soc_data {
+struct uniphier_glue_reset_soc_data {
int nclks;
const char * const *clock_names;
int nrsts;
const char * const *reset_names;
};
-struct uniphier_usb3_reset_priv {
+struct uniphier_glue_reset_priv {
struct clk_bulk_data clk[MAX_CLKS];
struct reset_control *rst[MAX_RSTS];
struct reset_simple_data rdata;
- const struct uniphier_usb3_reset_soc_data *data;
+ const struct uniphier_glue_reset_soc_data *data;
};
-static int uniphier_usb3_reset_probe(struct platform_device *pdev)
+static int uniphier_glue_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct uniphier_usb3_reset_priv *priv;
+ struct uniphier_glue_reset_priv *priv;
struct resource *res;
resource_size_t size;
const char *name;
return ret;
}
-static int uniphier_usb3_reset_remove(struct platform_device *pdev)
+static int uniphier_glue_reset_remove(struct platform_device *pdev)
{
- struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev);
+ struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev);
int i;
for (i = 0; i < priv->data->nrsts; i++)
"gio", "link",
};
-static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = {
+static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = {
.nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
.clock_names = uniphier_pro4_clock_reset_names,
.nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
"link",
};
-static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = {
+static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = {
.nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
.clock_names = uniphier_pxs2_clock_reset_names,
.nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
.reset_names = uniphier_pxs2_clock_reset_names,
};
-static const struct of_device_id uniphier_usb3_reset_match[] = {
+static const struct of_device_id uniphier_glue_reset_match[] = {
{
.compatible = "socionext,uniphier-pro4-usb3-reset",
.data = &uniphier_pro4_data,
.compatible = "socionext,uniphier-pxs3-usb3-reset",
.data = &uniphier_pxs2_data,
},
+ {
+ .compatible = "socionext,uniphier-pro4-ahci-reset",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-ahci-reset",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-ahci-reset",
+ .data = &uniphier_pxs2_data,
+ },
{ /* Sentinel */ }
};
-MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match);
+MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
-static struct platform_driver uniphier_usb3_reset_driver = {
- .probe = uniphier_usb3_reset_probe,
- .remove = uniphier_usb3_reset_remove,
+static struct platform_driver uniphier_glue_reset_driver = {
+ .probe = uniphier_glue_reset_probe,
+ .remove = uniphier_glue_reset_remove,
.driver = {
- .name = "uniphier-usb3-reset",
- .of_match_table = uniphier_usb3_reset_match,
+ .name = "uniphier-glue-reset",
+ .of_match_table = uniphier_glue_reset_match,
},
};
-module_platform_driver(uniphier_usb3_reset_driver);
+module_platform_driver(uniphier_glue_reset_driver);
MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
-MODULE_DESCRIPTION("UniPhier USB3 Reset Driver");
+MODULE_DESCRIPTION("UniPhier Glue layer reset driver");
MODULE_LICENSE("GPL");
dma_addr_t dma_handle;
struct ism_sba *sba;
- sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!sba)
return -ENOMEM;
dma_addr_t dma_handle;
struct ism_eq *ieq;
- ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!ieq)
return -ENOMEM;
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr, GFP_KERNEL |
- __GFP_NOWARN | __GFP_NOMEMALLOC |
- __GFP_COMP | __GFP_NORETRY);
+ dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+ &dmb->dma_addr,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
if (!dmb->cpu_addr)
clear_bit(dmb->sba_idx, ism->sba_bitmap);
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
unsigned long *indicatorp = NULL;
- int ret, i;
+ int ret, i, queue_idx = 0;
struct ccw1 *ccw;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
return -ENOMEM;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
- ctx ? ctx[i] : false, ccw);
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
+ names[i], ctx ? ctx[i] : false,
+ ccw);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
vqs[i] = NULL;
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
- size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle,
+ GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
- host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
- GFP_KERNEL);
+ host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys,
+ GFP_KERNEL);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
- host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
- GFP_KERNEL);
+ host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys,
+ GFP_KERNEL);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg;
acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
struct MessageUnit_D *reg;
acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
uint32_t completeQ_size;
completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
acb->roundup_ccbsize = roundup(completeQ_size, 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent){
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
struct be_dma_mem *cmd,
u8 subsystem, u8 opcode, u32 size)
{
- cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
- GFP_KERNEL);
+ cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+ GFP_KERNEL);
if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
return -EINVAL;
nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
- nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
- nonemb_cmd.size,
- &nonemb_cmd.dma,
- GFP_KERNEL);
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
+ nonemb_cmd.size, &nonemb_cmd.dma,
+ GFP_KERNEL);
if (!nonemb_cmd.va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
"BM_%d : invldt_cmds_params alloc failed\n");
/* Allocate dma coherent memory */
buf_info = buf_base;
buf_info->size = payload_len;
- buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
- buf_info->size, &buf_info->phys,
- GFP_KERNEL);
+ buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
+ buf_info->size, &buf_info->phys,
+ GFP_KERNEL);
if (!buf_info->virt)
goto out_free_mem;
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
}
- hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
- mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->xferq_mem_size, &tgt->xferq_dma,
- GFP_KERNEL);
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->confq_mem_size, &tgt->confq_dma,
- GFP_KERNEL);
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
q = wrm->q_arr[free_idx];
- q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
- GFP_KERNEL);
+ q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+ GFP_KERNEL);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
- unsigned int tid, int pg_idx, bool reply)
+ unsigned int tid, int pg_idx)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0xF0000000);
* @tid: connection id
* @hcrc: header digest enabled
* @dcrc: data digest enabled
- * @reply: request reply from h/w
* set up the iscsi digest settings for a connection identified by tid
*/
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0x0F000000);
struct cxgbi_sock *csk;
csk = lookup_tid(t, tid);
- if (!csk)
+ if (!csk) {
pr_err("can't find conn. for tid %u.\n", tid);
+ return;
+ }
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
csk, csk->state, csk->flags, csk->tid, rpl->status);
- if (rpl->status != CPL_ERR_NONE)
+ if (rpl->status != CPL_ERR_NONE) {
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
csk, tid, rpl->status);
+ csk->err = -EINVAL;
+ }
+
+ complete(&csk->cmpl);
__kfree_skb(skb);
}
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
- int pg_idx, bool reply)
+ int pg_idx)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
timer_setup(&csk->retry_timer, NULL, 0);
+ init_completion(&csk->cmpl);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
if (!err && conn->hdrdgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_DATADGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && conn->datadgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
ppm = csk->cdev->cdev2ppm(csk->cdev);
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
- ppm->tformat.pgsz_idx_dflt, 0);
+ ppm->tformat.pgsz_idx_dflt);
if (err < 0)
return err;
struct sk_buff_head receive_queue;
struct sk_buff_head write_queue;
struct timer_list retry_timer;
+ struct completion cmpl;
int err;
rwlock_t callback_lock;
void *user_data;
struct cxgbi_ppm *,
struct cxgbi_task_tag_info *);
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
- unsigned int, int, int, int);
+ unsigned int, int, int);
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
- unsigned int, int, bool);
+ unsigned int, int);
void (*csk_release_offload_resources)(struct cxgbi_sock *);
int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ if (hisi_hba->prot_mask) {
+ dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+ prot_mask);
+ scsi_host_set_prot(hisi_hba->shost, prot_mask);
+ }
+
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
if (rc)
goto err_out_register_ha;
- if (hisi_hba->prot_mask) {
- dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
- prot_mask);
- scsi_host_set_prot(hisi_hba->shost, prot_mask);
- }
-
scsi_scan_host(shost);
return 0;
shost->max_lun = ~0;
shost->max_cmd_len = MAX_COMMAND_SIZE;
+ /* turn on DIF support */
+ scsi_host_set_prot(shost,
+ SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
err = scsi_add_host(shost, &pdev->dev);
if (err)
goto err_shost;
goto err_host_alloc;
}
pci_info->hosts[i] = h;
-
- /* turn on DIF support */
- scsi_host_set_prot(to_shost(h),
- SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIF_TYPE3_PROTECTION);
- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
INIT_LIST_HEAD(&dmabuf->list);
/* now, allocate dma buffer */
- dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
- &(dmabuf->phys), GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
if (!dmabuf)
return NULL;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
- LPFC_HDR_TEMPLATE_SIZE,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_HDR_TEMPLATE_SIZE,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
rpi_hdr = NULL;
goto err_free_dmabuf;
}
/* Allocate memory for SLI-2 structures */
- phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
- &phba->slim2p.phys, GFP_KERNEL);
+ phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ &phba->slim2p.phys, GFP_KERNEL);
if (!phba->slim2p.virt)
goto out_iounmap;
* plus an alignment restriction of 16 bytes.
*/
bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
* page, this is used as a priori size of SLI4_PAGE_SIZE for
* the later DMA memory free.
*/
- viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
- SLI4_PAGE_SIZE, &phyaddr,
- GFP_KERNEL);
+ viraddr = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE, &phyaddr,
+ GFP_KERNEL);
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
* mailbox command.
*/
dma_size = *vpd_size;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
goto free_mem;
}
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
LPFC_RAS_MAX_ENTRY_SIZE,
- &dmabuf->phys,
- GFP_KERNEL);
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
rc = -ENOMEM;
cmnd = CMD_XMIT_SEQUENCE64_CR;
if (phba->link_flag & LS_LOOPBACK_MODE)
bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+ /* fall through */
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
+ /* fall through */
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
- /* Drop thru */
+ /* fall through */
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
goto out_fail;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
- hw_page_size, &dmabuf->phys,
- GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ hw_page_size, &dmabuf->phys,
+ GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
eq->entry_count);
if (eq->entry_count < 256)
return -EINVAL;
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_256);
LPFC_CQ_CNT_WORD7);
break;
}
- /* Fall Thru */
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_256);
LPFC_CQ_CNT_WORD7);
break;
}
- /* Fall Thru */
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest (drop thru) */
+ /* fall through - otherwise default to smallest */
case 256:
bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
&cq_set->u.request, LPFC_CQ_CNT_256);
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 16:
bf_set(lpfc_mq_context_ring_size,
&mq_create_ext->u.request.context,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
- sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
- GFP_KERNEL);
+ raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(mbox64_t),
+ &raid_dev->una_mbox64_dma,
+ GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
align;
// Allocate memory for commands issued internally
- adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h, GFP_KERNEL);
+ adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h, GFP_KERNEL);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h, GFP_KERNEL);
+ pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h, GFP_KERNEL);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __func__,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h, GFP_KERNEL);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- dma_zalloc_coherent(&instance->pdev->dev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h, GFP_KERNEL);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
if (initial) {
instance->hb_host_mem =
- dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h,
+ GFP_KERNEL);
if (!instance->hb_host_mem) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for scsi%d\n",
}
dcmd = &cmd->frame->dcmd;
- el_info = dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct megasas_evt_log_info), &el_info_h,
- GFP_KERNEL);
+ el_info = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h, GFP_KERNEL);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
instance->consistent_mask_64bit = true;
dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
- ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"),
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
(instance->consistent_mask_64bit ? "63" : "32"));
return 0;
/*
* Check if it is our interrupt
*/
- status = readl(®s->outbound_intr_status);
+ status = megasas_readl(instance,
+ ®s->outbound_intr_status);
if (status & 1) {
writel(status, ®s->outbound_intr_status);
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
- array_size, &fusion->rdpq_phys, GFP_KERNEL);
+ fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
+ array_size, &fusion->rdpq_phys,
+ GFP_KERNEL);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
- ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
+ dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
+ ms->dma_cmd_size, &dma_cmd_bus,
+ GFP_KERNEL);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
case RESOURCE_UNCACHED_MEMORY:
size = round_up(size, 8);
- res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
- &res->bus_addr, GFP_KERNEL);
+ res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
+ &res->bus_addr,
+ GFP_KERNEL);
if (!res->virt_addr) {
dev_err(&mhba->pdev->dev,
"unable to allocate consistent mem,"
if (size == 0)
return 0;
- virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
- GFP_KERNEL);
+ virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+ GFP_KERNEL);
if (!virt_addr)
return -1;
u64 align_offset = 0;
if (align)
align_offset = (dma_addr_t)align - 1;
- mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
- &mem_dma_handle, GFP_KERNEL);
+ mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
+ &mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
pm8001_printk("memory allocation error\n");
return -1;
if (dev->dev_type == SAS_SATA_DEV) {
pm8001_device->attached_phy =
dev->rphy->identify.phy_identifier;
- flag = 1; /* directly sata*/
+ flag = 1; /* directly sata */
}
} /*register this device to HBA*/
PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
sizeof(void *);
fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
- fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
- fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
+ fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+ &fcport->sq_dma, GFP_KERNEL);
if (!fcport->sq) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
rval = 1;
goto out;
}
- fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
- fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+ fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+ fcport->sq_pbl_size,
+ &fcport->sq_pbl_dma, GFP_KERNEL);
if (!fcport->sq_pbl) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
rval = 1;
}
/* Allocate list of PBL pages */
- qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
- QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+ qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_PAGE_SIZE,
+ &qedf->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedf->bdq_pbl_list) {
QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
return -ENOMEM;
ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
qedf->global_queues[i]->cq =
- dma_zalloc_coherent(&qedf->pdev->dev,
- qedf->global_queues[i]->cq_mem_size,
- &qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_mem_size,
+ &qedf->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedf->global_queues[i]->cq) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
}
qedf->global_queues[i]->cq_pbl =
- dma_zalloc_coherent(&qedf->pdev->dev,
- qedf->global_queues[i]->cq_pbl_size,
- &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_pbl_size,
+ &qedf->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedf->global_queues[i]->cq_pbl) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
qedi_ep = ep->dd_data;
if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
return -1;
switch (qedi_ep->state) {
case EP_STATE_OFLDCONN_START:
+ case EP_STATE_OFLDCONN_NONE:
goto ep_release_conn;
case EP_STATE_OFLDCONN_FAILED:
break;
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
ret = -EIO;
goto set_path_exit;
}
EP_STATE_OFLDCONN_FAILED = 0x2000,
EP_STATE_CONNECT_FAILED = 0x4000,
EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+ EP_STATE_OFLDCONN_NONE = 0x10000,
};
struct qedi_conn;
{
struct qedi_nvm_iscsi_image nvm_image;
- qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
- sizeof(nvm_image),
- &qedi->nvm_buf_dma,
- GFP_KERNEL);
+ qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
+ sizeof(nvm_image),
+ &qedi->nvm_buf_dma, GFP_KERNEL);
if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
return -ENOMEM;
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev,
- QEDI_PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
status = -ENOMEM;
goto mem_alloc_failure;
}
- qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
- if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
if (qla2x00_chip_is_down(vha))
goto done;
- stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
- &stats_dma, GFP_KERNEL);
+ stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+ GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
- &stats_dma, GFP_KERNEL);
+ stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+ GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
uint16_t n2n_id;
struct list_head gpnid_list;
struct fab_scan scan;
+
+ unsigned int irq_offset;
} scsi_qla_host_t;
struct qla27xx_image_status {
return rval;
}
- sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
- &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
- &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xffff,
((vha->hw->max_fibre_devices - 1) *
sizeof(struct ct_sns_gpn_ft_data));
- sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
- &vha->hw->pdev->dev, rspsz,
- &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ rspsz,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
FCE_SIZE, ha->fce, ha->fce_dma);
/* Allocate memory for Fibre Channel Event Buffer. */
- tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00be,
"Unable to allocate (%d KB) for FCE.\n",
EFT_SIZE, ha->eft, ha->eft_dma);
/* Allocate memory for Extended Trace Buffer. */
- tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
"Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
}
}
+ vha->irq_offset = desc.pre_vectors;
ha->msix_entries = kcalloc(ha->msix_count,
sizeof(struct qla_msix_entry),
GFP_KERNEL);
if (USER_CTRL_IRQ(vha->hw))
rc = blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
+ rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
return rc;
}
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
fw_ddb_entry);
+ if (rc)
+ goto free_sess;
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
__func__, fnode_sess->dev.kobj.name);
if (err == 0) {
pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
+ err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ /*
+ * Forcibly set runtime PM status of request queue to "active"
+ * to make sure we can again get requests from the queue
+ * (see also blk_pm_peek_request()).
+ *
+ * The resume hook will correct runtime PM status of the disk.
+ */
+ if (!err && scsi_is_sdev_device(dev)) {
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (sdev->request_queue->dev)
+ blk_set_runtime_active(sdev->request_queue);
+ }
}
return err;
else
fn = NULL;
- /*
- * Forcibly set runtime PM status of request queue to "active" to
- * make sure we can again get requests from the queue (see also
- * blk_pm_peek_request()).
- *
- * The resume hook will correct runtime PM status of the disk.
- */
- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
if (fn) {
async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
sp = buffer_data[0] & 0x80 ? 1 : 0;
buffer_data[0] &= ~0x80;
+ /*
+ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+ * received mode parameter buffer before doing MODE SELECT.
+ */
+ data.device_specific = 0;
+
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
SD_MAX_RETRIES, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
- return device->in_remove & !ctrl_info->in_shutdown;
+ return device->in_remove && !ctrl_info->in_shutdown;
}
static inline void pqi_schedule_rescan_worker_with_delay(
alloc_length += PQI_EXTRA_SGL_MEMORY;
ctrl_info->queue_memory_base =
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- alloc_length,
- &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+ &ctrl_info->queue_memory_base_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->queue_memory_base)
return -ENOMEM;
PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
ctrl_info->admin_queue_memory_base =
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- alloc_length,
- &ctrl_info->admin_queue_memory_base_dma_handle,
- GFP_KERNEL);
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+ &ctrl_info->admin_queue_memory_base_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->admin_queue_memory_base)
return -ENOMEM;
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
dma_addr_t dma_handle;
ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_zalloc_coherent(dev, chunk_size, &dma_handle,
- GFP_KERNEL);
+ dma_alloc_coherent(dev, chunk_size, &dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
break;
struct device *dev;
dev = &ctrl_info->pci_dev->dev;
- pqi_ofa_memory = dma_zalloc_coherent(dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
- &ctrl_info->pqi_ofa_mem_dma_handle,
- GFP_KERNEL);
+ pqi_ofa_memory = dma_alloc_coherent(dev,
+ PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
+ &ctrl_info->pqi_ofa_mem_dma_handle,
+ GFP_KERNEL);
if (!pqi_ofa_memory)
return;
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
QUERY_DESC_UNIT_DEF_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
};
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
return -ENODEV;
}
- if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+ if (!dma_alloc_coherent(dev, *size, addr, 0)) {
dev_err(dev, "DMA Alloc memory failed\n");
return -ENODEV;
}
const char *sprop;
int ret = 0;
u32 val;
- struct resource *res;
- struct device_node *np2;
- static int siram_init_flag;
- struct platform_device *pdev;
sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
if (sprop) {
utdm->siram_entry_id = val;
set_si_param(utdm, ut_info);
-
- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
- if (!np2)
- return -EINVAL;
-
- pdev = of_find_device_by_node(np2);
- if (!pdev) {
- pr_err("%pOFn: failed to lookup pdev\n", np2);
- of_node_put(np2);
- return -EINVAL;
- }
-
- of_node_put(np2);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(utdm->si_regs)) {
- ret = PTR_ERR(utdm->si_regs);
- goto err_miss_siram_property;
- }
-
- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
- if (!np2) {
- ret = -EINVAL;
- goto err_miss_siram_property;
- }
-
- pdev = of_find_device_by_node(np2);
- if (!pdev) {
- ret = -EINVAL;
- pr_err("%pOFn: failed to lookup pdev\n", np2);
- of_node_put(np2);
- goto err_miss_siram_property;
- }
-
- of_node_put(np2);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- utdm->siram = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(utdm->siram)) {
- ret = PTR_ERR(utdm->siram);
- goto err_miss_siram_property;
- }
-
- if (siram_init_flag == 0) {
- memset_io(utdm->siram, 0, resource_size(res));
- siram_init_flag = 1;
- }
-
- return ret;
-
-err_miss_siram_property:
- devm_iounmap(&pdev->dev, utdm->si_regs);
return ret;
}
EXPORT_SYMBOL(ucc_of_parse_tdm);
bool
select ARM_AMBA
-if ARM
+if ARM && ARCH_RENESAS
#comment "Renesas ARM SoCs System Type"
{ "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A },
};
-static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas,
- unsigned int num_areas, u8 id,
- int new_parent)
-{
- unsigned int i;
-
- for (i = 0; i < num_areas; i++)
- if (areas[i].isr_bit == id) {
- areas[i].parent = new_parent;
- return;
- }
-}
-
/* Fixups for RZ/G2E ES1.0 revision */
static const struct soc_device_attribute r8a774c0[] __initconst = {
{ .soc_id = "r8a774c0", .revision = "ES1.0" },
static int __init r8a774c0_sysc_init(void)
{
if (soc_device_match(r8a774c0)) {
- rcar_sysc_fix_parent(r8a774c0_areas,
- ARRAY_SIZE(r8a774c0_areas),
- R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B);
- rcar_sysc_fix_parent(r8a774c0_areas,
- ARRAY_SIZE(r8a774c0_areas),
- R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON);
+ /* Fix incorrect 3DG hierarchy */
+ swap(r8a774c0_areas[6], r8a774c0_areas[7]);
+ r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON;
+ r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B;
}
return 0;
int i;
/* allocate coherent DMAable memory for hardware buffer descriptors. */
- sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
- sizeof(*bd) * PESQI_BD_COUNT,
- &sqi->bd_dma, GFP_KERNEL);
+ sqi->bd = dma_alloc_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ &sqi->bd_dma, GFP_KERNEL);
if (!sqi->bd) {
dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
return -ENOMEM;
if (!ring->tx_buf)
goto no_tx_mem;
- ring->tx_dma = dma_zalloc_coherent(eth->dev,
- ring->tx_ring_size * sz,
+ ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
&ring->tx_phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->tx_dma)
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
- crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+ crypto_ops = lib80211_get_crypto_ops("WEP");
if (!crypto_ops)
return;
void *crypto_private = NULL;
int status = _SUCCESS;
const int keyindex = prxattrib->key_index;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
char iv[4], icv[4];
if (!crypto_ops) {
struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
void *crypto_private = NULL;
u8 *key, *pframe = skb->data;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp");
+ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
struct security_priv *psecuritypriv = &padapter->securitypriv;
char iv[8], icv[8];
{ SDIO_DEVICE(0x024c, 0xb723), },
{ /* end: all zeroes */ },
};
-static const struct acpi_device_id acpi_ids[] __used = {
+static const struct acpi_device_id acpi_ids[] = {
{"OBDA8723", 0x0000},
{}
};
/* Allocate enough storage to hold the page pointers and the page
* list
*/
- pagelist = dma_zalloc_coherent(g_dev,
- pagelist_size,
- &dma_addr,
- GFP_KERNEL);
+ pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
+ GFP_KERNEL);
vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
void *vir_pool;
/*allocate all RD/TD rings a single pool*/
- vir_pool = dma_zalloc_coherent(&priv->pcid->dev,
- priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
- priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
- priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
- priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
- &priv->pool_dma, GFP_ATOMIC);
+ vir_pool = dma_alloc_coherent(&priv->pcid->dev,
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
+ priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
+ priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
+ &priv->pool_dma, GFP_ATOMIC);
if (!vir_pool) {
dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
return false;
priv->rd1_pool_dma = priv->rd0_pool_dma +
priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
- priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev,
- priv->opts.tx_descs[0] * PKT_BUF_SZ +
- priv->opts.tx_descs[1] * PKT_BUF_SZ +
- CB_BEACON_BUF_SIZE +
- CB_MAX_BUF_SIZE,
- &priv->tx_bufs_dma0,
- GFP_ATOMIC);
+ priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
+ priv->opts.tx_descs[0] * PKT_BUF_SZ +
+ priv->opts.tx_descs[1] * PKT_BUF_SZ +
+ CB_BEACON_BUF_SIZE +
+ CB_MAX_BUF_SIZE,
+ &priv->tx_bufs_dma0, GFP_ATOMIC);
if (!priv->tx0_bufs) {
dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
- pr_err("nable to kmem_cache_create() for"
+ pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
goto bitmap_out;
}
size_t ring_size;
struct mutex cmdr_lock;
- struct list_head cmdr_queue;
+ struct list_head qfull_queue;
uint32_t dbi_max;
uint32_t dbi_thresh;
struct timer_list cmd_timer;
unsigned int cmd_time_out;
+ struct list_head inflight_queue;
struct timer_list qfull_timer;
int qfull_time_out;
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
- struct list_head cmdr_queue_entry;
+ struct list_head queue_entry;
uint16_t cmd_id;
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_INFLIGHT 1
unsigned long flags;
};
/*
if (!tcmu_cmd)
return NULL;
- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
+ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
return 0;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
- mod_timer(timer, tcmu_cmd->deadline);
+ if (!timer_pending(timer))
+ mod_timer(timer, tcmu_cmd->deadline);
+
return 0;
}
-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
if (ret)
return ret;
- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
tcmu_cmd->cmd_id, udev->name);
return 0;
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->cmdr_queue))
+ if (!list_empty(&udev->qfull_queue))
goto queue;
mb = udev->mb_addr;
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
+ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
+
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
return 0;
queue:
- if (add_to_cmdr_queue(tcmu_cmd)) {
+ if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
goto out;
+ list_del_init(&cmd->queue_entry);
+
tcmu_cmd_reset_dbi_cur(cmd);
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
tcmu_free_cmd(cmd);
}
+static void tcmu_set_next_deadline(struct list_head *queue,
+ struct timer_list *timer)
+{
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+ unsigned long deadline = 0;
+
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
+ if (!time_after(jiffies, tcmu_cmd->deadline)) {
+ deadline = tcmu_cmd->deadline;
+ break;
+ }
+ }
+
+ if (deadline)
+ mod_timer(timer, deadline);
+ else
+ del_timer(timer);
+}
+
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
int handled = 0;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
- struct tcmu_cmd *cmd;
tcmu_flush_dcache_range(entry, sizeof(*entry));
/* no more pending commands */
del_timer(&udev->cmd_timer);
- if (list_empty(&udev->cmdr_queue)) {
+ if (list_empty(&udev->qfull_queue)) {
/*
* no more pending or waiting commands so try to
* reclaim blocks if needed.
tcmu_global_max_blocks)
schedule_delayed_work(&tcmu_unmap_work, 0);
}
+ } else if (udev->cmd_time_out) {
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
}
return handled;
if (!time_after(jiffies, cmd->deadline))
return 0;
- is_running = list_empty(&cmd->cmdr_queue_entry);
+ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
se_cmd = cmd->se_cmd;
if (is_running) {
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
- list_del_init(&cmd->cmdr_queue_entry);
-
idr_remove(&udev->commands, id);
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
}
+ list_del_init(&cmd->queue_entry);
pr_debug("Timing out cmd %u on dev %s that is %s.\n",
id, udev->name, is_running ? "inflight" : "queued");
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
- INIT_LIST_HEAD(&udev->cmdr_queue);
+ INIT_LIST_HEAD(&udev->qfull_queue);
+ INIT_LIST_HEAD(&udev->inflight_queue);
idr_init(&udev->commands);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
return &udev->se_dev;
}
-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
+static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
sense_reason_t scsi_ret;
int ret;
- if (list_empty(&udev->cmdr_queue))
+ if (list_empty(&udev->qfull_queue))
return true;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
- list_splice_init(&udev->cmdr_queue, &cmds);
+ list_splice_init(&udev->qfull_queue, &cmds);
- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
- list_del_init(&tcmu_cmd->cmdr_queue_entry);
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
+ list_del_init(&tcmu_cmd->queue_entry);
pr_debug("removing cmd %u on dev %s from queue\n",
tcmu_cmd->cmd_id, udev->name);
* cmd was requeued, so just put all cmds back in
* the queue
*/
- list_splice_tail(&cmds, &udev->cmdr_queue);
+ list_splice_tail(&cmds, &udev->qfull_queue);
drained = false;
- goto done;
+ break;
}
}
- if (list_empty(&udev->cmdr_queue))
- del_timer(&udev->qfull_timer);
-done:
+
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
return drained;
}
mutex_lock(&udev->cmdr_lock);
tcmu_handle_completions(udev);
- run_cmdr_queue(udev, false);
+ run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
/* complete IO that has executed successfully */
tcmu_handle_completions(udev);
/* fail IO waiting to be queued */
- run_cmdr_queue(udev, true);
+ run_qfull_queue(udev, true);
unlock:
mutex_unlock(&udev->cmdr_lock);
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!list_empty(&cmd->cmdr_queue_entry))
+ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
continue;
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
* Userspace was not able to start the
mutex_lock(&udev->cmdr_lock);
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
mutex_unlock(&udev->cmdr_lock);
spin_lock_bh(&timed_out_udevs_lock);
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
- depends on X86 && ACPI
+ depends on X86 && ACPI && PCI
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
with "earlycon=smh" on the kernel command line. The console is
enabled when early_param is processed.
+config SERIAL_EARLYCON_RISCV_SBI
+ bool "Early console using RISC-V SBI"
+ depends on RISCV
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ help
+ Support for early debug console using RISC-V SBI. This enables
+ the console before standard serial driver is probed. This is enabled
+ with "earlycon=sbi" on the kernel command line. The console is
+ enabled when early_param is processed.
+
config SERIAL_SB1250_DUART
tristate "BCM1xxx on-chip DUART serial support"
depends on SIBYTE_SB1xxx_SOC=y
obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o
+obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o
# These Sparc drivers have to appear before others such as 8250
# which share ttySx minor node space. Otherwise console device
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V SBI based earlycon
+ *
+ * Copyright (C) 2018 Anup Patel <anup@brainfault.org>
+ */
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <asm/sbi.h>
+
+static void sbi_console_write(struct console *con,
+ const char *s, unsigned int n)
+{
+ int i;
+
+ for (i = 0; i < n; ++i)
+ sbi_console_putchar(s[i]);
+}
+
+static int __init early_sbi_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = sbi_console_write;
+ return 0;
+}
+EARLYCON_DECLARE(sbi, early_sbi_setup);
static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg)
{
- u32 tmp = readl(reg);
+ u32 tmp = __raw_readl(reg);
- writel((tmp & ~clear) | set, reg);
+ __raw_writel((tmp & ~clear) | set, reg);
}
static inline struct
static void
lqasc_stop_rx(struct uart_port *port)
{
- writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
+ __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
}
static int
struct tty_port *tport = &port->state->port;
unsigned int ch = 0, rsr = 0, fifocnt;
- fifocnt = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
+ fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
+ ASCFSTAT_RXFFLMASK;
while (fifocnt--) {
u8 flag = TTY_NORMAL;
ch = readb(port->membase + LTQ_ASC_RBUF);
- rsr = (readl(port->membase + LTQ_ASC_STATE)
+ rsr = (__raw_readl(port->membase + LTQ_ASC_STATE)
& ASCSTATE_ANY) | UART_DUMMY_UER_RX;
tty_flip_buffer_push(tport);
port->icount.rx++;
return;
}
- while (((readl(port->membase + LTQ_ASC_FSTAT) &
+ while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) &
ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
if (port->x_char) {
writeb(port->x_char, port->membase + LTQ_ASC_TBUF);
unsigned long flags;
struct uart_port *port = (struct uart_port *)_port;
spin_lock_irqsave(&ltq_asc_lock, flags);
- writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
+ __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
spin_unlock_irqrestore(&ltq_asc_lock, flags);
lqasc_start_tx(port);
return IRQ_HANDLED;
unsigned long flags;
struct uart_port *port = (struct uart_port *)_port;
spin_lock_irqsave(&ltq_asc_lock, flags);
- writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
+ __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
lqasc_rx_chars(port);
spin_unlock_irqrestore(&ltq_asc_lock, flags);
return IRQ_HANDLED;
lqasc_tx_empty(struct uart_port *port)
{
int status;
- status = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK;
+ status = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
+ ASCFSTAT_TXFFLMASK;
return status ? 0 : TIOCSER_TEMT;
}
asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
port->membase + LTQ_ASC_CLC);
- writel(0, port->membase + LTQ_ASC_PISEL);
- writel(
+ __raw_writel(0, port->membase + LTQ_ASC_PISEL);
+ __raw_writel(
((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
- writel(
+ __raw_writel(
((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
| ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
port->membase + LTQ_ASC_RXFCON);
goto err2;
}
- writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
+ __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
port->membase + LTQ_ASC_IRNREN);
return 0;
free_irq(ltq_port->rx_irq, port);
free_irq(ltq_port->err_irq, port);
- writel(0, port->membase + LTQ_ASC_CON);
+ __raw_writel(0, port->membase + LTQ_ASC_CON);
asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
port->membase + LTQ_ASC_RXFCON);
asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
/* now we can write the new baudrate into the register */
- writel(divisor, port->membase + LTQ_ASC_BG);
+ __raw_writel(divisor, port->membase + LTQ_ASC_BG);
/* turn the baudrate generator back on */
asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON);
/* enable rx */
- writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
+ __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
spin_unlock_irqrestore(&ltq_asc_lock, flags);
return;
do {
- fifofree = (readl(port->membase + LTQ_ASC_FSTAT)
+ fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT)
& ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
} while (fifofree == 0);
writeb(ch, port->membase + LTQ_ASC_TBUF);
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
- int retval;
+ struct tty_ldisc *ld;
+ int retval = 0;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
return -EBUSY;
- retval = tty_ldisc_lock(tty, 5 * HZ);
- if (retval)
- return retval;
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld) {
+ tty_ldisc_deref(ld);
+ } else {
+ retval = tty_ldisc_lock(tty, 5 * HZ);
+ if (retval)
+ return retval;
- if (!tty->ldisc)
- retval = tty_ldisc_reinit(tty, tty->termios.c_line);
- tty_ldisc_unlock(tty);
+ if (!tty->ldisc)
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+ tty_ldisc_unlock(tty);
+ }
if (retval == 0)
tty->count++;
.driver_info = IGNORE_DEVICE,
},
+ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+ { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
continue;
}
- if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) {
- best = c;
- break;
+ if (i > 0 && desc && is_audio(desc)) {
+ if (is_uac3_config(desc)) {
+ best = c;
+ break;
+ }
+ continue;
}
/* From the remaining configs, choose the first one whose
{ USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
/* Corsair K70 RGB */
- { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair Strafe */
{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
#include <linux/platform_device.h>
#include <linux/property.h>
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
-
/**
* struct dwc3_haps - Driver private structure
* @dwc3: child dwc3 platform_device
/* Refer to BDC spec, Table 4 for description of SPB */
sp_buff_size = 1 << (sp_buff_size + 5);
dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
- bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size,
- &bdc->scratchpad.sp_dma, GFP_KERNEL);
+ bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
+ &bdc->scratchpad.sp_dma,
+ GFP_KERNEL);
if (!bdc->scratchpad.buff)
goto fail;
bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
bdc->srr.dqp_index = 0;
/* allocate the status report descriptors */
- bdc->srr.sr_bds = dma_zalloc_coherent(
- bdc->dev,
- NUM_SR_ENTRIES * sizeof(struct bdc_bd),
- &bdc->srr.dma_addr,
- GFP_KERNEL);
+ bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ &bdc->srr.dma_addr, GFP_KERNEL);
if (!bdc->srr.sr_bds)
return -ENOMEM;
&uhci_debug_operations);
#endif
- uhci->frame = dma_zalloc_coherent(uhci_dev(uhci),
- UHCI_NUMFRAMES * sizeof(*uhci->frame),
- &uhci->frame_dma_handle, GFP_KERNEL);
+ uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
+ UHCI_NUMFRAMES * sizeof(*uhci->frame),
+ &uhci->frame_dma_handle, GFP_KERNEL);
if (!uhci->frame) {
dev_err(uhci_dev(uhci),
"unable to allocate consistent memory for frame list\n");
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
- void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
- flags);
+ void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+ flags);
if (!buf)
goto fail_sp4;
struct xhci_erst_entry *entry;
size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
- erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
- size, &erst->erst_dma_addr, flags);
+ erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+ size, &erst->erst_dma_addr, flags);
if (!erst->entries)
return -ENOMEM;
if (!(us->fflags & US_FL_NEEDS_CAP16))
sdev->try_rc_10_first = 1;
- /* assume SPC3 or latter devices support sense size > 18 */
- if (sdev->scsi_level > SCSI_SPC_2)
+ /*
+ * assume SPC3 or latter devices support sense size > 18
+ * unless US_FL_BAD_SENSE quirk is specified.
+ */
+ if (sdev->scsi_level > SCSI_SPC_2 &&
+ !(us->fflags & US_FL_BAD_SENSE))
us->fflags |= US_FL_SANE_SENSE;
/*
US_FL_FIX_CAPACITY ),
/*
+ * Reported by Icenowy Zheng <icenowy@aosc.io>
+ * The SMI SM3350 USB-UFS bridge controller will enter a wrong state
+ * that do not process read/write command if a long sense is requested,
+ * so force to use 18-byte sense.
+ */
+UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
+ "SMI",
+ "SM3350 UFS-to-USB-Mass-Storage bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
+/*
* Reported by Paul Hartman <paul.hartman+linux@gmail.com>
* This card reader returns "Illegal Request, Logical Block Address
* Out of Range" for the first READ(10) after a new card is inserted.
#endif /* _TRACE_VFIO_PCI_H */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
return -EINVAL;
if (!unmap->size || unmap->size & mask)
return -EINVAL;
- if (unmap->iova + unmap->size < unmap->iova ||
+ if (unmap->iova + unmap->size - 1 < unmap->iova ||
unmap->size > SIZE_MAX)
return -EINVAL;
if (nvq->done_idx > VHOST_NET_BATCH)
vhost_net_signal_used(nvq);
if (unlikely(vq_log))
- vhost_log_write(vq, vq_log, log, vhost_len);
+ vhost_log_write(vq, vq_log, log, vhost_len,
+ vq->iov, in);
total_len += vhost_len;
if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
vhost_poll_queue(&vq->poll);
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
- struct virtio_scsi_ctrl_tmf_resp __user *resp;
struct virtio_scsi_ctrl_tmf_resp rsp;
+ struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp));
rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
- resp = vq->iov[vc->out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
+
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
- struct virtio_scsi_ctrl_an_resp __user *resp;
struct virtio_scsi_ctrl_an_resp rsp;
+ struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
rsp.response = VIRTIO_SCSI_S_OK;
- resp = vq->iov[vc->out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
+
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
int type, ret;
ret = copy_from_iter(&type, sizeof(type), from);
- if (ret != sizeof(type))
+ if (ret != sizeof(type)) {
+ ret = -EINVAL;
goto done;
+ }
switch (type) {
case VHOST_IOTLB_MSG:
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
- if (ret != sizeof(msg))
+ if (ret != sizeof(msg)) {
+ ret = -EINVAL;
goto done;
+ }
if (vhost_process_iotlb_msg(dev, &msg)) {
ret = -EFAULT;
goto done;
return r;
}
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+ struct vhost_umem *umem = vq->umem;
+ struct vhost_umem_node *u;
+ u64 start, end, l, min;
+ int r;
+ bool hit = false;
+
+ while (len) {
+ min = len;
+ /* More than one GPAs can be mapped into a single HVA. So
+ * iterate all possible umems here to be safe.
+ */
+ list_for_each_entry(u, &umem->umem_list, link) {
+ if (u->userspace_addr > hva - 1 + len ||
+ u->userspace_addr - 1 + u->size < hva)
+ continue;
+ start = max(u->userspace_addr, hva);
+ end = min(u->userspace_addr - 1 + u->size,
+ hva - 1 + len);
+ l = end - start + 1;
+ r = log_write(vq->log_base,
+ u->start + start - u->userspace_addr,
+ l);
+ if (r < 0)
+ return r;
+ hit = true;
+ min = min(l, min);
+ }
+
+ if (!hit)
+ return -EFAULT;
+
+ len -= min;
+ hva += min;
+ }
+
+ return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+ struct iovec iov[64];
+ int i, ret;
+
+ if (!vq->iotlb)
+ return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+ len, iov, 64, VHOST_ACCESS_WO);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len)
+ unsigned int log_num, u64 len, struct iovec *iov, int count)
{
int i, r;
/* Make sure data written is seen before log. */
smp_wmb();
+
+ if (vq->iotlb) {
+ for (i = 0; i < count; i++) {
+ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+ }
+
for (i = 0; i < log_num; ++i) {
u64 l = min(log[i].len, len);
r = log_write(vq->log_base, log[i].addr, l);
smp_wmb();
/* Log used flag write. */
used = &vq->used->flags;
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof vq->used->flags);
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof vq->used->flags);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
smp_wmb();
/* Log avail event write */
used = vhost_avail_event(vq);
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof *vhost_avail_event(vq));
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
/* Make sure data is seen before log. */
smp_wmb();
/* Log used ring entry write. */
- log_write(vq->log_base,
- vq->log_addr +
- ((void __user *)used - (void __user *)vq->used),
- count * sizeof *used);
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
+ count * sizeof *used);
}
old = vq->last_used_idx;
new = (vq->last_used_idx += count);
/* Make sure used idx is seen before log. */
smp_wmb();
/* Log used index update. */
- log_write(vq->log_base,
- vq->log_addr + offsetof(struct vring_used, idx),
- sizeof vq->used->idx);
+ log_used(vq, offsetof(struct vring_used, idx),
+ sizeof vq->used->idx);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len);
+ unsigned int log_num, u64 len,
+ struct iovec *iov, int count);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
hash_del_rcu(&vsock->hash);
vsock->guest_cid = guest_cid;
- hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
mutex_unlock(&vhost_vsock_mutex);
return 0;
return -ENODEV;
}
for_each_child_of_node(nproot, np) {
- if (!of_node_cmp(np->name, name)) {
+ if (of_node_name_eq(np, name)) {
of_property_read_u32(np, "marvell,88pm860x-iset",
&iset);
data->iset = PM8606_WLED_CURRENT(iset);
struct device *dev;
unsigned int lth_brightness;
unsigned int *levels;
+ bool enabled;
struct regulator *power_supply;
struct gpio_desc *enable_gpio;
unsigned int scale;
int err;
pwm_get_state(pb->pwm, &state);
- if (state.enabled)
+ if (pb->enabled)
return;
err = regulator_enable(pb->power_supply);
if (pb->enable_gpio)
gpiod_set_value_cansleep(pb->enable_gpio, 1);
+
+ pb->enabled = true;
}
static void pwm_backlight_power_off(struct pwm_bl_data *pb)
struct pwm_state state;
pwm_get_state(pb->pwm, &state);
- if (!state.enabled)
+ if (!pb->enabled)
return;
if (pb->enable_gpio)
pwm_apply_state(pb->pwm, &state);
regulator_disable(pb->power_supply);
+ pb->enabled = false;
}
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
memset(data, 0, sizeof(*data));
/*
+ * These values are optional and set as 0 by default, the out values
+ * are modified only if a valid u32 value can be decoded.
+ */
+ of_property_read_u32(node, "post-pwm-on-delay-ms",
+ &data->post_pwm_on_delay);
+ of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
+
+ data->enable_gpio = -EINVAL;
+
+ /*
* Determine the number of brightness levels, if this property is not
* set a default table of brightness levels will be used.
*/
data->max_brightness--;
}
- /*
- * These values are optional and set as 0 by default, the out values
- * are modified only if a valid u32 value can be decoded.
- */
- of_property_read_u32(node, "post-pwm-on-delay-ms",
- &data->post_pwm_on_delay);
- of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
-
- data->enable_gpio = -EINVAL;
return 0;
}
pb->check_fb = data->check_fb;
pb->exit = data->exit;
pb->dev = &pdev->dev;
+ pb->enabled = false;
pb->post_pwm_on_delay = data->post_pwm_on_delay;
pb->pwm_off_delay = data->pwm_off_delay;
depends on FB
config FB_BACKLIGHT
- bool
+ tristate
depends on FB
select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
config FB_GOLDFISH
tristate "Goldfish Framebuffer"
- depends on FB && HAS_DMA && (GOLDFISH || COMPILE_TEST)
+ depends on FB
+ depends on GOLDFISH || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
}
ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
- if (ret)
+ if (ret) {
+ of_node_put(disp);
goto out_fb_release;
+ }
of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");
ret = of_property_read_u32(disp, "bits-per-pixel",
&info->var.bits_per_pixel);
+ of_node_put(disp);
if (ret)
goto out_fb_release;
continue;
}
#endif
+
+ if (!strncmp(options, "logo-pos:", 9)) {
+ options += 9;
+ if (!strcmp(options, "center"))
+ fb_center_logo = true;
+ continue;
+ }
}
return 1;
}
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != idx &&
con2fb_map[i] != -1) {
- new_idx = i;
+ new_idx = con2fb_map[i];
break;
}
}
int num_registered_fb __read_mostly;
EXPORT_SYMBOL(num_registered_fb);
+bool fb_center_logo __read_mostly;
+EXPORT_SYMBOL(fb_center_logo);
+
static struct fb_info *get_fb_info(unsigned int idx)
{
struct fb_info *fb_info;
image->dx += image->width + 8;
}
} else if (rotate == FB_ROTATE_UD) {
- for (x = 0; x < num; x++) {
+ u32 dx = image->dx;
+
+ for (x = 0; x < num && image->dx <= dx; x++) {
info->fbops->fb_imageblit(info, image);
image->dx -= image->width + 8;
}
image->dy += image->height + 8;
}
} else if (rotate == FB_ROTATE_CCW) {
- for (x = 0; x < num; x++) {
+ u32 dy = image->dy;
+
+ for (x = 0; x < num && image->dy <= dy; x++) {
info->fbops->fb_imageblit(info, image);
image->dy -= image->height + 8;
}
fb_set_logo(info, logo, logo_new, fb_logo.depth);
}
- image.dx = 0;
- image.dy = y;
+ if (fb_center_logo) {
+ int xres = info->var.xres;
+ int yres = info->var.yres;
+
+ if (rotate == FB_ROTATE_CW || rotate == FB_ROTATE_CCW) {
+ xres = info->var.yres;
+ yres = info->var.xres;
+ }
+
+ while (n && (n * (logo->width + 8) - 8 > xres))
+ --n;
+ image.dx = (xres - n * (logo->width + 8) - 8) / 2;
+ image.dy = y ?: (yres - logo->height) / 2;
+ } else {
+ image.dx = 0;
+ image.dy = y;
+ }
+
image.width = logo->width;
image.height = logo->height;
info->pseudo_palette = saved_pseudo_palette;
kfree(logo_new);
kfree(logo_rotate);
- return logo->height;
+ return image.dy + logo->height;
}
unsigned int i;
for (i = 0; i < fb_logo_ex_num; i++)
- y += fb_show_logo_line(info, rotate,
- fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
+ y = fb_show_logo_line(info, rotate,
+ fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
return y;
}
{
int depth = fb_get_color_depth(&info->var, &info->fix);
unsigned int yres;
+ int height;
memset(&fb_logo, 0, sizeof(struct logo_data));
}
}
- return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
+ height = fb_logo.logo->height;
+ if (fb_center_logo)
+ height += (yres - fb_logo.logo->height) / 2;
+
+ return fb_prepare_extra_logos(info, height, yres);
}
int fb_show_logo(struct fb_info *info, int rotate)
info->device = dev;
info->fbcon_rotate_hint = -1;
-#ifdef CONFIG_FB_BACKLIGHT
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
mutex_init(&info->bl_curve_mutex);
#endif
return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state);
}
-#ifdef CONFIG_FB_BACKLIGHT
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
static ssize_t store_bl_curve(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
__ATTR(stride, S_IRUGO, show_stride, NULL),
__ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
__ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate),
-#ifdef CONFIG_FB_BACKLIGHT
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
__ATTR(bl_curve, S_IRUGO|S_IWUSR, show_bl_curve, store_bl_curve),
#endif
};
}
}
-#ifdef CONFIG_FB_BACKLIGHT
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* This function generates a linear backlight curve
*
* 0: off
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
- par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
- &par->p_palette_base,
- GFP_KERNEL | GFP_DMA);
+ par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
+ &par->p_palette_base,
+ GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
dev_err(&device->dev,
"GLCD: kmalloc for palette buffer failed\n");
unregister_framebuffer(info);
unmap_video_memory(info);
- if (&info->cmap)
- fb_dealloc_cmap(&info->cmap);
+ fb_dealloc_cmap(&info->cmap);
mfbi->registered = 0;
}
}
static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
- const char *name, unsigned long address)
+ unsigned long address)
{
struct offb_par *par = (struct offb_par *) info->par;
- if (dp && !strncmp(name, "ATY,Rage128", 11)) {
+ if (of_node_name_prefix(dp, "ATY,Rage128")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_r128;
- } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12)
- || !strncmp(name, "ATY,RageM3p12A", 14))) {
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
+ of_node_name_prefix(dp, "ATY,RageM3p12A")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_M3A;
- } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) {
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_M3B;
- } else if (dp && !strncmp(name, "ATY,Rage6", 9)) {
+ } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_radeon;
- } else if (!strncmp(name, "ATY,", 4)) {
+ } else if (of_node_name_prefix(dp, "ATY,")) {
unsigned long base = address & 0xff000000UL;
par->cmap_adr =
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
if (par->cmap_adr)
par->cmap_type = cmap_gxt2000;
- } else if (dp && !strncmp(name, "vga,Display-", 12)) {
+ } else if (of_node_name_prefix(dp, "vga,Display-")) {
/* Look for AVIVO initialized by SLOF */
struct device_node *pciparent = of_get_parent(dp);
const u32 *vid, *did;
par->cmap_type = cmap_unknown;
if (depth == 8)
- offb_init_palette_hacks(info, dp, name, address);
+ offb_init_palette_hacks(info, dp, address);
else
fix->visual = FB_VISUAL_TRUECOLOR;
}
#if defined(CONFIG_FB_OMAP2_DSS_DEBUGFS)
-static int dss_debug_show(struct seq_file *s, void *unused)
+static int dss_show(struct seq_file *s, void *unused)
{
void (*func)(struct seq_file *) = s->private;
func(s);
return 0;
}
-static int dss_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dss_debug_show, inode->i_private);
-}
-
-static const struct file_operations dss_debug_fops = {
- .open = dss_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dss);
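For context (not part of the patch): DEFINE_SHOW_ATTRIBUTE(dss) expands to roughly the open helper and file_operations sketched below, which is why the open-coded dss_debug_open()/dss_debug_fops pair can be dropped once the show function is renamed to dss_show(), and why the following hunks reference &dss_fops.

static int dss_open(struct inode *inode, struct file *file)
{
	/* illustrative expansion of the seq_file helper macro */
	return single_open(file, dss_show, inode->i_private);
}

static const struct file_operations dss_fops = {
	.owner		= THIS_MODULE,
	.open		= dss_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};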
static struct dentry *dss_debugfs_dir;
}
debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
- &dss_debug_dump_clocks, &dss_debug_fops);
+ &dss_debug_dump_clocks, &dss_fops);
return 0;
}
struct dentry *d;
d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
- write, &dss_debug_fops);
+ write, &dss_fops);
return PTR_ERR_OR_ZERO(d);
}
int r = 0;
+ memset(&p, 0, sizeof(p));
+
switch (cmd) {
case OMAPFB_SYNC_GFX:
DBG("ioctl SYNC_GFX\n");
/* check whether divisor is too small. */
if (divider_int < 2) {
- dev_warn(fbi->dev, "Warning: clock source is too slow."
+ dev_warn(fbi->dev, "Warning: clock source is too slow. "
"Try smaller resolution\n");
divider_int = 2;
}
if (!info)
return ERR_PTR(-ENOMEM);
ret = of_get_pxafb_mode_info(dev, info);
- if (ret) {
- kfree(info->modes);
+ if (ret)
return ERR_PTR(ret);
- }
/*
* On purpose, neither lccrX registers nor video memory size can be
dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
if (!dlfb) {
dev_err(&intf->dev, "%s: failed to allocate dlfb\n", __func__);
- goto error;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&dlfb->deferred_free);
error:
if (dlfb->info) {
dlfb_ops_destroy(dlfb->info);
- } else if (dlfb) {
+ } else {
usb_put_dev(dlfb->udev);
kfree(dlfb);
}
/* this function will wait for all in-flight urbs to complete */
dlfb_free_urb_list(dlfb);
- if (info) {
- /* remove udlfb's sysfs interfaces */
- for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
- device_remove_file(info->dev, &fb_device_attrs[i]);
- device_remove_bin_file(info->dev, &edid_attr);
- }
+ /* remove udlfb's sysfs interfaces */
+ for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
+ device_remove_file(info->dev, &fb_device_attrs[i]);
+ device_remove_bin_file(info->dev, &edid_attr);
unregister_framebuffer(info);
}
module_param(vram_remap, uint, 0);
MODULE_PARM_DESC(vram_remap, "Set amount of video memory to be used [MiB]");
module_param(vram_total, uint, 0);
-MODULE_PARM_DESC(vram_total, "Set total amount of video memoery [MiB]");
+MODULE_PARM_DESC(vram_total, "Set total amount of video memory [MiB]");
module_param(maxclk, ushort, 0);
MODULE_PARM_DESC(maxclk, "Maximum pixelclock [MHz], overrides EDID data");
module_param(maxhf, ushort, 0);
VIRTIO_BALLOON_VQ_MAX
};
+enum virtio_balloon_config_read {
+ VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
+};
+
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
/* Prevent updating balloon when it is being canceled. */
spinlock_t stop_update_lock;
bool stop_update;
+ /* Bitmap to indicate if reading the related config fields are needed */
+ unsigned long config_read_bitmap;
/* The list of allocated free pages, waiting to be given back to mm */
struct list_head free_page_list;
spinlock_t free_page_list_lock;
/* The number of free page blocks on the above list */
unsigned long num_free_page_blocks;
- /* The cmd id received from host */
- u32 cmd_id_received;
+ /*
+ * The cmd id received from host.
+ * Read it via virtio_balloon_cmd_id_received to get the latest value
+ * sent from host.
+ */
+ u32 cmd_id_received_cache;
/* The cmd id that is actively in use */
__virtio32 cmd_id_active;
/* Buffer to store the stop sign */
return num_returned;
}
+static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
+{
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ return;
+
+ /* No need to queue the work if the bit was already set. */
+ if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+ &vb->config_read_bitmap))
+ return;
+
+ queue_work(vb->balloon_wq, &vb->report_free_page_work);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
unsigned long flags;
- s64 diff = towards_target(vb);
-
- if (diff) {
- spin_lock_irqsave(&vb->stop_update_lock, flags);
- if (!vb->stop_update)
- queue_work(system_freezable_wq,
- &vb->update_balloon_size_work);
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
- }
- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
- virtio_cread(vdev, struct virtio_balloon_config,
- free_page_report_cmd_id, &vb->cmd_id_received);
- if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
- /* Pass ULONG_MAX to give back all the free pages */
- return_free_pages_to_mm(vb, ULONG_MAX);
- } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
- vb->cmd_id_received !=
- virtio32_to_cpu(vdev, vb->cmd_id_active)) {
- spin_lock_irqsave(&vb->stop_update_lock, flags);
- if (!vb->stop_update) {
- queue_work(vb->balloon_wq,
- &vb->report_free_page_work);
- }
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
- }
+ spin_lock_irqsave(&vb->stop_update_lock, flags);
+ if (!vb->stop_update) {
+ queue_work(system_freezable_wq,
+ &vb->update_balloon_size_work);
+ virtio_balloon_queue_free_page_work(vb);
}
+ spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
static void update_balloon_size(struct virtio_balloon *vb)
return 0;
}
+static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
+{
+ if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+ &vb->config_read_bitmap))
+ virtio_cread(vb->vdev, struct virtio_balloon_config,
+ free_page_report_cmd_id,
+ &vb->cmd_id_received_cache);
+
+ return vb->cmd_id_received_cache;
+}
+
static int send_cmd_id_start(struct virtio_balloon *vb)
{
struct scatterlist sg;
while (virtqueue_get_buf(vq, &unused))
;
- vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
+ vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+ virtio_balloon_cmd_id_received(vb));
sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
if (!err)
* stop the reporting.
*/
cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
- if (cmd_id_active != vb->cmd_id_received)
+ if (unlikely(cmd_id_active !=
+ virtio_balloon_cmd_id_received(vb)))
break;
/*
return 0;
}
-static void report_free_page_func(struct work_struct *work)
+static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
int err;
- struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
- report_free_page_work);
struct device *dev = &vb->vdev->dev;
/* Start by sending the received cmd id to host with an outbuf. */
dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}
+static void report_free_page_func(struct work_struct *work)
+{
+ struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
+ report_free_page_work);
+ u32 cmd_id_received;
+
+ cmd_id_received = virtio_balloon_cmd_id_received(vb);
+ if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
+ /* Pass ULONG_MAX to give back all the free pages */
+ return_free_pages_to_mm(vb, ULONG_MAX);
+ } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
+ cmd_id_received !=
+ virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
+ virtio_balloon_report_free_page(vb);
+ }
+}
+
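/*
 * Taken together, virtio_balloon_queue_free_page_work() and
 * virtio_balloon_cmd_id_received() form a small "stale flag" latch: the
 * config-change path only sets the bit and queues the work on a 0->1
 * transition (test_and_set_bit), while the worker clears the bit and does
 * the slower virtio_cread() lazily (test_and_clear_bit). A minimal
 * stand-alone sketch of the same pattern, with hypothetical names:
 */
static unsigned long demo_stale;	/* bit 0: cached value needs refresh */
static u32 demo_cache;

static void demo_mark_changed(void)	/* cheap; callable from the IRQ path */
{
	if (test_and_set_bit(0, &demo_stale))
		return;			/* work already queued */
	/* queue_work(...) would go here */
}

static u32 demo_read_latest(void)	/* worker context; may do slow I/O */
{
	if (test_and_clear_bit(0, &demo_stale))
		demo_cache = 0;		/* slow device/config read goes here */
	return demo_cache;
}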
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
goto out_del_vqs;
}
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
- vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
+ vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
VIRTIO_BALLOON_CMD_ID_STOP);
vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
- int i, err;
+ int i, err, queue_idx = 0;
err = request_irq(irq, vm_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vm_dev);
return err;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u16 msix_vec;
- int i, err, nvectors, allocated_vectors;
+ int i, err, nvectors, allocated_vectors, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
msix_vec = allocated_vectors++;
else
msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
msix_vec);
if (IS_ERR(vqs[i])) {
const char * const names[], const bool *ctx)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- int i, err;
+ int i, err, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
vqs[i] = NULL;
continue;
}
- vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (!res)
+ return -ENODEV;
priv->io_base = devm_ioport_map(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(priv->io_base))
- return PTR_ERR(priv->io_base);
+ if (!priv->io_base)
+ return -ENOMEM;
watchdog_set_drvdata(&priv->wdd, priv);
xen_have_vector_callback = 0;
return;
}
- pr_info("Xen HVM callback vector for event delivery is enabled\n");
+ pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
xen_hvm_callback_vector);
}
/* write the data, then modify the indexes */
virt_wmb();
- if (ret < 0)
+ if (ret < 0) {
+ atomic_set(&map->read, 0);
intf->in_error = ret;
- else
+ } else
intf->in_prod = prod + ret;
/* update the indexes, then notify the other end */
virt_wmb();
static void pvcalls_sk_state_change(struct sock *sock)
{
struct sock_mapping *map = sock->sk_user_data;
- struct pvcalls_data_intf *intf;
if (map == NULL)
return;
- intf = map->ring;
- intf->in_error = -ENOTCONN;
+ atomic_inc(&map->read);
notify_remote_via_irq(map->irq);
}
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
#define PVCALLS_FRONT_MAX_SPIN 5000
+static struct proto pvcalls_proto = {
+ .name = "PVCalls",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct sock),
+};
+
struct pvcalls_bedata {
struct xen_pvcalls_front_ring ring;
grant_ref_t ref;
return ret;
}
+static void free_active_ring(struct sock_mapping *map)
+{
+ if (!map->active.ring)
+ return;
+
+ free_pages((unsigned long)map->active.data.in,
+ map->active.ring->ring_order);
+ free_page((unsigned long)map->active.ring);
+}
+
+static int alloc_active_ring(struct sock_mapping *map)
+{
+ void *bytes;
+
+ map->active.ring = (struct pvcalls_data_intf *)
+ get_zeroed_page(GFP_KERNEL);
+ if (!map->active.ring)
+ goto out;
+
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PVCALLS_RING_ORDER);
+ if (!bytes)
+ goto out;
+
+ map->active.data.in = bytes;
+ map->active.data.out = bytes +
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ return 0;
+
+out:
+ free_active_ring(map);
+ return -ENOMEM;
+}
+
static int create_active(struct sock_mapping *map, int *evtchn)
{
void *bytes;
*evtchn = -1;
init_waitqueue_head(&map->active.inflight_conn_req);
- map->active.ring = (struct pvcalls_data_intf *)
- __get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (map->active.ring == NULL)
- goto out_error;
- map->active.ring->ring_order = PVCALLS_RING_ORDER;
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PVCALLS_RING_ORDER);
- if (bytes == NULL)
- goto out_error;
+ bytes = map->active.data.in;
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
map->active.ring->ref[i] = gnttab_grant_foreign_access(
pvcalls_front_dev->otherend_id,
pvcalls_front_dev->otherend_id,
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
- map->active.data.in = bytes;
- map->active.data.out = bytes +
- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
if (ret)
goto out_error;
out_error:
if (*evtchn >= 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
- free_page((unsigned long)map->active.ring);
return ret;
}
return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+ ret = alloc_active_ring(map);
+ if (ret < 0) {
+ pvcalls_exit_sock(sock);
+ return ret;
+ }
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
virt_mb();
size = pvcalls_queued(prod, cons, array_size);
- if (size >= array_size)
+ if (size > array_size)
return -EINVAL;
+ if (size == array_size)
+ return 0;
if (len > array_size - size)
len = array_size - size;
error = intf->in_error;
/* get pointers before reading from the ring */
virt_rmb();
- if (error < 0)
- return error;
size = pvcalls_queued(prod, cons, array_size);
masked_prod = pvcalls_mask(prod, array_size);
masked_cons = pvcalls_mask(cons, array_size);
if (size == 0)
- return 0;
+ return error ?: size;
if (len > size)
len = size;
}
}
- spin_lock(&bedata->socket_lock);
- ret = get_request(bedata, &req_id);
- if (ret < 0) {
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
- spin_unlock(&bedata->socket_lock);
+ pvcalls_exit_sock(sock);
+ return -ENOMEM;
+ }
+ ret = alloc_active_ring(map2);
+ if (ret < 0) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ kfree(map2);
pvcalls_exit_sock(sock);
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
- if (map2 == NULL) {
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map2);
+ kfree(map2);
pvcalls_exit_sock(sock);
- return -ENOMEM;
+ return ret;
}
+
ret = create_active(map2, &evtchn);
if (ret < 0) {
+ free_active_ring(map2);
kfree(map2);
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
received:
map2->sock = newsock;
- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
if (!newsock->sk) {
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
spin_lock(&bedata->socket_lock);
list_del(&map->list);
spin_unlock(&bedata->socket_lock);
- if (READ_ONCE(map->passive.inflight_req_id) !=
- PVCALLS_INVALID_ID) {
+ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
+ READ_ONCE(map->passive.inflight_req_id) != 0) {
pvcalls_front_free_map(bedata,
map->passive.accept_map);
}
ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
-filechk_fwbin = { \
+filechk_fwbin = \
echo "/* Generated by $(src)/Makefile */" ;\
echo " .section .rodata" ;\
echo " .p2align $(ASM_ALIGN)" ;\
echo " .p2align $(ASM_ALIGN)" ;\
echo " $(ASM_WORD) _fw_$(FWSTR)_name" ;\
echo " $(ASM_WORD) _fw_$(FWSTR)_bin" ;\
- echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin" ;\
-}
+ echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin"
$(obj)/%.gen.S: FORCE
$(call filechk,fwbin)
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
goto again;
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
afs_lock_may_be_available(vnode);
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
valid = true;
} else {
- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
vnode->cb_v_break = vnode->volume->cb_v_break;
valid = false;
}
#endif
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
_leave("");
}
struct yfs_xdr_u64 max_quota;
struct yfs_xdr_u64 file_quota;
} __packed;
+
+enum yfs_lock_type {
+ yfs_LockNone = -1,
+ yfs_LockRead = 0,
+ yfs_LockWrite = 1,
+ yfs_LockExtend = 2,
+ yfs_LockRelease = 3,
+ yfs_LockMandatoryRead = 0x100,
+ yfs_LockMandatoryWrite = 0x101,
+ yfs_LockMandatoryExtend = 0x102,
+};
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
+static void afs_delete_async_call(struct work_struct *);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
}
}
+static struct afs_call *afs_get_call(struct afs_call *call,
+ enum afs_call_trace why)
+{
+ int u = atomic_inc_return(&call->usage);
+
+ trace_afs_call(call, why, u,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+ return call;
+}
+
/*
* Queue the call for actual work.
*/
static void afs_queue_call_work(struct afs_call *call)
{
if (call->type->work) {
- int u = atomic_inc_return(&call->usage);
-
- trace_afs_call(call, afs_call_trace_work, u,
- atomic_read(&call->net->nr_outstanding_calls),
- __builtin_return_address(0));
-
INIT_WORK(&call->work, call->type->work);
+ afs_get_call(call, afs_call_trace_work);
if (!queue_work(afs_wq, &call->work))
afs_put_call(call);
}
}
}
+ /* If the call is going to be asynchronous, we need an extra ref for
+ * the call to hold itself so the caller need not hang on to its ref.
+ */
+ if (call->async)
+ afs_get_call(call, afs_call_trace_get);
+
/* create a call */
rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
(unsigned long)call,
goto error_do_abort;
}
- /* at this point, an async call may no longer exist as it may have
- * already completed */
- if (call->async)
+ /* Note that at this point, we may have received the reply or an abort
+ * - and an asynchronous call may already have completed.
+ */
+ if (call->async) {
+ afs_put_call(call);
return -EINPROGRESS;
+ }
return afs_wait_for_call_to_complete(call, ac);
error_do_abort:
- call->state = AFS_CALL_COMPLETE;
if (ret != -ECONNABORTED) {
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret, "KSD");
error_kill_call:
if (call->type->done)
call->type->done(call);
- afs_put_call(call);
+
+ /* We need to dispose of the extra ref we grabbed for an async call.
+ * The call, however, might be queued on afs_async_calls and we need to
+ * make sure we don't get any more notifications that might requeue it.
+ */
+ if (call->rxcall) {
+ rxrpc_kernel_end_call(call->net->socket, call->rxcall);
+ call->rxcall = NULL;
+ }
+ if (call->async) {
+ if (cancel_work_sync(&call->async_work))
+ afs_put_call(call);
+ afs_put_call(call);
+ }
+
ac->error = ret;
+ call->state = AFS_CALL_COMPLETE;
+ afs_put_call(call);
_leave(" = %d", ret);
return ret;
}
if (vldb->fs_mask[i] & type_mask)
nr_servers++;
- slist = kzalloc(sizeof(struct afs_server_list) +
- sizeof(struct afs_server_entry) * nr_servers,
- GFP_KERNEL);
+ slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
if (!slist)
goto error;
bp = xdr_encode_YFSFid(bp, &vnode->fid);
bp = xdr_encode_string(bp, name, namesz);
bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
- bp = xdr_encode_u32(bp, 0); /* ViceLockType */
+ bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
yfs_check_req(call, bp);
afs_use_fs_server(call, fc->cbi);
}
EXPORT_SYMBOL(invalidate_bdev);
+static void set_init_blocksize(struct block_device *bdev)
+{
+ unsigned bsize = bdev_logical_block_size(bdev);
+ loff_t size = i_size_read(bdev->bd_inode);
+
+ while (bsize < PAGE_SIZE) {
+ if (size & bsize)
+ break;
+ bsize <<= 1;
+ }
+ bdev->bd_block_size = bsize;
+ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+
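/*
 * set_init_blocksize() above picks the largest power-of-two block size,
 * capped at PAGE_SIZE, that evenly divides the device size: bsize keeps
 * doubling while the corresponding bit of 'size' stays clear. For example,
 * a 1536-byte (3 x 512) device keeps a 512-byte block size because bit 512
 * is set in 1536, while any device whose size is a multiple of 4 KiB ends
 * up with 4096. Stand-alone sketch of the same computation (PAGE_SIZE
 * assumed to be 4096 here):
 */
static unsigned int pick_init_blocksize(unsigned int logical_bs, long long size)
{
	unsigned int bsize = logical_bs;

	while (bsize < 4096) {
		if (size & bsize)	/* size is not a multiple of 2 * bsize */
			break;
		bsize <<= 1;
	}
	return bsize;
}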
int set_blocksize(struct block_device *bdev, int size)
{
/* Size must be a power of two, and between 512 and PAGE_SIZE */
void bd_set_size(struct block_device *bdev, loff_t size)
{
- unsigned bsize = bdev_logical_block_size(bdev);
-
inode_lock(bdev->bd_inode);
i_size_write(bdev->bd_inode, size);
inode_unlock(bdev->bd_inode);
- while (bsize < PAGE_SIZE) {
- if (size & bsize)
- break;
- bsize <<= 1;
- }
- bdev->bd_block_size = bsize;
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
}
}
- if (!ret)
+ if (!ret) {
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+ set_init_blocksize(bdev);
+ }
/*
* If the device is invalidated, rescan partition
goto out_clear;
}
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+ set_init_blocksize(bdev);
}
if (bdev->bd_bdi == &noop_backing_dev_info)
parent_start = parent->start;
/*
- * If we are COWing a node/leaf from the extent, chunk or device trees,
- * make sure that we do not finish block group creation of pending block
- * groups. We do this to avoid a deadlock.
+ * If we are COWing a node/leaf from the extent, chunk, device or free
+ * space trees, make sure that we do not finish block group creation of
+ * pending block groups. We do this to avoid a deadlock.
* COWing can result in allocation of a new chunk, and flushing pending
* block groups (btrfs_create_pending_block_groups()) can be triggered
* when finishing allocation of a new chunk. Creation of a pending block
- * group modifies the extent, chunk and device trees, therefore we could
- * deadlock with ourselves since we are holding a lock on an extent
- * buffer that btrfs_create_pending_block_groups() may try to COW later.
+ * group modifies the extent, chunk, device and free space trees,
+ * therefore we could deadlock with ourselves since we are holding a
+ * lock on an extent buffer that btrfs_create_pending_block_groups() may
+ * try to COW later.
*/
if (root == fs_info->extent_root ||
root == fs_info->chunk_root ||
- root == fs_info->dev_root)
+ root == fs_info->dev_root ||
+ root == fs_info->free_space_root)
trans->can_flush_pending_bgs = false;
cow = btrfs_alloc_tree_block(trans, root, parent_start,
struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
+struct btrfs_delayed_ref_root;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
* main phase. The fs_info::balance_ctl is initialized.
*/
BTRFS_FS_BALANCE_RUNNING,
+
+ /* Indicate that the cleaner thread is awake and doing something. */
+ BTRFS_FS_CLEANER_RUNNING,
};
struct btrfs_fs_info {
unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait);
+void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
while (1) {
again = 0;
+ set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+
/* Make the cleaner go to sleep early. */
if (btrfs_need_cleaner_sleep(fs_info))
goto sleep;
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
+ clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
if (kthread_should_park())
kthread_parkme();
if (kthread_should_stop())
spin_lock(&fs_info->ordered_root_lock);
}
spin_unlock(&fs_info->ordered_root_lock);
+
+ /*
+ * We need this here because if we've been flipped read-only we won't
+ * get sync() from the umount, so we need to make sure any ordered
+ * extents that haven't yet started writeback of their dirty pages
+ * actually get run and error out properly.
+ */
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (pin_bytes)
btrfs_pin_extent(fs_info, head->bytenr,
head->num_bytes, 1);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
btrfs_put_delayed_ref_head(head);
cond_resched();
spin_lock(&delayed_refs->lock);
return ret ? ret : 1;
}
-static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head)
+void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_ref_root *delayed_refs =
- &trans->transaction->delayed_refs;
int nr_items = 1; /* Dropping this ref head update. */
if (head->total_ref_mod < 0) {
}
}
- cleanup_ref_head_accounting(trans, head);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
ret = 0;
break;
case COMMIT_TRANS:
+ /*
+ * If we have pending delayed iputs then we could free up a
+ * bunch of pinned space, so make sure we run the iputs before
+ * we do our pinned bytes check below.
+ */
+ mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
+
ret = may_commit_transaction(fs_info, space_info);
break;
default:
if (head->must_insert_reserved)
ret = 1;
- cleanup_ref_head_accounting(trans, head);
+ btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
return ret;
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
- /* Try to release some metadata so we don't get an OOM but don't wait */
- btrfs_btree_balance_dirty_nodelay(fs_info);
-
return ret;
}
ASSERT(list_empty(&binode->delayed_iput));
list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
+ if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
+ wake_up_process(fs_info->cleaner_kthread);
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
inode_lock_nested(inode2, I_MUTEX_CHILD);
}
+static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len)
+{
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
+ unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
+}
+
+static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len)
+{
+ if (inode1 < inode2) {
+ swap(inode1, inode2);
+ swap(loff1, loff2);
+ } else if (inode1 == inode2 && loff2 < loff1) {
+ swap(loff1, loff2);
+ }
+ lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
+ lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
+}
+
static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
struct inode *dst, u64 dst_loff)
{
return -EINVAL;
/*
- * Lock destination range to serialize with concurrent readpages().
+ * Lock destination range to serialize with concurrent readpages() and
+ * source range to serialize with relocation.
*/
- lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1);
+ btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
- unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1);
+ btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
return ret;
}
len = ALIGN(src->i_size, bs) - off;
if (destoff > inode->i_size) {
+ const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
+
ret = btrfs_cont_expand(inode, inode->i_size, destoff);
if (ret)
return ret;
+ /*
+ * We may have truncated the last block if the inode's size is
+ * not sector size aligned, so we need to wait for writeback to
+ * complete before proceeding further, otherwise we can race
+ * with cloning and attempt to increment a reference to an
+ * extent that no longer exists (writeback completed right after
+ * we found the previous extent covering eof and before we
+ * attempted to increment its reference count).
+ */
+ ret = btrfs_wait_ordered_range(inode, wb_start,
+ destoff - wb_start);
+ if (ret)
+ return ret;
}
/*
- * Lock destination range to serialize with concurrent readpages().
+ * Lock destination range to serialize with concurrent readpages() and
+ * source range to serialize with relocation.
*/
- lock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1);
+ btrfs_double_extent_lock(src, off, inode, destoff, len);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
- unlock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1);
+ btrfs_double_extent_unlock(src, off, inode, destoff, len);
/*
* Truncate page cache pages so that future reads will see the cloned
* data immediately and not the previous data.
ret = -EUCLEAN;
goto out;
}
+
+ /* It's possible this device is a dummy for a seed device */
+ if (dev->disk_total_bytes == 0) {
+ dev = find_device(fs_info->fs_devices->seed, devid, NULL);
+ if (!dev) {
+ btrfs_err(fs_info, "failed to find seed devid %llu",
+ devid);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+
if (physical_offset + physical_len > dev->disk_total_bytes) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
if (err < 0 || off >= i_size_read(inode)) {
unlock_page(page);
put_page(page);
- if (err == -ENOMEM)
- ret = VM_FAULT_OOM;
- else
- ret = VM_FAULT_SIGBUS;
+ ret = vmf_error(err);
goto out_inline;
}
if (err < PAGE_SIZE)
seq_putc(m, ',');
pos = m->count;
- ret = ceph_print_client_options(m, fsc->client);
+ ret = ceph_print_client_options(m, fsc->client, false);
if (ret)
return ret;
opt = NULL; /* fsc->client now owns this */
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
- fsc->client->osdc.abort_on_full = true;
+ ceph_set_opt(fsc->client, ABORT_ON_FULL);
if (!fsopt->mds_namespace) {
ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.15"
+#define CIFS_VERSION "2.16"
#endif /* _CIFSFS_H */
int mid_state; /* wish this were enum but can not pass to wait_event */
unsigned int mid_flags;
__le16 command; /* smb command code */
+ unsigned int optype; /* operation type */
bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool multiEnd:1; /* both received */
kfree(param);
}
+static inline bool is_interrupt_error(int error)
+{
+ switch (error) {
+ case -EINTR:
+ case -ERESTARTSYS:
+ case -ERESTARTNOHAND:
+ case -ERESTARTNOINTR:
+ return true;
+ }
+ return false;
+}
+
+static inline bool is_retryable_error(int error)
+{
+ if (is_interrupt_error(error) || error == -EAGAIN)
+ return true;
+ return false;
+}
+
#define MID_FREE 0
#define MID_REQUEST_ALLOCATED 1
#define MID_REQUEST_SUBMITTED 2
int rc;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
- char tree[MAX_TREE_SIZE + 1];
+ char *tree;
const char *tcp_host;
size_t tcp_host_len;
const char *dfs_host;
size_t dfs_host_len;
+ tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
if (tcon->ipc) {
- snprintf(tree, sizeof(tree), "\\\\%s\\IPC$",
+ snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
tcon->ses->server->hostname);
- return CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ goto out;
}
- if (!tcon->dfs_path)
- return CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (!tcon->dfs_path) {
+ rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ goto out;
+ }
rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
if (rc)
- return rc;
+ goto out;
extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
&tcp_host_len);
continue;
}
- snprintf(tree, sizeof(tree), "\\%s", tgt);
+ snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
if (!rc)
rc = -ENOENT;
}
dfs_cache_free_tgts(&tl);
+out:
+ kfree(tree);
return rc;
}
#else
for (j = 0; j < nr_pages; j++) {
unlock_page(wdata2->pages[j]);
- if (rc != 0 && rc != -EAGAIN) {
+ if (rc != 0 && !is_retryable_error(rc)) {
SetPageError(wdata2->pages[j]);
end_page_writeback(wdata2->pages[j]);
put_page(wdata2->pages[j]);
if (rc) {
kref_put(&wdata2->refcount, cifs_writedata_release);
- if (rc == -EAGAIN)
+ if (is_retryable_error(rc))
continue;
break;
}
i += nr_pages;
} while (i < wdata->nr_pages);
- mapping_set_error(inode->i_mapping, rc);
+ if (rc != 0 && !is_retryable_error(rc))
+ mapping_set_error(inode->i_mapping, rc);
kref_put(&wdata->refcount, cifs_writedata_release);
}
kfree(server->hostname);
server->hostname = extract_hostname(name);
- if (!server->hostname) {
- cifs_dbg(FYI, "%s: failed to extract hostname from target: %d\n",
- __func__, -ENOMEM);
+ if (IS_ERR(server->hostname)) {
+ cifs_dbg(FYI,
+ "%s: failed to extract hostname from target: %ld\n",
+ __func__, PTR_ERR(server->hostname));
}
}
it->it_name = kstrndup(t->t_name, strlen(t->t_name),
GFP_KERNEL);
if (!it->it_name) {
+ kfree(it);
rc = -ENOMEM;
goto err_free_it;
}
if (can_flush) {
rc = filemap_write_and_wait(inode->i_mapping);
- mapping_set_error(inode->i_mapping, rc);
+ if (!is_interrupt_error(rc))
+ mapping_set_error(inode->i_mapping, rc);
if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path,
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf) {
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
free_xid(xid);
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
pgoff_t end, index;
struct cifs_writedata *wdata;
int rc = 0;
+ int saved_rc = 0;
unsigned int xid;
/*
rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
&wsize, &credits);
- if (rc)
+ if (rc != 0) {
+ done = true;
break;
+ }
tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
&found_pages);
if (!wdata) {
rc = -ENOMEM;
+ done = true;
add_credits_and_wake_if(server, credits, 0);
break;
}
if (rc != 0) {
add_credits_and_wake_if(server, wdata->credits, 0);
for (i = 0; i < nr_pages; ++i) {
- if (rc == -EAGAIN)
+ if (is_retryable_error(rc))
redirty_page_for_writepage(wbc,
wdata->pages[i]);
else
end_page_writeback(wdata->pages[i]);
put_page(wdata->pages[i]);
}
- if (rc != -EAGAIN)
+ if (!is_retryable_error(rc))
mapping_set_error(mapping, rc);
}
kref_put(&wdata->refcount, cifs_writedata_release);
continue;
}
+ /* Return immediately if we received a signal during writing */
+ if (is_interrupt_error(rc)) {
+ done = true;
+ break;
+ }
+
+ if (rc != 0 && saved_rc == 0)
+ saved_rc = rc;
+
wbc->nr_to_write -= nr_pages;
if (wbc->nr_to_write <= 0)
done = true;
goto retry;
}
+ if (saved_rc != 0)
+ rc = saved_rc;
+
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
set_page_writeback(page);
retry_write:
rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
- if (rc == -EAGAIN) {
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (is_retryable_error(rc)) {
+ if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
goto retry_write;
redirty_page_for_writepage(wbc, page);
} else if (rc != 0) {
* the flush returns error?
*/
rc = filemap_write_and_wait(inode->i_mapping);
+ if (is_interrupt_error(rc)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
mapping_set_error(inode->i_mapping, rc);
rc = 0;
* the flush returns error?
*/
rc = filemap_write_and_wait(inode->i_mapping);
+ if (is_interrupt_error(rc)) {
+ rc = -ERESTARTSYS;
+ goto cifs_setattr_exit;
+ }
+
mapping_set_error(inode->i_mapping, rc);
rc = 0;
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < sizeof(struct smb2_lock_element))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf)
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf) {
int rc;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
- char tree[MAX_TREE_SIZE + 1];
+ char *tree;
const char *tcp_host;
size_t tcp_host_len;
const char *dfs_host;
size_t dfs_host_len;
+ tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
if (tcon->ipc) {
- snprintf(tree, sizeof(tree), "\\\\%s\\IPC$",
+ snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
tcon->ses->server->hostname);
- return SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ goto out;
}
- if (!tcon->dfs_path)
- return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (!tcon->dfs_path) {
+ rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ goto out;
+ }
rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
if (rc)
- return rc;
+ goto out;
extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
&tcp_host_len);
continue;
}
- snprintf(tree, sizeof(tree), "\\%s", tgt);
+ snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
if (!rc)
rc = -ENOENT;
}
dfs_cache_free_tgts(&tl);
+out:
+ kfree(tree);
return rc;
}
#else
if (rdata->credits) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
- shdr->CreditRequest = shdr->CreditCharge;
+ shdr->CreditRequest =
+ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
spin_lock(&server->req_lock);
server->credits += rdata->credits -
le16_to_cpu(shdr->CreditCharge);
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
+ rdata->credits = le16_to_cpu(shdr->CreditCharge);
flags |= CIFS_HAS_CREDITS;
}
if (wdata->credits) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
- shdr->CreditRequest = shdr->CreditCharge;
+ shdr->CreditRequest =
+ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
spin_lock(&server->req_lock);
server->credits += wdata->credits -
le16_to_cpu(shdr->CreditCharge);
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
+ wdata->credits = le16_to_cpu(shdr->CreditCharge);
flags |= CIFS_HAS_CREDITS;
}
if (rc < 0 && rc != -EINTR)
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
- else
+ else if (rc > 0)
rc = 0;
return rc;
}
static void
-cifs_noop_callback(struct mid_q_entry *mid)
+cifs_compound_callback(struct mid_q_entry *mid)
{
+ struct TCP_Server_Info *server = mid->server;
+ unsigned int optype = mid->optype;
+ unsigned int credits_received = 0;
+
+ if (mid->mid_state == MID_RESPONSE_RECEIVED) {
+ if (mid->resp_buf)
+ credits_received = server->ops->get_credits(mid);
+ else
+ cifs_dbg(FYI, "Bad state for cancelled MID\n");
+ }
+
+ add_credits(server, credits_received, optype);
+}
+
+static void
+cifs_compound_last_callback(struct mid_q_entry *mid)
+{
+ cifs_compound_callback(mid);
+ cifs_wake_up_task(mid);
+}
+
+static void
+cifs_cancelled_callback(struct mid_q_entry *mid)
+{
+ cifs_compound_callback(mid);
+ DeleteMidQEntry(mid);
}
int
int i, j, rc = 0;
int timeout, optype;
struct mid_q_entry *midQ[MAX_COMPOUND];
- unsigned int credits = 0;
+ bool cancelled_mid[MAX_COMPOUND] = {false};
+ unsigned int credits[MAX_COMPOUND] = {0};
char *buf;
timeout = flags & CIFS_TIMEOUT_MASK;
return -ENOENT;
/*
- * Ensure that we do not send more than 50 overlapping requests
- * to the same server. We may make this configurable later or
- * use ses->maxReq.
+ * Ensure we obtain 1 credit per request in the compound chain.
+ * This could be optimized further by waiting for all the credits
+ * at once, but that may block for a long time if we are short on
+ * credits due to heavy operations in progress or the server not
+ * granting us much, so a fallback to the current approach would be
+ * needed anyway.
*/
- rc = wait_for_free_request(ses->server, timeout, optype);
- if (rc)
- return rc;
+ for (i = 0; i < num_rqst; i++) {
+ rc = wait_for_free_request(ses->server, timeout, optype);
+ if (rc) {
+ /*
+ * We haven't sent an SMB packet to the server yet but
+ * we already obtained credits for i requests in the
+ * compound chain - need to return those credits back
+ * for future use. Note that we need to call add_credits
+ * multiple times to match the way we obtained credits
+ * in the first place and to account for in flight
+ * requests correctly.
+ */
+ for (j = 0; j < i; j++)
+ add_credits(ses->server, 1, optype);
+ return rc;
+ }
+ credits[i] = 1;
+ }
/*
* Make sure that we sign in the same order that we send on this socket
for (j = 0; j < i; j++)
cifs_delete_mid(midQ[j]);
mutex_unlock(&ses->server->srv_mutex);
+
/* Update # of requests on wire to server */
- add_credits(ses->server, 1, optype);
+ for (j = 0; j < num_rqst; j++)
+ add_credits(ses->server, credits[j], optype);
return PTR_ERR(midQ[i]);
}
midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+ midQ[i]->optype = optype;
/*
- * We don't invoke the callback compounds unless it is the last
- * request.
+ * Invoke callback for every part of the compound chain
+ * to calculate credits properly. Wake up this thread only when
+ * the last element is received.
*/
if (i < num_rqst - 1)
- midQ[i]->callback = cifs_noop_callback;
+ midQ[i]->callback = cifs_compound_callback;
+ else
+ midQ[i]->callback = cifs_compound_last_callback;
}
cifs_in_send_inc(ses->server);
rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
mutex_unlock(&ses->server->srv_mutex);
- if (rc < 0)
+ if (rc < 0) {
+ /* Sending failed for some reason - return credits back */
+ for (i = 0; i < num_rqst; i++)
+ add_credits(ses->server, credits[i], optype);
goto out;
+ }
+
+ /*
+ * At this point the request is passed to the network stack - we assume
+ * that any credits taken from the server structure on the client have
+ * been spent and we can't return them back. Once we receive responses
+ * we will collect credits granted by the server in the mid callbacks
+ * and add those credits to the server structure.
+ */
/*
* Compounding is never used during session establish.
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(ses->server, midQ[i]);
- if (rc != 0) {
+ if (rc != 0)
+ break;
+ }
+ if (rc != 0) {
+ for (; i < num_rqst; i++) {
cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(ses->server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
- midQ[i]->callback = DeleteMidQEntry;
- spin_unlock(&GlobalMid_Lock);
- add_credits(ses->server, 1, optype);
- return rc;
+ midQ[i]->callback = cifs_cancelled_callback;
+ cancelled_mid[i] = true;
+ credits[i] = 0;
}
spin_unlock(&GlobalMid_Lock);
}
}
- for (i = 0; i < num_rqst; i++)
- if (midQ[i]->resp_buf)
- credits += ses->server->ops->get_credits(midQ[i]);
- if (!credits)
- credits = 1;
-
for (i = 0; i < num_rqst; i++) {
if (rc < 0)
goto out;
rc = cifs_sync_mid_result(midQ[i], ses->server);
if (rc != 0) {
- add_credits(ses->server, credits, optype);
- return rc;
+ /* mark this mid as cancelled to not free it below */
+ cancelled_mid[i] = true;
+ goto out;
}
if (!midQ[i]->resp_buf ||
* This is prevented above by using a noop callback that will not
* wake this thread except for the very last PDU.
*/
- for (i = 0; i < num_rqst; i++)
- cifs_delete_mid(midQ[i]);
- add_credits(ses->server, credits, optype);
+ for (i = 0; i < num_rqst; i++) {
+ if (!cancelled_mid[i])
+ cifs_delete_mid(midQ[i]);
+ }
return rc;
}
}
EXPORT_SYMBOL(fscrypt_get_ctx);
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
+ const struct fscrypt_info *ci)
+{
+ memset(iv, 0, ci->ci_mode->ivsize);
+ iv->lblk_num = cpu_to_le64(lblk_num);
+
+ if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY)
+ memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+
+ if (ci->ci_essiv_tfm != NULL)
+ crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
+}
+
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
u64 lblk_num, struct page *src_page,
struct page *dest_page, unsigned int len,
unsigned int offs, gfp_t gfp_flags)
{
- struct {
- __le64 index;
- u8 padding[FS_IV_SIZE - sizeof(__le64)];
- } iv;
+ union fscrypt_iv iv;
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
BUG_ON(len == 0);
- BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
- BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
- iv.index = cpu_to_le64(lblk_num);
- memset(iv.padding, 0, sizeof(iv.padding));
-
- if (ci->ci_essiv_tfm != NULL) {
- crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
- (u8 *)&iv);
- }
+ fscrypt_generate_iv(&iv, lblk_num, ci);
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
- struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm;
- int res = 0;
- char iv[FS_CRYPTO_BLOCK_SIZE];
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ union fscrypt_iv iv;
struct scatterlist sg;
+ int res;
/*
* Copy the filename to the output buffer for encrypting in-place and
memset(out + iname->len, 0, olen - iname->len);
/* Initialize the IV */
- memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+ fscrypt_generate_iv(&iv, 0, ci);
/* Set up the encryption request */
req = skcipher_request_alloc(tfm, GFP_NOFS);
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &wait);
sg_init_one(&sg, out, olen);
- skcipher_request_set_crypt(req, &sg, &sg, olen, iv);
+ skcipher_request_set_crypt(req, &sg, &sg, olen, &iv);
/* Do the encryption */
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
- struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm;
- int res = 0;
- char iv[FS_CRYPTO_BLOCK_SIZE];
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ union fscrypt_iv iv;
+ int res;
/* Allocate request */
req = skcipher_request_alloc(tfm, GFP_NOFS);
crypto_req_done, &wait);
/* Initialize IV */
- memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+ fscrypt_generate_iv(&iv, 0, ci);
/* Create decryption request */
sg_init_one(&src_sg, iname->name, iname->len);
sg_init_one(&dst_sg, oname->name, oname->len);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv);
res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
#include <crypto/hash.h>
/* Encryption parameters */
-#define FS_IV_SIZE 16
#define FS_KEY_DERIVATION_NONCE_SIZE 16
/**
} __packed;
/*
- * A pointer to this structure is stored in the file system's in-core
- * representation of an inode.
+ * fscrypt_info - the "encryption key" for an inode
+ *
+ * When an encrypted file's key is made available, an instance of this struct is
+ * allocated and stored in ->i_crypt_info. Once created, it remains until the
+ * inode is evicted.
*/
struct fscrypt_info {
+
+ /* The actual crypto transform used for encryption and decryption */
+ struct crypto_skcipher *ci_ctfm;
+
+ /*
+ * Cipher for ESSIV IV generation. Only set for CBC contents
+ * encryption, otherwise is NULL.
+ */
+ struct crypto_cipher *ci_essiv_tfm;
+
+ /*
+ * Encryption mode used for this inode. It corresponds to either
+ * ci_data_mode or ci_filename_mode, depending on the inode type.
+ */
+ struct fscrypt_mode *ci_mode;
+
+ /*
+ * If non-NULL, then this inode uses a master key directly rather than a
+ * derived key, and ci_ctfm will equal ci_master_key->mk_ctfm.
+ * Otherwise, this inode uses a derived key.
+ */
+ struct fscrypt_master_key *ci_master_key;
+
+ /* fields from the fscrypt_context */
u8 ci_data_mode;
u8 ci_filename_mode;
u8 ci_flags;
- struct crypto_skcipher *ci_ctfm;
- struct crypto_cipher *ci_essiv_tfm;
- u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+ u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE];
};
typedef enum {
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
+ if (contents_mode == FS_ENCRYPTION_MODE_ADIANTUM &&
+ filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM)
+ return true;
+
return false;
}
#define fscrypt_err(sb, fmt, ...) \
fscrypt_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
+#define FSCRYPT_MAX_IV_SIZE 32
+
+union fscrypt_iv {
+ struct {
+ /* logical block number within the file */
+ __le64 lblk_num;
+
+ /* per-file nonce; only set in DIRECT_KEY mode */
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+ };
+ u8 raw[FSCRYPT_MAX_IV_SIZE];
+};
+
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
+ const struct fscrypt_info *ci);
+
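/*
 * Byte layout of the union above; only the first ->ivsize bytes of 'raw'
 * are handed to the cipher (16 for the AES modes, 32 for Adiantum):
 *
 *   bytes  0..7    lblk_num, little-endian
 *   bytes  8..23   nonce[16], filled in only for FS_POLICY_FLAG_DIRECT_KEY
 *   bytes 24..31   zero padding up to FSCRYPT_MAX_IV_SIZE
 *
 * fscrypt_generate_iv() zeroes the whole buffer first, so without
 * DIRECT_KEY the IV is simply the zero-padded block number (and, for the
 * CBC mode, additionally encrypted with the ESSIV tfm).
 */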
/* fname.c */
extern int fname_encrypt(struct inode *inode, const struct qstr *iname,
u8 *out, unsigned int olen);
u32 *encrypted_len_ret);
/* keyinfo.c */
+
+struct fscrypt_mode {
+ const char *friendly_name;
+ const char *cipher_str;
+ int keysize;
+ int ivsize;
+ bool logged_impl_name;
+ bool needs_essiv;
+};
+
extern void __exit fscrypt_essiv_cleanup(void);
#endif /* _FSCRYPT_PRIVATE_H */
*/
#include <keys/user-type.h>
+#include <linux/hashtable.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static struct crypto_shash *essiv_hash_tfm;
+/* Table of keys referenced by FS_POLICY_FLAG_DIRECT_KEY policies */
+static DEFINE_HASHTABLE(fscrypt_master_keys, 6); /* 6 bits = 64 buckets */
+static DEFINE_SPINLOCK(fscrypt_master_keys_lock);
+
/*
* Key derivation function. This generates the derived key by encrypting the
* master key with AES-128-ECB using the inode's nonce as the AES key.
return ERR_PTR(-ENOKEY);
}
-/* Find the master key, then derive the inode's actual encryption key */
-static int find_and_derive_key(const struct inode *inode,
- const struct fscrypt_context *ctx,
- u8 *derived_key, unsigned int derived_keysize)
-{
- struct key *key;
- const struct fscrypt_key *payload;
- int err;
-
- key = find_and_lock_process_key(FS_KEY_DESC_PREFIX,
- ctx->master_key_descriptor,
- derived_keysize, &payload);
- if (key == ERR_PTR(-ENOKEY) && inode->i_sb->s_cop->key_prefix) {
- key = find_and_lock_process_key(inode->i_sb->s_cop->key_prefix,
- ctx->master_key_descriptor,
- derived_keysize, &payload);
- }
- if (IS_ERR(key))
- return PTR_ERR(key);
- err = derive_key_aes(payload->raw, ctx, derived_key, derived_keysize);
- up_read(&key->sem);
- key_put(key);
- return err;
-}
-
-static struct fscrypt_mode {
- const char *friendly_name;
- const char *cipher_str;
- int keysize;
- bool logged_impl_name;
-} available_modes[] = {
+static struct fscrypt_mode available_modes[] = {
[FS_ENCRYPTION_MODE_AES_256_XTS] = {
.friendly_name = "AES-256-XTS",
.cipher_str = "xts(aes)",
.keysize = 64,
+ .ivsize = 16,
},
[FS_ENCRYPTION_MODE_AES_256_CTS] = {
.friendly_name = "AES-256-CTS-CBC",
.cipher_str = "cts(cbc(aes))",
.keysize = 32,
+ .ivsize = 16,
},
[FS_ENCRYPTION_MODE_AES_128_CBC] = {
.friendly_name = "AES-128-CBC",
.cipher_str = "cbc(aes)",
.keysize = 16,
+ .ivsize = 16,
+ .needs_essiv = true,
},
[FS_ENCRYPTION_MODE_AES_128_CTS] = {
.friendly_name = "AES-128-CTS-CBC",
.cipher_str = "cts(cbc(aes))",
.keysize = 16,
+ .ivsize = 16,
+ },
+ [FS_ENCRYPTION_MODE_ADIANTUM] = {
+ .friendly_name = "Adiantum",
+ .cipher_str = "adiantum(xchacha12,aes)",
+ .keysize = 32,
+ .ivsize = 32,
},
};
return ERR_PTR(-EINVAL);
}
-static void put_crypt_info(struct fscrypt_info *ci)
+/* Find the master key, then derive the inode's actual encryption key */
+static int find_and_derive_key(const struct inode *inode,
+ const struct fscrypt_context *ctx,
+ u8 *derived_key, const struct fscrypt_mode *mode)
{
- if (!ci)
+ struct key *key;
+ const struct fscrypt_key *payload;
+ int err;
+
+ key = find_and_lock_process_key(FS_KEY_DESC_PREFIX,
+ ctx->master_key_descriptor,
+ mode->keysize, &payload);
+ if (key == ERR_PTR(-ENOKEY) && inode->i_sb->s_cop->key_prefix) {
+ key = find_and_lock_process_key(inode->i_sb->s_cop->key_prefix,
+ ctx->master_key_descriptor,
+ mode->keysize, &payload);
+ }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ if (ctx->flags & FS_POLICY_FLAG_DIRECT_KEY) {
+ if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) {
+ fscrypt_warn(inode->i_sb,
+ "direct key mode not allowed with %s",
+ mode->friendly_name);
+ err = -EINVAL;
+ } else if (ctx->contents_encryption_mode !=
+ ctx->filenames_encryption_mode) {
+ fscrypt_warn(inode->i_sb,
+ "direct key mode not allowed with different contents and filenames modes");
+ err = -EINVAL;
+ } else {
+ memcpy(derived_key, payload->raw, mode->keysize);
+ err = 0;
+ }
+ } else {
+ err = derive_key_aes(payload->raw, ctx, derived_key,
+ mode->keysize);
+ }
+ up_read(&key->sem);
+ key_put(key);
+ return err;
+}
+
+/* Allocate and key a symmetric cipher object for the given encryption mode */
+static struct crypto_skcipher *
+allocate_skcipher_for_mode(struct fscrypt_mode *mode, const u8 *raw_key,
+ const struct inode *inode)
+{
+ struct crypto_skcipher *tfm;
+ int err;
+
+ tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
+ if (IS_ERR(tfm)) {
+ fscrypt_warn(inode->i_sb,
+ "error allocating '%s' transform for inode %lu: %ld",
+ mode->cipher_str, inode->i_ino, PTR_ERR(tfm));
+ return tfm;
+ }
+ if (unlikely(!mode->logged_impl_name)) {
+ /*
+ * fscrypt performance can vary greatly depending on which
+ * crypto algorithm implementation is used. Help people debug
+ * performance problems by logging the ->cra_driver_name the
+ * first time a mode is used. Note that multiple threads can
+ * race here, but it doesn't really matter.
+ */
+ mode->logged_impl_name = true;
+ pr_info("fscrypt: %s using implementation \"%s\"\n",
+ mode->friendly_name,
+ crypto_skcipher_alg(tfm)->base.cra_driver_name);
+ }
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
+ if (err)
+ goto err_free_tfm;
+
+ return tfm;
+
+err_free_tfm:
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(err);
+}
+
+/* Master key referenced by FS_POLICY_FLAG_DIRECT_KEY policy */
+struct fscrypt_master_key {
+ struct hlist_node mk_node;
+ refcount_t mk_refcount;
+ const struct fscrypt_mode *mk_mode;
+ struct crypto_skcipher *mk_ctfm;
+ u8 mk_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 mk_raw[FS_MAX_KEY_SIZE];
+};
+
+static void free_master_key(struct fscrypt_master_key *mk)
+{
+ if (mk) {
+ crypto_free_skcipher(mk->mk_ctfm);
+ kzfree(mk);
+ }
+}
+
+static void put_master_key(struct fscrypt_master_key *mk)
+{
+ if (!refcount_dec_and_lock(&mk->mk_refcount, &fscrypt_master_keys_lock))
return;
+ hash_del(&mk->mk_node);
+ spin_unlock(&fscrypt_master_keys_lock);
- crypto_free_skcipher(ci->ci_ctfm);
- crypto_free_cipher(ci->ci_essiv_tfm);
- kmem_cache_free(fscrypt_info_cachep, ci);
+ free_master_key(mk);
+}
+
+/*
+ * Find/insert the given master key into the fscrypt_master_keys table. If
+ * found, it is returned with elevated refcount, and 'to_insert' is freed if
+ * non-NULL. If not found, 'to_insert' is inserted and returned if it's
+ * non-NULL; otherwise NULL is returned.
+ */
+static struct fscrypt_master_key *
+find_or_insert_master_key(struct fscrypt_master_key *to_insert,
+ const u8 *raw_key, const struct fscrypt_mode *mode,
+ const struct fscrypt_info *ci)
+{
+ unsigned long hash_key;
+ struct fscrypt_master_key *mk;
+
+ /*
+ * Careful: to avoid potentially leaking secret key bytes via timing
+ * information, we must key the hash table by descriptor rather than by
+ * raw key, and use crypto_memneq() when comparing raw keys.
+ */
+
+ BUILD_BUG_ON(sizeof(hash_key) > FS_KEY_DESCRIPTOR_SIZE);
+ memcpy(&hash_key, ci->ci_master_key_descriptor, sizeof(hash_key));
+
+ spin_lock(&fscrypt_master_keys_lock);
+ hash_for_each_possible(fscrypt_master_keys, mk, mk_node, hash_key) {
+ if (memcmp(ci->ci_master_key_descriptor, mk->mk_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE) != 0)
+ continue;
+ if (mode != mk->mk_mode)
+ continue;
+ if (crypto_memneq(raw_key, mk->mk_raw, mode->keysize))
+ continue;
+ /* using existing tfm with same (descriptor, mode, raw_key) */
+ refcount_inc(&mk->mk_refcount);
+ spin_unlock(&fscrypt_master_keys_lock);
+ free_master_key(to_insert);
+ return mk;
+ }
+ if (to_insert)
+ hash_add(fscrypt_master_keys, &to_insert->mk_node, hash_key);
+ spin_unlock(&fscrypt_master_keys_lock);
+ return to_insert;
+}
+
+/* Prepare to encrypt directly using the master key in the given mode */
+static struct fscrypt_master_key *
+fscrypt_get_master_key(const struct fscrypt_info *ci, struct fscrypt_mode *mode,
+ const u8 *raw_key, const struct inode *inode)
+{
+ struct fscrypt_master_key *mk;
+ int err;
+
+ /* Is there already a tfm for this key? */
+ mk = find_or_insert_master_key(NULL, raw_key, mode, ci);
+ if (mk)
+ return mk;
+
+ /* Nope, allocate one. */
+ mk = kzalloc(sizeof(*mk), GFP_NOFS);
+ if (!mk)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&mk->mk_refcount, 1);
+ mk->mk_mode = mode;
+ mk->mk_ctfm = allocate_skcipher_for_mode(mode, raw_key, inode);
+ if (IS_ERR(mk->mk_ctfm)) {
+ err = PTR_ERR(mk->mk_ctfm);
+ mk->mk_ctfm = NULL;
+ goto err_free_mk;
+ }
+ memcpy(mk->mk_descriptor, ci->ci_master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+ memcpy(mk->mk_raw, raw_key, mode->keysize);
+
+ return find_or_insert_master_key(mk, raw_key, mode, ci);
+
+err_free_mk:
+ free_master_key(mk);
+ return ERR_PTR(err);
}
static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
crypto_free_shash(essiv_hash_tfm);
}
+/*
+ * Given the encryption mode and key (normally the derived key, but for
+ * FS_POLICY_FLAG_DIRECT_KEY mode it's the master key), set up the inode's
+ * symmetric cipher transform object(s).
+ */
+static int setup_crypto_transform(struct fscrypt_info *ci,
+ struct fscrypt_mode *mode,
+ const u8 *raw_key, const struct inode *inode)
+{
+ struct fscrypt_master_key *mk;
+ struct crypto_skcipher *ctfm;
+ int err;
+
+ if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY) {
+ mk = fscrypt_get_master_key(ci, mode, raw_key, inode);
+ if (IS_ERR(mk))
+ return PTR_ERR(mk);
+ ctfm = mk->mk_ctfm;
+ } else {
+ mk = NULL;
+ ctfm = allocate_skcipher_for_mode(mode, raw_key, inode);
+ if (IS_ERR(ctfm))
+ return PTR_ERR(ctfm);
+ }
+ ci->ci_master_key = mk;
+ ci->ci_ctfm = ctfm;
+
+ if (mode->needs_essiv) {
+ /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */
+ WARN_ON(mode->ivsize != AES_BLOCK_SIZE);
+ WARN_ON(ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY);
+
+ err = init_essiv_generator(ci, raw_key, mode->keysize);
+ if (err) {
+ fscrypt_warn(inode->i_sb,
+ "error initializing ESSIV generator for inode %lu: %d",
+ inode->i_ino, err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void put_crypt_info(struct fscrypt_info *ci)
+{
+ if (!ci)
+ return;
+
+ if (ci->ci_master_key) {
+ put_master_key(ci->ci_master_key);
+ } else {
+ crypto_free_skcipher(ci->ci_ctfm);
+ crypto_free_cipher(ci->ci_essiv_tfm);
+ }
+ kmem_cache_free(fscrypt_info_cachep, ci);
+}
+
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
struct fscrypt_context ctx;
- struct crypto_skcipher *ctfm;
struct fscrypt_mode *mode;
u8 *raw_key = NULL;
int res;
if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
return -EINVAL;
- crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
+ crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_NOFS);
if (!crypt_info)
return -ENOMEM;
crypt_info->ci_flags = ctx.flags;
crypt_info->ci_data_mode = ctx.contents_encryption_mode;
crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
- crypt_info->ci_ctfm = NULL;
- crypt_info->ci_essiv_tfm = NULL;
- memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
- sizeof(crypt_info->ci_master_key));
+ memcpy(crypt_info->ci_master_key_descriptor, ctx.master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+ memcpy(crypt_info->ci_nonce, ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
mode = select_encryption_mode(crypt_info, inode);
if (IS_ERR(mode)) {
res = PTR_ERR(mode);
goto out;
}
+ WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
+ crypt_info->ci_mode = mode;
/*
- * This cannot be a stack buffer because it is passed to the scatterlist
- * crypto API as part of key derivation.
+ * This cannot be a stack buffer because it may be passed to the
+ * scatterlist crypto API as part of key derivation.
*/
res = -ENOMEM;
raw_key = kmalloc(mode->keysize, GFP_NOFS);
if (!raw_key)
goto out;
- res = find_and_derive_key(inode, &ctx, raw_key, mode->keysize);
+ res = find_and_derive_key(inode, &ctx, raw_key, mode);
if (res)
goto out;
- ctfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
- if (IS_ERR(ctfm)) {
- res = PTR_ERR(ctfm);
- fscrypt_warn(inode->i_sb,
- "error allocating '%s' transform for inode %lu: %d",
- mode->cipher_str, inode->i_ino, res);
- goto out;
- }
- if (unlikely(!mode->logged_impl_name)) {
- /*
- * fscrypt performance can vary greatly depending on which
- * crypto algorithm implementation is used. Help people debug
- * performance problems by logging the ->cra_driver_name the
- * first time a mode is used. Note that multiple threads can
- * race here, but it doesn't really matter.
- */
- mode->logged_impl_name = true;
- pr_info("fscrypt: %s using implementation \"%s\"\n",
- mode->friendly_name,
- crypto_skcipher_alg(ctfm)->base.cra_driver_name);
- }
- crypt_info->ci_ctfm = ctfm;
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
- res = crypto_skcipher_setkey(ctfm, raw_key, mode->keysize);
+ res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
if (res)
goto out;
- if (S_ISREG(inode->i_mode) &&
- crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
- res = init_essiv_generator(crypt_info, raw_key, mode->keysize);
- if (res) {
- fscrypt_warn(inode->i_sb,
- "error initializing ESSIV generator for inode %lu: %d",
- inode->i_ino, res);
- goto out;
- }
- }
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
child_ci = child->i_crypt_info;
if (parent_ci && child_ci) {
- return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
+ return memcmp(parent_ci->ci_master_key_descriptor,
+ child_ci->ci_master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE) == 0 &&
(parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
(parent_ci->ci_filename_mode ==
ctx.contents_encryption_mode = ci->ci_data_mode;
ctx.filenames_encryption_mode = ci->ci_filename_mode;
ctx.flags = ci->ci_flags;
- memcpy(ctx.master_key_descriptor, ci->ci_master_key,
+ memcpy(ctx.master_key_descriptor, ci->ci_master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE);
get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE);
goto out;
}
+ ret = file_write_and_wait_range(file, start, end);
+ if (ret)
+ return ret;
+
if (!journal) {
- ret = __generic_file_fsync(file, start, end, datasync);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL
+ };
+
+ ret = ext4_write_inode(inode, &wbc);
if (!ret)
ret = ext4_sync_parent(inode);
if (test_opt(inode->i_sb, BARRIER))
goto out;
}
- ret = file_write_and_wait_range(file, start, end);
- if (ret)
- return ret;
/*
* data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
ret = err;
}
out:
+ err = file_check_and_advance_wb_err(file);
+ if (ret == 0)
+ ret = err;
trace_ext4_sync_file_exit(inode, ret);
return ret;
}
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
physical += offsetof(struct ext4_inode, i_block);
- if (physical)
- error = fiemap_fill_next_extent(fieinfo, start, physical,
- inline_len, flags);
brelse(iloc.bh);
out:
up_read(&EXT4_I(inode)->xattr_sem);
+ if (physical)
+ error = fiemap_fill_next_extent(fieinfo, start, physical,
+ inline_len, flags);
return (error < 0 ? error : 0);
}
* We may need to convert up to one extent per block in
* the page and we may dirty the inode.
*/
- rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
+ rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
+ PAGE_SIZE >> inode->i_blkbits);
}
/*
gid_t i_gid;
projid_t i_projid;
- if (((flags & EXT4_IGET_NORMAL) &&
+ if ((!(flags & EXT4_IGET_SPECIAL) &&
(ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
(ino < EXT4_ROOT_INO) ||
(ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
ext4_superblock_csum_set(sb);
if (sync)
lock_buffer(sbh);
- if (buffer_write_io_error(sbh)) {
+ if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
/*
* Oh, dear. A previous attempt to write the
* superblock failed. This could happen because the
* truncation is indicated by end of range being LLONG_MAX
* In this case, we first scan the range and release found pages.
* After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
- * maps and global counts.
+ * maps and global counts. Page faults can not race with truncation
+ * in this routine. hugetlb_no_page() prevents page faults in the
+ * truncated range. It checks i_size before allocation, and again after
+ * with the page table lock for the page held. The same lock must be
+ * acquired to unmap a page.
* hole punch is indicated if end is not LLONG_MAX
* In the hole punch case we scan the range and release found pages.
* Only when releasing a page is the associated region/reserv map
* deleted. The region/reserv map for ranges without associated
- * pages are not modified.
- *
- * Callers of this routine must hold the i_mmap_rwsem in write mode to prevent
- * races with page faults.
- *
+ * pages are not modified. Page faults can race with hole punch.
+ * This is indicated if we find a mapped page.
* Note: If the passed end of range value is beyond the end of file, but
* not LLONG_MAX this routine still performs a hole punch operation.
*/
for (i = 0; i < pagevec_count(&pvec); ++i) {
struct page *page = pvec.pages[i];
+ u32 hash;
index = page->index;
+ hash = hugetlb_fault_mutex_hash(h, current->mm,
+ &pseudo_vma,
+ mapping, index, 0);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
/*
- * A mapped page is impossible as callers should unmap
- * all references before calling. And, i_mmap_rwsem
- * prevents the creation of additional mappings.
+ * If page is mapped, it was faulted in after being
+ * unmapped in caller. Unmap (again) now after taking
+ * the fault mutex. The mutex will prevent faults
+ * until we finish removing the page.
+ *
+ * This race can only happen in the hole punch case.
+ * Getting here in a truncate operation is a bug.
*/
- VM_BUG_ON(page_mapped(page));
+ if (unlikely(page_mapped(page))) {
+ BUG_ON(truncate_op);
+
+ i_mmap_lock_write(mapping);
+ hugetlb_vmdelete_list(&mapping->i_mmap,
+ index * pages_per_huge_page(h),
+ (index + 1) * pages_per_huge_page(h));
+ i_mmap_unlock_write(mapping);
+ }
lock_page(page);
/*
}
unlock_page(page);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
huge_pagevec_release(&pvec);
cond_resched();
static void hugetlbfs_evict_inode(struct inode *inode)
{
- struct address_space *mapping = inode->i_mapping;
struct resv_map *resv_map;
- /*
- * The vfs layer guarantees that there are no other users of this
- * inode. Therefore, it would be safe to call remove_inode_hugepages
- * without holding i_mmap_rwsem. We acquire and hold here to be
- * consistent with other callers. Since there will be no contention
- * on the semaphore, overhead is negligible.
- */
- i_mmap_lock_write(mapping);
remove_inode_hugepages(inode, 0, LLONG_MAX);
- i_mmap_unlock_write(mapping);
-
resv_map = (struct resv_map *)inode->i_mapping->private_data;
/* root inode doesn't have the resv_map, so we should check it */
if (resv_map)
i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
- remove_inode_hugepages(inode, offset, LLONG_MAX);
i_mmap_unlock_write(mapping);
+ remove_inode_hugepages(inode, offset, LLONG_MAX);
return 0;
}
hugetlb_vmdelete_list(&mapping->i_mmap,
hole_start >> PAGE_SHIFT,
hole_end >> PAGE_SHIFT);
- remove_inode_hugepages(inode, hole_start, hole_end);
i_mmap_unlock_write(mapping);
+ remove_inode_hugepages(inode, hole_start, hole_end);
inode_unlock(inode);
}
/* addr is the offset within the file (zero based) */
addr = index * hpage_size;
- /*
- * fault mutex taken here, protects against fault path
- * and hole punch. inode_lock previously taken protects
- * against truncation.
- */
+ /* mutex taken here, fault path and hole punch */
hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
index, addr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
- ssize_t ret;
-
if (file_inode(file_in) == file_inode(file_out))
return -EINVAL;
-retry:
- ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
- if (ret == -EAGAIN)
- goto retry;
- return ret;
+ return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
}
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
struct pstore_record *record)
{
struct persistent_ram_zone *prz;
- bool update = (record->type == PSTORE_TYPE_DMESG);
/* Give up if we never existed or have hit the end. */
if (!przs)
return NULL;
/* Update old/shadowed buffer. */
- if (update)
+ if (prz->type == PSTORE_TYPE_DMESG)
persistent_ram_save_old(prz);
if (!persistent_ram_old_size(prz))
{
struct device *dev = &pdev->dev;
struct ramoops_platform_data *pdata = dev->platform_data;
+ struct ramoops_platform_data pdata_local;
struct ramoops_context *cxt = &oops_cxt;
size_t dump_mem_sz;
phys_addr_t paddr;
int err = -EINVAL;
if (dev_of_node(dev) && !pdata) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- pr_err("cannot allocate platform data buffer\n");
- err = -ENOMEM;
- goto fail_out;
- }
+ pdata = &pdata_local;
+ memset(pdata, 0, sizeof(*pdata));
err = ramoops_parse_dt(pdev, pdata);
if (err < 0)
kuid_t uid;
kgid_t gid;
- BUG_ON(!kobj);
+ if (WARN_ON(!kobj))
+ return -EINVAL;
if (kobj->parent)
parent = kobj->parent->sd;
kuid_t uid;
kgid_t gid;
- BUG_ON(!kobj || !kobj->sd || !attr);
+ if (WARN_ON(!kobj || !kobj->sd || !attr))
+ return -EINVAL;
kobject_get_ownership(kobj, &uid, &gid);
return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode,
kuid_t uid;
kgid_t gid;
- BUG_ON(!kobj || !kobj->sd || !attr);
+ if (WARN_ON(!kobj || !kobj->sd || !attr))
+ return -EINVAL;
kobject_get_ownership(kobj, &uid, &gid);
return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true,
kgid_t gid;
int error;
- BUG_ON(!kobj || (!update && !kobj->sd));
+ if (WARN_ON(!kobj || (!update && !kobj->sd)))
+ return -EINVAL;
/* Updates may happen before the object has been instantiated */
if (unlikely(update && !kobj->sd))
{
struct kernfs_node *kn, *target = NULL;
- BUG_ON(!name || !parent);
+ if (WARN_ON(!name || !parent))
+ return -EINVAL;
/*
* We don't own @target_kobj and it may be removed at any time.
 * to 16 bits. So will give a constant value (0x8000) for compatibility.
*/
DP_DPCD_QUIRK_CONSTANT_N,
+ /**
+ * @DP_DPCD_QUIRK_NO_PSR:
+ *
+ * The device does not support PSR even though it reports that it does,
+ * or the driver still needs to implement proper handling for such a
+ * device.
+ */
+ DP_DPCD_QUIRK_NO_PSR,
};
/**
struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+int __must_check
+drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* Copyright (c) 2017 Amlogic, inc.
* Author: Yixun Lan <yixun.lan@amlogic.com>
*
- * SPDX-License-Identifier: (GPL-2.0+ OR BSD)
*/
#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
struct bcma_soc {
struct bcma_bus bus;
+ struct device *dev;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
#define BPF_ALU_SANITIZE_SRC 1U
#define BPF_ALU_SANITIZE_DST 2U
#define BPF_ALU_NEG_VALUE (1U << 2)
+#define BPF_ALU_NON_POINTER (1U << 3)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
#define _LINUX_BPFILTER_H
#include <uapi/linux/bpfilter.h>
+#include <linux/umh.h>
struct sock;
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
-extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
- char __user *optval,
- unsigned int optlen, bool is_set);
+struct bpfilter_umh_ops {
+ struct umh_info info;
+ /* since ip_getsockopt() can run in parallel, serialize access to umh */
+ struct mutex lock;
+ int (*sockopt)(struct sock *sk, int optname,
+ char __user *optval,
+ unsigned int optlen, bool is_set);
+ int (*start)(void);
+ bool stop;
+};
+extern struct bpfilter_umh_ops bpfilter_ops;
#endif
#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
+#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
unsigned long osd_request_timeout; /* jiffies */
/*
- * any type that can't be simply compared or doesn't need need
+ * any type that can't be simply compared or doesn't need
* to be compared should go beyond this point,
* ceph_compare_options() should be updated accordingly
*/
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
void *private);
-int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
+ bool show_all);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
struct rb_root linger_map_checks;
atomic_t num_requests;
atomic_t num_homeless;
- bool abort_on_full; /* abort w/ ENOSPC when full */
int abort_err;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
- */
+/* Compiler specific definitions for Clang compiler */
+
#define uninitialized_var(x) x = *(&(x))
/* same as gcc, this was present in clang-2.6 so we can assume it works
(typeof(ptr)) (__ptr + (off)); \
})
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) \
- __asm__ ("" : "=r" (var) : "0" (var))
-
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
#ifdef __ECC
-/* Some compiler specific definitions are overwritten here
- * for Intel ECC compiler
- */
+/* Compiler specific definitions for Intel ECC compiler */
#include <asm/intrinsics.h>
#endif
#ifndef OPTIMIZER_HIDE_VAR
-#define OPTIMIZER_HIDE_VAR(var) barrier()
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#endif
/* Not-quite-unique ID. */
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
- int direction, dma_addr_t dma_addr,
- bool map_single);
+ int direction, dma_addr_t dma_addr);
extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction, bool map_single);
+ size_t size, int direction);
extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int mapped_ents, int direction);
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
- int direction, dma_addr_t dma_addr,
- bool map_single)
+ int direction, dma_addr_t dma_addr)
{
}
}
static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction,
- bool map_single)
+ size_t size, int direction)
{
}
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-#ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (dev && dev->dma_ops)
- return dev->dma_ops;
- return get_arch_dma_ops(dev ? dev->bus : NULL);
-}
-
-static inline void set_dma_ops(struct device *dev,
- const struct dma_map_ops *dma_ops)
-{
- dev->dma_ops = dma_ops;
-}
-#else
-/*
- * Define the dma api to allow compilation of dma dependent code.
- * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
- * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
- * where <something> guarantuees the availability of the dma-mapping API.
- */
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- return NULL;
-}
-#endif
-
static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
return likely(!ops);
}
#endif
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+#ifdef CONFIG_HAS_DMA
+#include <asm/dma-mapping.h>
+
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
+ return get_arch_dma_ops(dev ? dev->bus : NULL);
+}
+
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->dma_ops = dma_ops;
+}
+
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+ struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
- debug_dma_map_single(dev, ptr, size);
if (dma_is_direct(ops))
- addr = dma_direct_map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size, dir, attrs);
+ addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
else
- addr = ops->map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size, dir, attrs);
- debug_dma_map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, addr, true);
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ debug_dma_map_page(dev, page, offset, size, dir, addr);
+
return addr;
}
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_direct_unmap_page(dev, addr, size, dir, attrs);
else if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+ debug_dma_unmap_page(dev, addr, size, dir);
}
/*
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
- struct page *page,
- size_t offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
- else
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
-}
-
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr,
size_t size,
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t addr, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
-}
-
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- return dma_sync_single_for_device(dev, addr + offset, size, dir);
-}
-
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
}
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (dma_addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+ return 0;
+}
+
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs);
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir);
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+u64 dma_get_required_mask(struct device *dev);
+#else /* CONFIG_HAS_DMA */
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+ struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ return 0;
+}
+static inline void dma_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+static inline dma_addr_t dma_map_resource(struct device *dev,
+ phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return -ENOMEM;
+}
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+{
+ return NULL;
+}
+static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+}
+static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ return NULL;
+}
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+}
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+static inline int dma_get_sgtable_attrs(struct device *dev,
+ struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, unsigned long attrs)
+{
+ return -ENXIO;
+}
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ return -ENXIO;
+}
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ return -EIO;
+}
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ return -EIO;
+}
+static inline u64 dma_get_required_mask(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_HAS_DMA */
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ debug_dma_map_single(dev, ptr, size);
+ return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
+ size, dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_sync_single_for_device(dev, addr + offset, size, dir);
+}
+
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir);
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
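A brief usage sketch of the consolidated API above (illustrative only, not part of the patch; function names are hypothetical): dma_map_single() is now a thin wrapper around dma_map_page_attrs(), and callers detect failure with dma_mapping_error(), which checks for DMA_MAPPING_ERROR.

#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* addr == DMA_MAPPING_ERROR */
	*out = addr;
	return 0;
}

static void example_unmap_buffer(struct device *dev, dma_addr_t addr,
				 size_t len)
{
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}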
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);
-int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs);
-int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-
-void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag, unsigned long attrs);
-void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, unsigned long attrs);
-
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- debug_dma_mapping_error(dev, dma_addr);
-
- if (dma_addr == DMA_MAPPING_ERROR)
- return -ENOMEM;
- return 0;
-}
-
-int dma_supported(struct device *dev, u64 mask);
-int dma_set_mask(struct device *dev, u64 mask);
-int dma_set_coherent_mask(struct device *dev, u64 mask);
static inline u64 dma_get_mask(struct device *dev)
{
return dma_set_mask_and_coherent(dev, mask);
}
-extern u64 dma_get_required_mask(struct device *dev);
-
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
u64 size, const struct iommu_ops *iommu,
}
#endif
-/*
- * Please always use dma_alloc_coherent instead as it already zeroes the memory!
- */
-static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-/*
- * Managed DMA API
- */
-#ifdef CONFIG_HAS_DMA
-extern void *dmam_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{ return NULL; }
-static inline void dmam_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle) { }
-#endif /* !CONFIG_HAS_DMA */
-
-extern void *dmam_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs);
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-extern int dmam_declare_coherent_memory(struct device *dev,
- phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size,
- int flags);
-extern void dmam_release_declared_memory(struct device *dev);
-#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline int dmam_declare_coherent_memory(struct device *dev,
- phys_addr_t phys_addr, dma_addr_t device_addr,
- size_t size, gfp_t gfp)
-{
- return 0;
-}
-
-static inline void dmam_release_declared_memory(struct device *dev)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
+ return dmam_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CONFIG_JUMP_LABEL)
#include <linux/jump_label.h>
#endif
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
union {
struct static_key_true dd_key_true;
struct static_key_false dd_key_false;
dd_key_init(key, init) \
}
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#define dd_key_init(key, init) key = (init)
struct list_head modelist; /* mode list */
struct fb_videomode *mode; /* current mode */
-#ifdef CONFIG_FB_BACKLIGHT
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* assigned backlight device */
/* set before framebuffer registration,
remove after unregister */
extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
+extern bool fb_center_logo;
extern struct class *fb_class;
#define for_each_registered_fb(i) \
* Additional babbling in: Documentation/static-keys.txt
*/
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
-# define HAVE_JUMP_LABEL
-#endif
-
#ifndef __ASSEMBLY__
#include <linux/types.h>
"%s(): static key '%pS' used before call to jump_label_init()", \
__func__, (key))
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
struct static_key {
atomic_t enabled;
struct static_key {
atomic_t enabled;
};
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>
#ifndef __ASSEMBLY__
struct module;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
{ .enabled = { 0 }, \
{ .entries = (void *)JUMP_TYPE_FALSE } }
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#include <linux/atomic.h>
#include <linux/bug.h>
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
static_key_count((struct static_key *)x) > 0; \
})
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
/*
* Combine the right initial value (type) with the right branch order
unlikely(branch); \
})
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
/*
* Advanced usage; refcount, branch is enabled when: count != 0
#include <linux/jump_label.h>
#include <linux/workqueue.h>
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CONFIG_JUMP_LABEL)
struct static_key_deferred {
struct static_key key;
unsigned long timeout;
struct delayed_work work;
};
-#endif
-#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
extern void static_key_deferred_flush(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
struct static_key_deferred {
struct static_key key;
};
{
STATIC_KEY_CHECK_USE(key);
}
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
}
enum nvdimm_security_state {
+ NVDIMM_SECURITY_ERROR = -1,
NVDIMM_SECURITY_DISABLED,
NVDIMM_SECURITY_UNLOCKED,
NVDIMM_SECURITY_LOCKED,
* @wake_event: Pointer to a bool set to true upon return if the event might be
* treated as a wake event. Ignored if null.
*
- * Return: 0 on success or negative error code.
+ * Return: negative error code on errors; 0 for no data; or else number of
+ * bytes received (i.e., an event was retrieved successfully). Event types are
+ * written out to @ec_dev->event_data.event_type on success.
*/
int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
* events raised and call the functions in the ec notifier. This function
* is a helper to know which events are raised.
*
- * Return: 0 on success or negative error code.
+ * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
*/
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
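A hedged sketch of how a caller might act on the return conventions documented above (illustrative only; the helper name is invented): a positive return from cros_ec_get_next_event() means an event was read, and cros_ec_get_host_event() yields a bitmask that is zero on error.

#include <linux/mfd/cros_ec.h>
#include <linux/printk.h>

static void example_handle_ec_event(struct cros_ec_device *ec_dev)
{
	bool wake_event;
	u32 host_events;
	int ret = cros_ec_get_next_event(ec_dev, &wake_event);

	if (ret <= 0)
		return;			/* negative errno, or 0 = no data */

	host_events = cros_ec_get_host_event(ec_dev);
	if (host_events)
		pr_info("EC raised host events: %#x\n", host_events);
}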
} __packed;
/*****************************************************************************/
+/* Commands for I2S recording on audio codec. */
+
+#define EC_CMD_CODEC_I2S 0x00BC
+
+enum ec_codec_i2s_subcmd {
+ EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
+ EC_CODEC_SET_GAIN = 0x1,
+ EC_CODEC_GET_GAIN = 0x2,
+ EC_CODEC_I2S_ENABLE = 0x3,
+ EC_CODEC_I2S_SET_CONFIG = 0x4,
+ EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
+ EC_CODEC_I2S_SET_BCLK = 0x6,
+};
+
+enum ec_sample_depth_value {
+ EC_CODEC_SAMPLE_DEPTH_16 = 0,
+ EC_CODEC_SAMPLE_DEPTH_24 = 1,
+};
+
+enum ec_i2s_config {
+ EC_DAI_FMT_I2S = 0,
+ EC_DAI_FMT_RIGHT_J = 1,
+ EC_DAI_FMT_LEFT_J = 2,
+ EC_DAI_FMT_PCM_A = 3,
+ EC_DAI_FMT_PCM_B = 4,
+ EC_DAI_FMT_PCM_TDM = 5,
+};
+
+struct ec_param_codec_i2s {
+ /*
+ * enum ec_codec_i2s_subcmd
+ */
+ uint8_t cmd;
+ union {
+ /*
+ * EC_CODEC_SET_SAMPLE_DEPTH
+ * Value should be one of ec_sample_depth_value.
+ */
+ uint8_t depth;
+
+ /*
+ * EC_CODEC_SET_GAIN
+ * Value should be 0~43 for both channels.
+ */
+ struct ec_param_codec_i2s_set_gain {
+ uint8_t left;
+ uint8_t right;
+ } __packed gain;
+
+ /*
+ * EC_CODEC_I2S_ENABLE
+ * 1 to enable, 0 to disable.
+ */
+ uint8_t i2s_enable;
+
+ /*
+ * EC_CODEC_I2S_SET_CONFIG
+ * Value should be one of ec_i2s_config.
+ */
+ uint8_t i2s_config;
+
+ /*
+ * EC_CODEC_I2S_SET_TDM_CONFIG
+ * Value should be one of ec_i2s_config.
+ */
+ struct ec_param_codec_i2s_tdm {
+ /*
+ * 0 to 496
+ */
+ int16_t ch0_delay;
+ /*
+ * -1 to 496
+ */
+ int16_t ch1_delay;
+ uint8_t adjacent_to_ch0;
+ uint8_t adjacent_to_ch1;
+ } __packed tdm_param;
+
+ /*
+ * EC_CODEC_I2S_SET_BCLK
+ */
+ uint32_t bclk;
+ };
+} __packed;
+
+/*
+ * For subcommand EC_CODEC_GET_GAIN.
+ */
+struct ec_response_codec_gain {
+ uint8_t left;
+ uint8_t right;
+} __packed;
+
+/*****************************************************************************/
/* System commands */
/*
#define TCU_TCSR_PRESCALE_LSB 3
#define TCU_TCSR_PRESCALE_MASK 0x38
-#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */
+#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */
#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/madera/pdata.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#define MADERA_MAX_MICBIAS 4
+#define MADERA_MAX_HP_OUTPUT 3
+
/* Notifier events */
#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
#define MADERA_NOTIFY_HPDET 0x2
unsigned int num_childbias[MADERA_MAX_MICBIAS];
struct snd_soc_dapm_context *dapm;
+ struct mutex dapm_ptr_lock;
+ unsigned int hp_ena;
+ bool out_clamp[MADERA_MAX_HP_OUTPUT];
+ bool out_shorted[MADERA_MAX_HP_OUTPUT];
struct blocking_notifier_head notifier;
};
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30)
+#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16)
#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0)
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
+#define STEPCONFIG_RFP(val) ((val) << 12)
+#define STEPCONFIG_RFP_VREFP (0x3 << 12)
#define STEPCONFIG_INM_MASK (0xF << 15)
#define STEPCONFIG_INM(val) ((val) << 15)
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
+#define STEPCONFIG_RFM(val) ((val) << 23)
+#define STEPCONFIG_RFM_VREFN (0x3 << 23)
/* Delay register */
#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
/* Some controllers have a CBSY bit */
#define TMIO_MMC_HAVE_CBSY BIT(11)
-/* Some controllers that support HS400 use use 4 taps while others use 8. */
+/* Some controllers that support HS400 use 4 taps while others use 8. */
#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
};
+enum zone_flags {
+ ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
+ * Cleared when kswapd is woken.
+ */
+};
+
static inline unsigned long zone_managed_pages(struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
unsigned int num_bpf_raw_events;
struct bpf_raw_event_map *bpf_raw_events;
#endif
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
#endif
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
extern bool retpoline_module_ok(bool has_retpoline);
#else
static inline bool retpoline_module_ok(bool has_retpoline)
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
struct nf_hook_entries *hook_head = NULL;
int ret = 1;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook) &&
!static_key_false(&nf_hooks_needed[pf][hook]))
#ifdef CONFIG_NETFILTER_INGRESS
static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
{
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
struct device_node {
const char *name;
- const char *type;
phandle phandle;
const char *full_name;
struct fwnode_handle fwnode;
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
- return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
+ return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
static inline void
unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
unsigned int is_probed:1; /* Device probing in progress */
unsigned int link_active_reporting:1;/* Device capable of reporting link active */
+ unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
int (*resume_early)(struct pci_dev *dev);
- int (*resume) (struct pci_dev *dev); /* Device woken up */
- void (*shutdown) (struct pci_dev *dev);
- int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */
+ int (*resume)(struct pci_dev *dev); /* Device woken up */
+ void (*shutdown)(struct pci_dev *dev);
+ int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
const struct pci_error_handlers *err_handler;
const struct attribute_group **groups;
struct device_driver driver;
#define PCI_DEVICE_ID_CENATEK_IDE 0x0001
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
#define PCI_VENDOR_ID_USR 0x16ec
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
+#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
extern const int phy_10_100_features_array[4];
* only works for PHYs with IDs which match this field
* name: The friendly name of this PHY type
* phy_id_mask: Defines the important bits of the phy_id
- * features: A list of features (speed, duplex, etc) supported
- * by this PHY
+ * features: A mandatory list of features (speed, duplex, etc)
+ * supported by this PHY
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
*
PHY_MODE_PCIE,
PHY_MODE_ETHERNET,
PHY_MODE_MIPI_DPHY,
+ PHY_MODE_SATA
};
/**
int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+void dev_pm_opp_remove_all_dynamic(struct device *dev);
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
{
}
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
#ifndef __QCOM_SCM_H
#define __QCOM_SCM_H
+#include <linux/err.h>
#include <linux/types.h>
#include <linux/cpumask.h>
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ u32 cur_prod, page_mask, page_cnt, page_diff;
+
+ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
+ p_chain->u.chain32.prod_idx;
+
+ /* Assume that the number of elements in a page is a power of 2 */
+ page_mask = ~p_chain->elem_per_page_mask;
+
+ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
+ * reaches the first element of next page before the page index
+ * is incremented. See qed_chain_produce().
+ * Index wrap around is not a problem because the difference
+ * between current and given producer indices is always
+ * positive and lower than the chain's capacity.
+ */
+ page_diff = (((cur_prod - 1) & page_mask) -
+ ((prod_idx - 1) & page_mask)) /
+ p_chain->elem_per_page;
+
+ page_cnt = qed_chain_get_page_cnt(p_chain);
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.prod_page_idx =
+ (p_chain->pbl.c.u16.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ else
+ p_chain->pbl.c.u32.prod_page_idx =
+ (p_chain->pbl.c.u32.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ }
+
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16) prod_idx;
else
#include <limits.h>
#include <stddef.h>
#include <sys/mman.h>
+#include <sys/time.h>
#include <sys/types.h>
/* Not standard, but glibc defines it */
#define __init
#define __exit
-#define __attribute_const__ __attribute__((const))
+#ifndef __attribute_const__
+# define __attribute_const__ __attribute__((const))
+#endif
#define noinline __attribute__((noinline))
#define preempt_enable()
#define MODULE_DESCRIPTION(desc)
#define subsys_initcall(x)
#define module_exit(x)
+
+#define IS_ENABLED(x) (x)
+#define CONFIG_RAID6_PQ_BENCHMARK 1
#endif /* __KERNEL__ */
/* Routine choices */
struct reset_control *of_reset_control_array_get(struct device_node *np,
bool shared, bool optional);
+int reset_control_get_count(struct device *dev);
+
#else
static inline int reset_control_reset(struct reset_control *rstc)
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline int reset_control_get_count(struct device *dev)
+{
+ return -ENOENT;
+}
+
#endif /* CONFIG_RESET_CONTROLLER */
static inline int __must_check device_reset(struct device *dev)
*
* Returns a struct reset_control or IS_ERR() condition containing errno.
* This function is intended for use with reset-controls which are shared
- * between hardware-blocks.
+ * between hardware blocks.
*
* When a reset-control is shared, the behavior of reset_control_assert /
* deassert is changed, the reset-core will keep track of a deassert_count
}
/**
- * of_reset_control_get_shared - Lookup and obtain an shared reference
+ * of_reset_control_get_shared - Lookup and obtain a shared reference
* to a reset controller.
* @node: device to be reset by the controller
* @id: reset line name
}
/**
- * of_reset_control_get_shared_by_index - Lookup and obtain an shared
+ * of_reset_control_get_shared_by_index - Lookup and obtain a shared
* reference to a reset controller
* by index.
* @node: device to be reset by the controller
/**
* devm_reset_control_get_shared_by_index - resource managed
- * reset_control_get_shared
+ * reset_control_get_shared
* @dev: device to be reset by the controller
* @index: index of the reset controller
*
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_RESCTRL
+#ifdef CONFIG_X86_RESCTRL
u32 closid;
u32 rmid;
#endif
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
+#define PF_UMH 0x02000000 /* I'm a usermodehelper process */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#endif
+void __exit_umh(struct task_struct *tsk);
+
+static inline void exit_umh(struct task_struct *tsk)
+{
+ if (unlikely(tsk->flags & PF_UMH))
+ __exit_umh(tsk);
+}
+
#ifdef CONFIG_DEBUG_RSEQ
void rseq_syscall(struct pt_regs *regs);
*
* This is exactly the same as pskb_trim except that it ensures the
* checksum of received packets are still valid after the operation.
+ * It can change skb pointers.
*/
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
+#define SWITCHTEC_DMA_MRPC_EN BIT(0)
enum {
SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
u32 cmd;
u32 status;
u32 ret_value;
+ u32 dma_en;
+ u64 dma_addr;
+ u32 dma_vector;
+ u32 dma_ver;
} __packed;
enum mrpc_status {
struct switchtec_ntb;
+struct dma_mrpc_output {
+ u32 status;
+ u32 cmd_id;
+ u32 rtn_code;
+ u32 output_size;
+ u8 data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+};
+
struct switchtec_dev {
struct pci_dev *pdev;
struct device dev;
u8 link_event_count[SWITCHTEC_MAX_PFF_CSR];
struct switchtec_ntb *sndev;
+
+ struct dma_mrpc_output *dma_mrpc;
+ dma_addr_t dma_mrpc_dma_addr;
};
static inline struct switchtec_dev *to_stdev(struct device *dev)
const char *cmdline;
struct file *pipe_to_umh;
struct file *pipe_from_umh;
+ struct list_head list;
+ void (*cleanup)(struct umh_info *info);
pid_t pid;
};
int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
/**
* virtio_config_ops - operations for configuring a virtio device
+ * Note: Do not assume that a transport implements all of the operations
+ * getting/setting a value as a simple read/write! Generally speaking,
+ * any of @get/@set, @get_status/@set_status, or @get_features/
+ * @finalize_features are NOT safe to be called from an atomic
+ * context.
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
* offset: the offset of the configuration field
* buf: the buffer to read the field value from.
* len: the length of the buffer
- * @generation: config generation counter
+ * @generation: config generation counter (optional)
* vdev: the virtio_device
* Returns the config generation counter
* @get_status: read the status byte
* @del_vqs: free virtqueues found by find_vqs().
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
- * Returns the first 32 feature bits (all we currently need).
+ * Returns the first 64 feature bits (all we currently need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
* Returns 0 on success or error status
- * @bus_name: return the bus name associated with the device
+ * @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
* This returns a pointer to the bus name a la pci_name from which
* the caller can then copy.
- * @set_vq_affinity: set the affinity for a virtqueue.
+ * @set_vq_affinity: set the affinity for a virtqueue (optional).
* @get_vq_affinity: get the affinity for a virtqueue (optional).
*/
typedef void vq_callback_t(struct virtqueue *);
*/
static inline bool xa_is_err(const void *entry)
{
- return unlikely(xa_is_internal(entry));
+ return unlikely(xa_is_internal(entry) &&
+ entry >= xa_mk_internal(-MAX_ERRNO));
}
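A small sketch of the intended calling pattern (not from the patch; the wrapper is hypothetical): xa_is_err() recognises error-encoded internal entries, and xa_err() converts them back into a negative errno.

#include <linux/xarray.h>

/* Illustrative: translate an XArray store result into 0 or -errno. */
static int example_xa_store(struct xarray *xa, unsigned long index, void *p)
{
	void *curr = xa_store(xa, index, p, GFP_KERNEL);

	return xa_is_err(curr) ? xa_err(curr) : 0;
}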
/**
*/
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
-void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void xa_destroy(struct xarray *);
/**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (eg you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+ spin_lock_init(&xa->xa_lock);
+ xa->xa_flags = flags;
+ xa->xa_head = NULL;
+}
+
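A usage sketch for the now-inline xa_init_flags() (illustrative only; the array name is made up), matching the "take the lock from interrupt context" case mentioned in the kernel-doc above.

#include <linux/xarray.h>

static struct xarray irq_safe_xa;

static void init_my_xarray(void)
{
	/* The xa_lock will be taken from IRQ context, so say so up front. */
	xa_init_flags(&irq_safe_xa, XA_FLAGS_LOCK_IRQ);
}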
+/**
* xa_init() - Initialise an empty XArray.
* @xa: XArray.
*
}
/**
- * xa_for_each() - Iterate over a portion of an XArray.
+ * xa_for_each_start() - Iterate over a portion of an XArray.
* @xa: XArray.
+ * @index: Index of @entry.
* @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_start() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_start().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_start(xa, index, entry, start) \
+ for (index = start, \
+ entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
+ entry; \
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+
+/**
+ * xa_for_each() - Iterate over present entries in an XArray.
+ * @xa: XArray.
* @index: Index of @entry.
- * @max: Maximum index to retrieve from array.
- * @filter: Selection criterion.
+ * @entry: Entry retrieved from array.
*
- * Initialise @index to the lowest index you want to retrieve from the
- * array. During the iteration, @entry will have the value of the entry
- * stored in @xa at @index. The iteration will skip all entries in the
- * array which do not match @filter. You may modify @index during the
- * iteration if you want to skip or reprocess indices. It is safe to modify
- * the array during the iteration. At the end of the iteration, @entry will
- * be set to NULL and @index will have a value less than or equal to max.
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you want
+ * to skip or reprocess indices. It is safe to modify the array during the
+ * iteration. At the end of the iteration, @entry will be set to NULL and
+ * @index will have a value less than or equal to max.
*
* xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
* to handle your own locking with xas_for_each(), and if you have to unlock
*
* Context: Any context. Takes and releases the RCU lock.
*/
-#define xa_for_each(xa, entry, index, max, filter) \
- for (entry = xa_find(xa, &index, max, filter); entry; \
- entry = xa_find_after(xa, &index, max, filter))
+#define xa_for_each(xa, index, entry) \
+ xa_for_each_start(xa, index, entry, 0)
+
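Since the documentation above notes that it is safe to modify the array while iterating, a minimal sketch (hypothetical helper) of draining every present entry:

	static void prune_all(struct xarray *xa)
	{
		unsigned long index;
		void *entry;

		/* xa_erase() takes the xa_lock itself; the iteration only
		 * holds the RCU lock across each lookup, so this is safe.
		 */
		xa_for_each(xa, index, entry)
			xa_erase(xa, index);
	}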
+/**
+ * xa_for_each_marked() - Iterate over marked entries in an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @filter: Selection criterion.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. The iteration will skip all entries in the array
+ * which do not match @filter. You may modify @index during the iteration
+ * if you want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set to
+ * NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
+ * You have to handle your own locking with xas_for_each(), and if you have
+ * to unlock after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each_marked() iterator
+ * instead. The xas_for_each_marked() iterator will expand into more inline
+ * code than xa_for_each_marked().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_marked(xa, index, entry, filter) \
+ for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+ entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
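A short sketch of the marked variant (hypothetical helper; XA_MARK_0 stands in for whatever mark the caller actually uses):

	static unsigned int count_marked(struct xarray *xa)
	{
		unsigned long index;
		void *entry;
		unsigned int n = 0;

		/* Only present entries with XA_MARK_0 set are visited. */
		xa_for_each_marked(xa, index, entry, XA_MARK_0)
			n++;
		return n;
	}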
#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
+int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
/**
- * __xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Any context. Expects xa_lock to be held on entry. May
- * release and reacquire xa_lock if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int __xa_insert(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
-{
- void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
- if (!curr)
- return 0;
- if (xa_is_err(curr))
- return xa_err(curr);
- return -EEXIST;
-}
-
-/**
* xa_store_bh() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
}
/**
- * xa_store_irq() - Erase this entry from the XArray.
+ * xa_store_irq() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
*
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
static inline int xa_insert(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
- void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
- if (!curr)
- return 0;
- if (xa_is_err(curr))
- return xa_err(curr);
- return -EEXIST;
+ int err;
+
+ xa_lock(xa);
+ err = __xa_insert(xa, index, entry, gfp);
+ xa_unlock(xa);
+
+ return err;
+}
+
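A minimal caller sketch (hypothetical function and naming) showing the usual way the -EEXIST result is treated as "someone else already stored this index":

	static int cache_insert(struct xarray *xa, unsigned long index, void *obj)
	{
		int err = xa_insert(xa, index, obj, GFP_KERNEL);

		if (err == -EEXIST)	/* lost the race; the existing entry wins */
			return 0;
		return err;		/* 0 on success, -ENOMEM on allocation failure */
	}

The _bh and _irq variants that follow differ only in how the xa_lock is taken; the return convention is the same.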
+/**
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ int err;
+
+ xa_lock_bh(xa);
+ err = __xa_insert(xa, index, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return err;
+}
+
+/**
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ int err;
+
+ xa_lock_irq(xa);
+ err = __xa_insert(xa, index, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return err;
}
/**
(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}
-#define XA_ZERO_ENTRY xa_mk_internal(256)
-#define XA_RETRY_ENTRY xa_mk_internal(257)
+#define XA_RETRY_ENTRY xa_mk_internal(256)
+#define XA_ZERO_ENTRY xa_mk_internal(257)
/**
* xa_is_zero() - Is the entry a zero entry?
}
/**
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
+ * @entry: Entry to be stored in the XArray.
+ *
+ * Return: %true if the entry cannot be stored by the normal API.
+ */
+static inline bool xa_is_advanced(const void *entry)
+{
+ return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
+/**
* typedef xa_update_node_t - A callback function from the XArray.
* @node: The node which is being processed
*
struct rxrpc_call;
/*
- * Call completion condition (state == RXRPC_CALL_COMPLETE).
- */
-enum rxrpc_call_completion {
- RXRPC_CALL_SUCCEEDED, /* - Normal termination */
- RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
- RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
- RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
- RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
- NR__RXRPC_CALL_COMPLETIONS
-};
-
-/*
* Debug ID counter for tracing.
*/
extern atomic_t rxrpc_debug_id;
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
- struct sockaddr_rxrpc *, struct key *);
-int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
- enum rxrpc_call_completion *, u32 *);
u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb, struct fib_dump_filter *filter);
-int fib_table_flush(struct net *net, struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
struct nf_flow_route {
struct {
struct dst_entry *dst;
- int ifindex;
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
enum afs_call_trace {
afs_call_trace_alloc,
afs_call_trace_free,
+ afs_call_trace_get,
afs_call_trace_put,
afs_call_trace_wake,
afs_call_trace_work,
#define afs_call_traces \
EM(afs_call_trace_alloc, "ALLOC") \
EM(afs_call_trace_free, "FREE ") \
+ EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
EM(afs_call_trace_wake, "WAKE ") \
E_(afs_call_trace_work, "WORK ")
-# UAPI Header export list
-
ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/a.out.h),)
no-export-headers += a.out.h
endif
/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_RISCV32 (EM_RISCV|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_RISCV64 (EM_RISCV|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_S390 (EM_S390)
#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_SH (EM_SH)
struct fb_image image; /* Cursor image */
};
-#ifdef CONFIG_FB_BACKLIGHT
/* Settings for the generic backlight code */
#define FB_BACKLIGHT_LEVELS 128
#define FB_BACKLIGHT_MAX 0xFF
-#endif
#endif /* _UAPI_LINUX_FB_H */
#define FS_POLICY_FLAGS_PAD_16 0x02
#define FS_POLICY_FLAGS_PAD_32 0x03
#define FS_POLICY_FLAGS_PAD_MASK 0x03
-#define FS_POLICY_FLAGS_VALID 0x03
+#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */
+#define FS_POLICY_FLAGS_VALID 0x07
/* Encryption algorithms */
#define FS_ENCRYPTION_MODE_INVALID 0
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_ADIANTUM 9
struct fscrypt_policy {
__u8 version;
#define IN_MULTICAST(a) IN_CLASSD(a)
#define IN_MULTICAST_NET 0xe0000000
-#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff)
+#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
#define PTP_SYS_OFFSET_PRECISE \
_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
#define PTP_SYS_OFFSET_EXTENDED \
- _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
+ _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occurred. */
PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
PVRDMA_WR_BIND_MW,
PVRDMA_WR_REG_SIG_MR,
+ PVRDMA_WR_ERROR,
};
enum pvrdma_wc_status {
int
default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
+config CC_HAS_ASM_GOTO
+ def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
+
config CONSTRUCTORS
bool
depends on !UML
bool "Dead code and data elimination (EXPERIMENTAL)"
depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
depends on EXPERT
+ depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
depends on $(cc-option,-ffunction-sections -fdata-sections)
depends on $(ld-option,--gc-sections)
help
$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
$(call if_changed,gzip)
- filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/bin2c; echo "MAGIC_END;")
+filechk_ikconfiggz = \
+ echo "static const char kernel_config_data[] __used = MAGIC_START"; \
+ cat $< | scripts/bin2c; \
+ echo "MAGIC_END;"
+
targets += config_data.h
$(obj)/config_data.h: $(obj)/config_data.gz FORCE
$(call filechk,ikconfiggz)
return kind_ops[BTF_INFO_KIND(t->info)];
}
-bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
return BTF_STR_OFFSET_VALID(offset) &&
offset < btf->hdr.str_len;
u8 nr_copy_bits;
u64 print_num;
- data += BITS_ROUNDDOWN_BYTES(bits_offset);
- bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
nr_copy_bits = nr_bits + bits_offset;
nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
* BTF_INT_OFFSET() cannot exceed 64 bits.
*/
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
- btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m);
+ data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+ bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
member_offset = btf_member_bit_offset(t, member);
bitfield_size = btf_member_bitfield_size(t, member);
+ bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
+ bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
if (bitfield_size) {
- btf_bitfield_seq_show(data, member_offset,
+ btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
bitfield_size, m);
} else {
- bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
- bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
ops = btf_type_ops(member_type);
ops->seq_show(btf, member_type, member->type,
data + bytes_offset, bits8_offset, m);
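For illustration only (not part of the patch): BITS_ROUNDDOWN_BYTES() and BITS_PER_BYTE_MASKED() simply split a bit offset into a whole-byte part and a residual sub-byte part, so for a member at bit offset 45:

	bytes_offset = BITS_ROUNDDOWN_BYTES(45);	/* 45 / 8 == 5 bytes */
	bits8_offset = BITS_PER_BYTE_MASKED(45);	/* 45 % 8 == 5 bits  */

With the split performed by the caller, btf_bitfield_seq_show() now only ever sees a bit offset smaller than BITS_PER_BYTE.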
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
+ /* fall through */
default:
return NULL;
}
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
+ u32 inner_map_meta_size;
struct fd f;
f = fdget(inner_map_ufd);
return ERR_PTR(-EINVAL);
}
- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+ inner_map_meta_size = sizeof(*inner_map_meta);
+ /* In some cases verifier needs to access beyond just base map. */
+ if (inner_map->ops == &array_map_ops)
+ inner_map_meta_size = sizeof(struct bpf_array);
+
+ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
if (!inner_map_meta) {
fdput(f);
return ERR_PTR(-ENOMEM);
inner_map_meta->key_size = inner_map->key_size;
inner_map_meta->value_size = inner_map->value_size;
inner_map_meta->map_flags = inner_map->map_flags;
- inner_map_meta->ops = inner_map->ops;
inner_map_meta->max_entries = inner_map->max_entries;
+ /* Misc members not needed in bpf_map_meta_equal() check. */
+ inner_map_meta->ops = inner_map->ops;
+ if (inner_map->ops == &array_map_ops) {
+ inner_map_meta->unpriv_array = inner_map->unpriv_array;
+ container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+ container_of(inner_map, struct bpf_array, map)->index_mask;
+ }
+
fdput(f);
return inner_map_meta;
}
if (nhdr->n_type == BPF_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
memcpy(build_id,
note_start + note_offs +
ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- BPF_BUILD_ID_SIZE);
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
return 0;
}
new_offs = note_offs + sizeof(Elf32_Nhdr) +
return -EFAULT; /* page not mapped */
ret = -EINVAL;
- page_addr = page_address(page);
+ page_addr = kmap_atomic(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = stack_map_get_build_id_64(page_addr, build_id);
out:
+ kunmap_atomic(page_addr);
put_page(page);
return ret;
}
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
}
return;
}
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
continue;
}
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
}
}
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+ const struct bpf_insn *insn)
+{
+ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+}
+
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+ u32 alu_state, u32 alu_limit)
+{
+ /* If we arrived here from different branches with different
+ * state or limits to sanitize, then this won't work.
+ */
+ if (aux->alu_state &&
+ (aux->alu_state != alu_state ||
+ aux->alu_limit != alu_limit))
+ return -EACCES;
+
+ /* Corresponding fixup done in fixup_bpf_calls(). */
+ aux->alu_state = alu_state;
+ aux->alu_limit = alu_limit;
+ return 0;
+}
+
+static int sanitize_val_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
+{
+ struct bpf_insn_aux_data *aux = cur_aux(env);
+
+ if (can_skip_alu_sanitation(env, insn))
+ return 0;
+
+ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+}
+
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
struct bpf_reg_state tmp;
bool ret;
- if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
+ if (can_skip_alu_sanitation(env, insn))
return 0;
/* We already marked aux for masking from non-speculative
if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
return 0;
-
- /* If we arrived here from different branches with different
- * limits to sanitize, then this won't work.
- */
- if (aux->alu_state &&
- (aux->alu_state != alu_state ||
- aux->alu_limit != alu_limit))
+ if (update_alu_sanitation_state(aux, alu_state, alu_limit))
return -EACCES;
-
- /* Corresponding fixup done in fixup_bpf_calls(). */
- aux->alu_state = alu_state;
- aux->alu_limit = alu_limit;
-
do_sim:
/* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+ u32 dst = insn->dst_reg;
+ int ret;
if (insn_bitness == 32) {
/* Relevant for 32-bit RSH: Information can propagate towards
switch (opcode) {
case BPF_ADD:
+ ret = sanitize_val_alu(env, insn);
+ if (ret < 0) {
+ verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+ return ret;
+ }
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
+ ret = sanitize_val_alu(env, insn);
+ if (ret < 0) {
+ verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+ return ret;
+ }
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
*/
return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
return __dma_release_from_coherent(mem, order, vaddr);
}
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
int dma_release_from_global_coherent(int order, void *vaddr)
{
enum {
dma_debug_single,
- dma_debug_page,
dma_debug_sg,
dma_debug_coherent,
dma_debug_resource,
EXPORT_SYMBOL(debug_dma_map_single);
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
- size_t size, int direction, dma_addr_t dma_addr,
- bool map_single)
+ size_t size, int direction, dma_addr_t dma_addr)
{
struct dma_debug_entry *entry;
return;
entry->dev = dev;
- entry->type = dma_debug_page;
+ entry->type = dma_debug_single;
entry->pfn = page_to_pfn(page);
entry->offset = offset,
entry->dev_addr = dma_addr;
entry->direction = direction;
entry->map_err_type = MAP_ERR_NOT_CHECKED;
- if (map_single)
- entry->type = dma_debug_single;
-
check_for_stack(dev, page, offset);
if (!PageHighMem(page)) {
EXPORT_SYMBOL(debug_dma_mapping_error);
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction, bool map_single)
+ size_t size, int direction)
{
struct dma_debug_entry ref = {
- .type = dma_debug_page,
+ .type = dma_debug_single,
.dev = dev,
.dev_addr = addr,
.size = size,
if (unlikely(dma_debug_disabled()))
return;
-
- if (map_single)
- ref.type = dma_debug_single;
-
check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
add_dma_entry(entry);
}
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr)
check_unmap(&ref);
}
-EXPORT_SYMBOL(debug_dma_free_coherent);
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
int direction, dma_addr_t dma_addr)
}
/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent(). Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- struct dma_devres *dr;
- void *vaddr;
-
- dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
- if (!dr)
- return NULL;
-
- vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
- if (!vaddr) {
- devres_free(dr);
- return NULL;
- }
-
- dr->vaddr = vaddr;
- dr->dma_handle = *dma_handle;
- dr->size = size;
-
- devres_add(dev, dr);
-
- return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
-/**
* dmam_free_coherent - Managed dma_free_coherent()
* @dev: Device to free coherent memory for
* @size: Size of allocation
}
EXPORT_SYMBOL(dmam_alloc_attrs);
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
- dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- void *res;
- int rc;
-
- res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
- flags);
- if (!rc)
- devres_add(dev, res);
- else
- devres_free(res);
-
- return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
- WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
-
/*
* Create scatter-list for the already allocated DMA buffer.
*/
ret = dma_alloc_from_pool(size, &page, flags);
if (!ret)
return NULL;
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- return ret;
+ goto done;
}
page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
/* remove any dirty cache lines on the kernel alias */
arch_dma_prep_coherent(page, size);
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
- return page; /* opaque cookie */
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ ret = page; /* opaque cookie */
+ goto done;
+ }
/* create a coherent mapping */
ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
return ret;
}
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
memset(ret, 0, size);
-
+done:
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
return ret;
}
memblock_free_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
+ io_tlb_start = 0;
+ io_tlb_end = 0;
io_tlb_nslabs = 0;
max_segment = 0;
}
exit_task_namespaces(tsk);
exit_task_work(tsk);
exit_thread(tsk);
+ exit_umh(tsk);
/*
* Flush inherited counters to the parent - before the parent
memset(s->addr, 0, THREAD_SIZE);
tsk->stack_vm_area = s;
+ tsk->stack = s->addr;
return s->addr;
}
posix_cpu_timers_init(p);
- p->start_time = ktime_get_ns();
- p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
audit_set_context(p, NULL);
cgroup_fork(p);
goto bad_fork_free_pid;
/*
+ * From this point on we must avoid any synchronous user-space
+ * communication until we take the tasklist-lock. In particular, we do
+ * not want user-space to be able to predict the process start-time by
+ * stalling fork(2) after we recorded the start_time but before it is
+ * visible to the system.
+ */
+
+ p->start_time = ktime_get_ns();
+ p->real_start_time = ktime_get_boot_ns();
+
+ /*
* Make it visible to the rest of the system, but don't wake it up yet.
* Need tasklist lock for parent etc handling!
*/
#include <linux/cpu.h>
#include <asm/sections.h>
-#ifdef HAVE_JUMP_LABEL
-
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);
static void jump_label_update(struct static_key *key);
/*
- * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
+ * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
* The use of 'atomic_read()' requires atomic.h and its problematic for some
* kernel headers such as kernel.h and others. Since static_key_count() is not
- * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok
+ * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case it's ok
* to have it be a function here. Similarly, for 'static_key_enable()' and
* 'static_key_disable()', which require bug.h. This should allow jump_label.h
- * to be included from most/all places for HAVE_JUMP_LABEL.
+ * to be included from most/all places for CONFIG_JUMP_LABEL.
*/
int static_key_count(struct static_key *key)
{
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
-
-#endif /* HAVE_JUMP_LABEL */
sizeof(*mod->bpf_raw_events),
&mod->num_bpf_raw_events);
#endif
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
sizeof(*mod->jump_entries),
&mod->num_jump_entries);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
/*
* Debugging: various feature bits
*
return 0;
}
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#define jump_label_key__true STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
#ifdef CONFIG_CFS_BANDWIDTH
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
static struct static_key __cfs_bandwidth_used;
static inline bool cfs_bandwidth_used(void)
{
static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
}
-#else /* HAVE_JUMP_LABEL */
+#else /* CONFIG_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
{
return true;
void cfs_bandwidth_usage_inc(void) {}
void cfs_bandwidth_usage_dec(void) {}
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
/*
* default period for cfs group bandwidth.
#undef SCHED_FEAT
-#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
/*
* To support run-time toggling of sched features, all the translation units
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
-#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
+#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
/*
* Each translation unit has its own copy of sysctl_sched_features to allow
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
-#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
+#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;
struct seccomp_filter *filter = file->private_data;
struct seccomp_knotif *knotif;
+ if (!filter)
+ return 0;
+
mutex_lock(&filter->notify_lock);
/*
out_put_fd:
if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
if (ret < 0) {
+ listener_f->private_data = NULL;
fput(listener_f);
put_unused_fd(listener);
} else {
/*
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
- * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
+ * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
+ * 2.6.60.
*/
static int override_release(char __user *release, size_t len)
{
char buf[MAX_EVENT_NAME_LEN];
unsigned int flags = TPARG_FL_KERNEL;
- /* argc must be >= 1 */
- if (argv[0][0] == 'r') {
+ switch (argv[0][0]) {
+ case 'r':
is_return = true;
flags |= TPARG_FL_RETURN;
- } else if (argv[0][0] != 'p' || argc < 2)
+ break;
+ case 'p':
+ break;
+ default:
+ return -ECANCELED;
+ }
+ if (argc < 2)
return -ECANCELED;
event = strchr(&argv[0][1], ':');
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);
+static LIST_HEAD(umh_list);
+static DEFINE_MUTEX(umh_list_lock);
static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
commit_creds(new);
sub_info->pid = task_pid_nr(current);
- if (sub_info->file)
+ if (sub_info->file) {
retval = do_execve_file(sub_info->file,
sub_info->argv, sub_info->envp);
- else
+ if (!retval)
+ current->flags |= PF_UMH;
+ } else
retval = do_execve(getname_kernel(sub_info->path),
(const char __user *const __user *)sub_info->argv,
(const char __user *const __user *)sub_info->envp);
goto out;
err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+ if (!err) {
+ mutex_lock(&umh_list_lock);
+ list_add(&info->list, &umh_list);
+ mutex_unlock(&umh_list_lock);
+ }
out:
fput(file);
return err;
return 0;
}
+void __exit_umh(struct task_struct *tsk)
+{
+ struct umh_info *info;
+ pid_t pid = tsk->pid;
+
+ mutex_lock(&umh_list_lock);
+ list_for_each_entry(info, &umh_list, list) {
+ if (info->pid == pid) {
+ list_del(&info->list);
+ mutex_unlock(&umh_list_lock);
+ goto out;
+ }
+ }
+ mutex_unlock(&umh_list_lock);
+ return;
+out:
+ if (info->cleanup)
+ info->cleanup(info);
+}
+
struct ctl_table usermodehelper_table[] = {
{
.procname = "bset",
config RAID6_PQ
tristate
+config RAID6_PQ_BENCHMARK
+ bool "Automatically choose fastest RAID6 PQ functions"
+ depends on RAID6_PQ
+ default y
+ help
+ Benchmark all available RAID6 PQ functions on init and choose the
+ fastest one.
+
config BITREVERSE
tristate
newflags = (dp->flags & mask) | flags;
if (newflags == dp->flags)
continue;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
if (dp->flags & _DPRINTK_FLAGS_PRINT) {
if (!(flags & _DPRINTK_FLAGS_PRINT))
static_branch_disable(&dp->key.dd_key_true);
if (x <= ULONG_MAX)
return int_sqrt((unsigned long) x);
- m = 1ULL << (fls64(x) & ~1ULL);
+ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
while (m != 0) {
b = y + m;
y >>= 1;
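A brief worked example of why the shift count changes (illustrative only): for an input with bit 63 set, fls64(x) returns 64, so the old expression shifted a 64-bit value by 64 bits, which is undefined; the corrected form starts m at the largest even bit position at or below the top set bit:

	/* x = 1ULL << 63, so fls64(x) == 64:
	 *   old: 1ULL << (64 & ~1)       == 1ULL << 64   (undefined shift)
	 *   new: 1ULL << ((64 - 1) & ~1) == 1ULL << 62   (valid starting m)
	 */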
hostprogs-y += mktables
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
- < $< > $@ || ( rm -f $@ && exit 1 )
+ cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@
- cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
+ cmd_mktable = $(obj)/mktables > $@
targets += tables.c
$(obj)/tables.c: $(obj)/mktables FORCE
EXPORT_SYMBOL_GPL(raid6_call);
const struct raid6_calls * const raid6_algos[] = {
-#if defined(__ia64__)
- &raid6_intx16,
- &raid6_intx32,
-#endif
#if defined(__i386__) && !defined(__arch_um__)
- &raid6_mmxx1,
- &raid6_mmxx2,
- &raid6_sse1x1,
- &raid6_sse1x2,
- &raid6_sse2x1,
- &raid6_sse2x2,
-#ifdef CONFIG_AS_AVX2
- &raid6_avx2x1,
- &raid6_avx2x2,
-#endif
#ifdef CONFIG_AS_AVX512
- &raid6_avx512x1,
&raid6_avx512x2,
+ &raid6_avx512x1,
#endif
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
- &raid6_sse2x1,
- &raid6_sse2x2,
- &raid6_sse2x4,
#ifdef CONFIG_AS_AVX2
- &raid6_avx2x1,
&raid6_avx2x2,
- &raid6_avx2x4,
+ &raid6_avx2x1,
+#endif
+ &raid6_sse2x2,
+ &raid6_sse2x1,
+ &raid6_sse1x2,
+ &raid6_sse1x1,
+ &raid6_mmxx2,
+ &raid6_mmxx1,
#endif
+#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
- &raid6_avx512x1,
- &raid6_avx512x2,
&raid6_avx512x4,
+ &raid6_avx512x2,
+ &raid6_avx512x1,
#endif
+#ifdef CONFIG_AS_AVX2
+ &raid6_avx2x4,
+ &raid6_avx2x2,
+ &raid6_avx2x1,
+#endif
+ &raid6_sse2x4,
+ &raid6_sse2x2,
+ &raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
- &raid6_altivec1,
- &raid6_altivec2,
- &raid6_altivec4,
- &raid6_altivec8,
- &raid6_vpermxor1,
- &raid6_vpermxor2,
- &raid6_vpermxor4,
&raid6_vpermxor8,
+ &raid6_vpermxor4,
+ &raid6_vpermxor2,
+ &raid6_vpermxor1,
+ &raid6_altivec8,
+ &raid6_altivec4,
+ &raid6_altivec2,
+ &raid6_altivec1,
#endif
#if defined(CONFIG_S390)
&raid6_s390vx8,
#endif
- &raid6_intx1,
- &raid6_intx2,
- &raid6_intx4,
- &raid6_intx8,
#ifdef CONFIG_KERNEL_MODE_NEON
- &raid6_neonx1,
- &raid6_neonx2,
- &raid6_neonx4,
&raid6_neonx8,
+ &raid6_neonx4,
+ &raid6_neonx2,
+ &raid6_neonx1,
#endif
+#if defined(__ia64__)
+ &raid6_intx32,
+ &raid6_intx16,
+#endif
+ &raid6_intx8,
+ &raid6_intx4,
+ &raid6_intx2,
+ &raid6_intx1,
NULL
};
if ((*algo)->valid && !(*algo)->valid())
continue;
+ if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+ best = *algo;
+ break;
+ }
+
perf = 0;
preempt_disable();
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+ CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
+ gcc -c -x assembler - >&/dev/null && \
+ rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX2=1)
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
unsigned long mask, val;
- unsigned long __maybe_unused flags;
bool ret = false;
+ unsigned long flags;
- /* Silence bogus lockdep warning */
-#if defined(CONFIG_LOCKDEP)
- local_irq_save(flags);
-#endif
- spin_lock(&sb->map[index].swap_lock);
+ spin_lock_irqsave(&sb->map[index].swap_lock, flags);
if (!sb->map[index].cleared)
goto out_unlock;
ret = true;
out_unlock:
- spin_unlock(&sb->map[index].swap_lock);
-#if defined(CONFIG_LOCKDEP)
- local_irq_restore(flags);
-#endif
+ spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
return ret;
}
XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
xa_set_mark(xa, index + 1, XA_MARK_0);
XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
- xa_set_mark(xa, index + 2, XA_MARK_1);
+ xa_set_mark(xa, index + 2, XA_MARK_2);
XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
xa_store_order(xa, index, order, xa_mk_index(index),
GFP_KERNEL);
void *entry;
XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
- XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
- XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
/* We should see two elements in the array */
rcu_read_lock();
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
- unsigned long index = 0;
+ unsigned long index;
/* An array with a reserved entry is not empty */
XA_BUG_ON(xa, !xa_empty(xa));
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
- /* And so does xa_insert */
+ /* But xa_insert does not */
xa_reserve(xa, 12345678, GFP_KERNEL);
- XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
- xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
+ -EEXIST);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
XA_BUG_ON(xa, !xa_empty(xa));
/* Can iterate through a reserved entry */
xa_reserve(xa, 6, GFP_KERNEL);
xa_store_index(xa, 7, GFP_KERNEL);
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, index != 5 && index != 7);
}
xa_destroy(xa);
static noinline void check_find_2(struct xarray *xa)
{
void *entry;
- unsigned long i, j, index = 0;
+ unsigned long i, j, index;
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, true);
}
for (i = 0; i < 1024; i++) {
xa_store_index(xa, index, GFP_KERNEL);
j = 0;
- index = 0;
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, xa_mk_index(index) != entry);
XA_BUG_ON(xa, index != j++);
}
for (i = 0; i < 100; i++) {
for (j = 0; j < 100; j++) {
+ rcu_read_lock();
for (k = 0; k < 100; k++) {
xas_set(&xas, j);
xas_for_each_marked(&xas, entry, k, XA_MARK_0)
XA_BUG_ON(xa,
xas.xa_node != XAS_RESTART);
}
+ rcu_read_unlock();
}
xa_store_index(xa, i, GFP_KERNEL);
xa_set_mark(xa, i, XA_MARK_0);
}
}
+static void check_align_1(struct xarray *xa, char *name)
+{
+ int i;
+ unsigned int id;
+ unsigned long index;
+ void *entry;
+
+ for (i = 0; i < 8; i++) {
+ id = 0;
+ XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
+ != 0);
+ XA_BUG_ON(xa, id != i);
+ }
+ xa_for_each(xa, index, entry)
+ XA_BUG_ON(xa, xa_is_err(entry));
+ xa_destroy(xa);
+}
+
+static noinline void check_align(struct xarray *xa)
+{
+ char name[] = "Motorola 68000";
+
+ check_align_1(xa, name);
+ check_align_1(xa, name + 1);
+ check_align_1(xa, name + 2);
+ check_align_1(xa, name + 3);
+// check_align_2(xa, name);
+}
+
static LIST_HEAD(shadow_nodes);
static void test_update_node(struct xa_node *node)
check_create_range(&array);
check_store_range(&array);
check_store_iter(&array);
+ check_align(&xa0);
check_workingset(&array, 0);
check_workingset(&array, 64);
if (xas->xa_shift > node->shift)
break;
entry = xas_descend(xas, node);
+ if (node->shift == 0)
+ break;
}
return entry;
}
for (;;) {
void *entry = xa_entry_locked(xas->xa, node, offset);
- if (xa_is_node(entry)) {
+ if (node->shift && xa_is_node(entry)) {
node = xa_to_node(entry);
offset = 0;
continue;
/*
* xas_create() - Create a slot to store an entry in.
* @xas: XArray operation state.
+ * @allow_root: %true if we can store the entry in the root directly
*
* Most users will not need to call this function directly, as it is called
* by xas_store(). It is useful for doing conditional store operations
* If the slot was newly created, returns %NULL. If it failed to create the
* slot, returns %NULL and indicates the error in @xas.
*/
-static void *xas_create(struct xa_state *xas)
+static void *xas_create(struct xa_state *xas, bool allow_root)
{
struct xarray *xa = xas->xa;
void *entry;
shift = xas_expand(xas, entry);
if (shift < 0)
return NULL;
+ if (!shift && !allow_root)
+ shift = XA_CHUNK_SHIFT;
entry = xa_head_locked(xa);
slot = &xa->xa_head;
} else if (xas_error(xas)) {
xas->xa_sibs = 0;
for (;;) {
- xas_create(xas);
+ xas_create(xas, true);
if (xas_error(xas))
goto restore;
if (xas->xa_index <= (index | XA_CHUNK_MASK))
bool value = xa_is_value(entry);
if (entry)
- first = xas_create(xas);
+ first = xas_create(xas, !xa_is_node(entry));
else
first = xas_load(xas);
EXPORT_SYMBOL_GPL(xas_find_conflict);
/**
- * xa_init_flags() - Initialise an empty XArray with flags.
- * @xa: XArray.
- * @flags: XA_FLAG values.
- *
- * If you need to initialise an XArray with special flags (eg you need
- * to take the lock from interrupt context), use this function instead
- * of xa_init().
- *
- * Context: Any context.
- */
-void xa_init_flags(struct xarray *xa, gfp_t flags)
-{
- unsigned int lock_type;
- static struct lock_class_key xa_lock_irq;
- static struct lock_class_key xa_lock_bh;
-
- spin_lock_init(&xa->xa_lock);
- xa->xa_flags = flags;
- xa->xa_head = NULL;
-
- lock_type = xa_lock_type(xa);
- if (lock_type == XA_LOCK_IRQ)
- lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
- else if (lock_type == XA_LOCK_BH)
- lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
-}
-EXPORT_SYMBOL(xa_init_flags);
-
-/**
* xa_load() - Load an entry from an XArray.
* @xa: XArray.
* @index: index into array.
{
if (xa_is_zero(curr))
return NULL;
- XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
if (xas_error(xas))
curr = xas->xa_node;
return curr;
XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
if (xa_track_free(xa) && !entry)
entry = XA_ZERO_ENTRY;
XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
if (xa_track_free(xa) && !entry)
entry = XA_ZERO_ENTRY;
EXPORT_SYMBOL(__xa_cmpxchg);
/**
+ * __xa_insert() - Store this entry in the XArray if no entry is present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+ if (!entry)
+ entry = XA_ZERO_ENTRY;
+
+ do {
+ curr = xas_load(&xas);
+ if (!curr) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } else {
+ xas_set_err(&xas, -EEXIST);
+ }
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_error(&xas);
+}
+EXPORT_SYMBOL(__xa_insert);
+
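A minimal sketch of why a caller might want the __xa_insert() form (hypothetical helper): combining the insert with another operation so both happen under a single hold of the xa_lock. GFP_NOWAIT is used here so the lock is never dropped for an allocation:

	static int add_and_mark(struct xarray *xa, unsigned long index, void *entry)
	{
		int err;

		xa_lock(xa);
		err = __xa_insert(xa, index, entry, GFP_NOWAIT);
		if (!err)
			__xa_set_mark(xa, index, XA_MARK_0);
		xa_unlock(xa);

		return err;
	}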
+/**
* __xa_reserve() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
if (last + 1)
order = __ffs(last + 1);
xas_set_order(&xas, last, order);
- xas_create(&xas);
+ xas_create(&xas, true);
if (xas_error(&xas))
goto unlock;
}
XA_STATE(xas, xa, 0);
int err;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (WARN_ON_ONCE(!xa_track_free(xa)))
return -EINVAL;
struct page *ptepage;
unsigned long addr;
int cow;
- struct address_space *mapping = vma->vm_file->f_mapping;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mmu_notifier_range range;
mmu_notifier_range_init(&range, src, vma->vm_start,
vma->vm_end);
mmu_notifier_invalidate_range_start(&range);
- } else {
- /*
- * For shared mappings i_mmap_rwsem must be held to call
- * huge_pte_alloc, otherwise the returned ptep could go
- * away if part of a shared pmd and another thread calls
- * huge_pmd_unshare.
- */
- i_mmap_lock_read(mapping);
}
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
spinlock_t *src_ptl, *dst_ptl;
-
src_pte = huge_pte_offset(src, addr, sz);
if (!src_pte)
continue;
-
dst_pte = huge_pte_alloc(dst, addr, sz);
if (!dst_pte) {
ret = -ENOMEM;
if (cow)
mmu_notifier_invalidate_range_end(&range);
- else
- i_mmap_unlock_read(mapping);
return ret;
}
}
/*
- * We can not race with truncation due to holding i_mmap_rwsem.
- * Check once here for faults beyond end of file.
+ * Use page lock to guard against racing truncation
+ * before we get page_table_lock.
*/
- size = i_size_read(mapping->host) >> huge_page_shift(h);
- if (idx >= size)
- goto out;
-
retry:
page = find_lock_page(mapping, idx);
if (!page) {
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ if (idx >= size)
+ goto out;
+
/*
* Check for page in userfault range
*/
};
/*
- * hugetlb_fault_mutex and i_mmap_rwsem must be
- * dropped before handling userfault. Reacquire
- * after handling fault to make calling code simpler.
+ * hugetlb_fault_mutex must be dropped before
+ * handling userfault. Reacquire after handling
+ * fault to make calling code simpler.
*/
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
idx, haddr);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
-
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
-
- i_mmap_lock_read(mapping);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
goto out;
}
}
ptl = huge_pte_lock(h, mm, ptep);
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ if (idx >= size)
+ goto backout;
ret = 0;
if (!huge_pte_none(huge_ptep_get(ptep)))
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (ptep) {
- /*
- * Since we hold no locks, ptep could be stale. That is
- * OK as we are only making decisions based on content and
- * not actually modifying content here.
- */
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
migration_entry_wait_huge(vma, mm, ptep);
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
+ } else {
+ ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+ if (!ptep)
+ return VM_FAULT_OOM;
}
- /*
- * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
- * until finished with ptep. This serves two purposes:
- * 1) It prevents huge_pmd_unshare from being called elsewhere
- * and making the ptep no longer valid.
- * 2) It synchronizes us with file truncation.
- *
- * ptep could have already be assigned via huge_pte_offset. That
- * is OK, as huge_pte_alloc will return the same value unless
- * something changed.
- */
mapping = vma->vm_file->f_mapping;
- i_mmap_lock_read(mapping);
- ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
- if (!ptep) {
- i_mmap_unlock_read(mapping);
- return VM_FAULT_OOM;
- }
+ idx = vma_hugecache_offset(h, vma, haddr);
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- idx = vma_hugecache_offset(h, vma, haddr);
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
}
out_mutex:
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
/*
* Generally it's safe to hold refcount during waiting page lock. But
* here we just wait to defer the next page fault to avoid busy loop and
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner.
- *
- * This routine must be called with i_mmap_rwsem held in at least read mode.
- * For hugetlbfs, this prevents removal of any page table entries associated
- * with the address space. This is important as we are setting up sharing
- * based on existing page table entries (mappings).
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
*/
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
if (!vma_shareable(vma, addr))
return (pte_t *)pmd_alloc(mm, pud, addr);
+ i_mmap_lock_write(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
if (svma == vma)
continue;
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ i_mmap_unlock_write(mapping);
return pte;
}
* indicated by page_count > 1, unmap is achieved by clearing pud and
* decrementing the ref count. If count == 1, the pte page is not shared.
*
- * Called with page table lock held and i_mmap_rwsem held in write mode.
+ * called with page table lock held.
*
* returns: 1 successfully unmapped a shared pte page
* 0 the underlying pte page is not shared, or it is the last user
return;
}
- cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
-
*flags |= SLAB_KASAN;
}
}
/*
- * Since it's desirable to only call object contructors once during slab
- * allocation, we preassign tags to all such objects. Also preassign tags for
- * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
- * For SLAB allocator we can't preassign tags randomly since the freelist is
- * stored as an array of indexes instead of a linked list. Assign tags based
- * on objects indexes, so that objects that are next to each other get
- * different tags.
- * After a tag is assigned, the object always gets allocated with the same tag.
- * The reason is that we can't change tags for objects with constructors on
- * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
- * code can save the pointer to the object somewhere (e.g. in the object
- * itself). Then if we retag it, the old saved pointer will become invalid.
+ * This function assigns a tag to an object considering the following:
+ * 1. A cache might have a constructor, which might save a pointer to a slab
+ * object somewhere (e.g. in the object itself). We preassign a tag for
+ * each object in caches with constructors during slab creation and reuse
+ * the same tag each time a particular object is allocated.
+ * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
+ * accessed after being freed. We preassign tags for objects in these
+ * caches as well.
+ * 3. For SLAB allocator we can't preassign tags randomly since the freelist
+ * is stored as an array of indexes instead of a linked list. Assign tags
+ * based on objects indexes, so that objects that are next to each other
+ * get different tags.
*/
-static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+static u8 assign_tag(struct kmem_cache *cache, const void *object,
+ bool init, bool krealloc)
{
+ /* Reuse the same tag for krealloc'ed objects. */
+ if (krealloc)
+ return get_tag(object);
+
+ /*
+ * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
+ * set, assign a tag when the object is being allocated (init == false).
+ */
if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
- return new ? KASAN_TAG_KERNEL : random_tag();
+ return init ? KASAN_TAG_KERNEL : random_tag();
+ /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
+ /* For SLAB assign tags based on the object index in the freelist. */
return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
- return new ? random_tag() : get_tag(object);
+ /*
+ * For SLUB assign a random tag during slab creation, otherwise reuse
+ * the already assigned tag.
+ */
+ return init ? random_tag() : get_tag(object);
#endif
}
__memset(alloc_info, 0, sizeof(*alloc_info));
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
- object = set_tag(object, assign_tag(cache, object, true));
+ object = set_tag(object,
+ assign_tag(cache, object, true, false));
return (void *)object;
}
return __kasan_slab_free(cache, object, ip, true);
}
-void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
- size_t size, gfp_t flags)
+static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
+ size_t size, gfp_t flags, bool krealloc)
{
unsigned long redzone_start;
unsigned long redzone_end;
KASAN_SHADOW_SCALE_SIZE);
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
- tag = assign_tag(cache, object, false);
+ tag = assign_tag(cache, object, false, krealloc);
/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
kasan_unpoison_shadow(set_tag(object, tag), size);
return set_tag(object, tag);
}
+
+void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
+ size_t size, gfp_t flags)
+{
+ return __kasan_kmalloc(cache, object, size, flags, false);
+}
EXPORT_SYMBOL(kasan_kmalloc);
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
if (unlikely(!PageSlab(page)))
return kasan_kmalloc_large(object, size, flags);
else
- return kasan_kmalloc(page->slab_cache, object, size, flags);
+ return __kasan_kmalloc(page->slab_cache, object, size,
+ flags, true);
}
void kasan_poison_kfree(void *ptr, unsigned long ip)
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
struct address_space *mapping;
LIST_HEAD(tokill);
- bool unmap_success = true;
+ bool unmap_success;
int kill = 1, forcekill;
struct page *hpage = *hpagep;
bool mlocked = PageMlocked(hpage);
if (kill)
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
- if (!PageHuge(hpage)) {
- unmap_success = try_to_unmap(hpage, ttu);
- } else if (mapping) {
- /*
- * For hugetlb pages, try_to_unmap could potentially call
- * huge_pmd_unshare. Because of this, take semaphore in
- * write mode here and set TTU_RMAP_LOCKED to indicate we
- * have taken the lock at this higer level.
- */
- i_mmap_lock_write(mapping);
- unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
- i_mmap_unlock_write(mapping);
- }
+ unmap_success = try_to_unmap(hpage, ttu);
if (!unmap_success)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret;
+ /*
+ * Preallocate pte before we take page_lock because this might lead to
+ * deadlocks for memcg reclaim which waits for pages under writeback:
+ * lock_page(A)
+ * SetPageWriteback(A)
+ * unlock_page(A)
+ * lock_page(B)
+ * lock_page(B)
+ * pte_alloc_pne
+ * shrink_page_list
+ * wait_on_page_writeback(A)
+ * SetPageWriteback(B)
+ * unlock_page(B)
+ * # flush A, B to clear the writeback
+ */
+ if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
+ vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
+ if (!vmf->prealloc_pte)
+ return VM_FAULT_OOM;
+ smp_wmb(); /* See comment in __pte_alloc() */
+ }
+
ret = vma->vm_ops->fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
VM_FAULT_DONE_COW)))
goto out;
if (range) {
- range->start = address & PAGE_MASK;
- range->end = range->start + PAGE_SIZE;
+ mmu_notifier_range_init(range, mm, address & PAGE_MASK,
+ (address & PAGE_MASK) + PAGE_SIZE);
mmu_notifier_invalidate_range_start(range);
}
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
goto put_anon;
if (page_mapped(hpage)) {
- struct address_space *mapping = page_mapping(hpage);
-
- /*
- * try_to_unmap could potentially call huge_pmd_unshare.
- * Because of this, take semaphore in write mode here and
- * set TTU_RMAP_LOCKED to let lower levels know we have
- * taken the lock.
- */
- i_mmap_lock_write(mapping);
try_to_unmap(hpage,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
- TTU_RMAP_LOCKED);
- i_mmap_unlock_write(mapping);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
page_was_mapped = 1;
}
return 0;
}
-/*
- * Later we can get more picky about what "in core" means precisely.
- * For now, simply check to see if the page is in the page cache,
- * and is up to date; i.e. that no page-in operation would be required
- * at this time if an application were to map and access this page.
- */
-static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
-{
- unsigned char present = 0;
- struct page *page;
-
- /*
- * When tmpfs swaps out a page from a file, any process mapping that
- * file will not get a swp_entry_t in its pte, but rather it is like
- * any other file mapping (ie. marked !present and faulted in with
- * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
- */
-#ifdef CONFIG_SWAP
- if (shmem_mapping(mapping)) {
- page = find_get_entry(mapping, pgoff);
- /*
- * shmem/tmpfs may return swap: account for swapcache
- * page too.
- */
- if (xa_is_value(page)) {
- swp_entry_t swp = radix_to_swp_entry(page);
- page = find_get_page(swap_address_space(swp),
- swp_offset(swp));
- }
- } else
- page = find_get_page(mapping, pgoff);
-#else
- page = find_get_page(mapping, pgoff);
-#endif
- if (page) {
- present = PageUptodate(page);
- put_page(page);
- }
-
- return present;
-}
-
-static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
- struct vm_area_struct *vma, unsigned char *vec)
-{
- unsigned long nr = (end - addr) >> PAGE_SHIFT;
- int i;
-
- if (vma->vm_file) {
- pgoff_t pgoff;
-
- pgoff = linear_page_index(vma, addr);
- for (i = 0; i < nr; i++, pgoff++)
- vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
- } else {
- for (i = 0; i < nr; i++)
- vec[i] = 0;
- }
- return nr;
-}
-
static int mincore_unmapped_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- walk->private += __mincore_unmapped_range(addr, end,
- walk->vma, walk->private);
+ unsigned char *vec = walk->private;
+ unsigned long nr = (end - addr) >> PAGE_SHIFT;
+
+ memset(vec, 0, nr);
+ walk->private += nr;
return 0;
}
goto out;
}
+ /* We'll consider a THP page under construction to be there */
if (pmd_trans_unstable(pmd)) {
- __mincore_unmapped_range(addr, end, vma, vec);
+ memset(vec, 1, nr);
goto out;
}
pte_t pte = *ptep;
if (pte_none(pte))
- __mincore_unmapped_range(addr, addr + PAGE_SIZE,
- vma, vec);
+ *vec = 0;
else if (pte_present(pte))
*vec = 1;
else { /* pte is a swap entry */
swp_entry_t entry = pte_to_swp_entry(pte);
- if (non_swap_entry(entry)) {
- /*
- * migration or hwpoison entries are always
- * uptodate
- */
- *vec = 1;
- } else {
-#ifdef CONFIG_SWAP
- *vec = mincore_page(swap_address_space(entry),
- swp_offset(entry));
-#else
- WARN_ON(1);
- *vec = 1;
-#endif
- }
+ /*
+ * migration or hwpoison entries are always
+ * uptodate
+ */
+ *vec = !!non_swap_entry(entry);
}
vec++;
}
*/
boost_watermark(zone);
if (alloc_flags & ALLOC_KSWAPD)
- wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+ set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
/* We are not allowed to try stealing from the whole block */
if (!whole_block)
local_irq_restore(flags);
out:
+ /* Separate test+clear to avoid unnecessary atomics */
+ if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
+ clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+ wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+ }
+
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
* page->flags PG_locked (lock_page)
* hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
* mapping->i_mmap_rwsem
- * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
* zone_lru_lock (in mark_page_accessed, isolate_lru_page)
* Note that the page can not be free in this function as call of
* try_to_unmap() must hold a reference on the page.
*/
- mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start,
- min(vma->vm_end, vma->vm_start +
+ mmu_notifier_range_init(&range, vma->vm_mm, address,
+ min(vma->vm_end, address +
(PAGE_SIZE << compound_order(page))));
if (PageHuge(page)) {
/*
* If sharing is possible, start and end will be adjusted
* accordingly.
- *
- * If called for a huge page, caller must hold i_mmap_rwsem
- * in write mode as it is possible to call huge_pmd_unshare.
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
struct alien_cache *alc = NULL;
alc = kmalloc_node(memsize, gfp, node);
- init_arraycache(&alc->ac, entries, batch);
- spin_lock_init(&alc->lock);
+ if (alc) {
+ init_arraycache(&alc->ac, entries, batch);
+ spin_lock_init(&alc->lock);
+ }
return alc;
}
unsigned int offset;
size_t object_size;
+ ptr = kasan_reset_tag(ptr);
+
/* Find object and usable object size. */
s = page->slab_cache;
/*
* Validates that the given object is:
* - not bogus address
- * - known-safe heap or stack object
+ * - fully contained by stack (or stack frame, when available)
+ * - fully within SLAB object (or object whitelist area, when available)
* - not in kernel text
*/
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
/* Check for invalid addresses. */
check_bogus_address((const unsigned long)ptr, n, to_user);
- /* Check for bad heap object. */
- check_heap_object(ptr, n, to_user);
-
/* Check for bad stack object. */
switch (check_stack_object(ptr, n)) {
case NOT_STACK:
usercopy_abort("process stack", NULL, to_user, 0, n);
}
+ /* Check for bad heap object. */
+ check_heap_object(ptr, n, to_user);
+
/* Check for object in kernel to avoid text exposure. */
check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
VM_BUG_ON(dst_addr & ~huge_page_mask(h));
/*
- * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
- * i_mmap_rwsem ensures the dst_pte remains valid even
- * in the case of shared pmds. fault mutex prevents
- * races with other faulting threads.
+ * Serialize via hugetlb_fault_mutex
*/
- mapping = dst_vma->vm_file->f_mapping;
- i_mmap_lock_read(mapping);
idx = linear_page_index(dst_vma, dst_addr);
+ mapping = dst_vma->vm_file->f_mapping;
hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
idx, dst_addr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
if (!dst_pte) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
goto out_unlock;
}
dst_pteval = huge_ptep_get(dst_pte);
if (!huge_pte_none(dst_pteval)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
goto out_unlock;
}
dst_addr, src_addr, &page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
vm_alloc_shared = vm_shared;
cond_resched();
return true;
if (PageHuge(page))
return false;
- for (i = 0; i < hpage_nr_pages(page); i++) {
+ for (i = 0; i < (1 << compound_order(page)); i++) {
if (atomic_read(&page[i]._mapcount) >= 0)
return true;
}
extern char bpfilter_umh_start;
extern char bpfilter_umh_end;
-static struct umh_info info;
-/* since ip_getsockopt() can run in parallel, serialize access to umh */
-static DEFINE_MUTEX(bpfilter_lock);
-
-static void shutdown_umh(struct umh_info *info)
+static void shutdown_umh(void)
{
struct task_struct *tsk;
- if (!info->pid)
+ if (bpfilter_ops.stop)
return;
- tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
+
+ tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID);
if (tsk) {
force_sig(SIGKILL, tsk);
put_task_struct(tsk);
}
- fput(info->pipe_to_umh);
- fput(info->pipe_from_umh);
- info->pid = 0;
}
static void __stop_umh(void)
{
- if (IS_ENABLED(CONFIG_INET)) {
- bpfilter_process_sockopt = NULL;
- shutdown_umh(&info);
- }
-}
-
-static void stop_umh(void)
-{
- mutex_lock(&bpfilter_lock);
- __stop_umh();
- mutex_unlock(&bpfilter_lock);
+ if (IS_ENABLED(CONFIG_INET))
+ shutdown_umh();
}
static int __bpfilter_process_sockopt(struct sock *sk, int optname,
req.cmd = optname;
req.addr = (long __force __user)optval;
req.len = optlen;
- mutex_lock(&bpfilter_lock);
- if (!info.pid)
+ if (!bpfilter_ops.info.pid)
goto out;
- n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos);
+ n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
+ &pos);
if (n != sizeof(req)) {
pr_err("write fail %zd\n", n);
__stop_umh();
goto out;
}
pos = 0;
- n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos);
+ n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply),
+ &pos);
if (n != sizeof(reply)) {
pr_err("read fail %zd\n", n);
__stop_umh();
}
ret = reply.status;
out:
- mutex_unlock(&bpfilter_lock);
return ret;
}
-static int __init load_umh(void)
+static int start_umh(void)
{
int err;
/* fork usermode process */
- info.cmdline = "bpfilter_umh";
err = fork_usermode_blob(&bpfilter_umh_start,
&bpfilter_umh_end - &bpfilter_umh_start,
- &info);
+ &bpfilter_ops.info);
if (err)
return err;
- pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
+ bpfilter_ops.stop = false;
+ pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid);
/* health check that usermode process started correctly */
if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
- stop_umh();
+ shutdown_umh();
return -EFAULT;
}
- if (IS_ENABLED(CONFIG_INET))
- bpfilter_process_sockopt = &__bpfilter_process_sockopt;
return 0;
}
+static int __init load_umh(void)
+{
+ int err;
+
+ mutex_lock(&bpfilter_ops.lock);
+ if (!bpfilter_ops.stop) {
+ err = -EFAULT;
+ goto out;
+ }
+ err = start_umh();
+ if (!err && IS_ENABLED(CONFIG_INET)) {
+ bpfilter_ops.sockopt = &__bpfilter_process_sockopt;
+ bpfilter_ops.start = &start_umh;
+ }
+out:
+ mutex_unlock(&bpfilter_ops.lock);
+ return err;
+}
+
static void __exit fini_umh(void)
{
- stop_umh();
+ mutex_lock(&bpfilter_ops.lock);
+ if (IS_ENABLED(CONFIG_INET)) {
+ shutdown_umh();
+ bpfilter_ops.start = NULL;
+ bpfilter_ops.sockopt = NULL;
+ }
+ mutex_unlock(&bpfilter_ops.lock);
}
module_init(load_umh);
module_exit(fini_umh);
/* SPDX-License-Identifier: GPL-2.0 */
- .section .init.rodata, "a"
+ .section .rodata, "a"
.global bpfilter_umh_start
bpfilter_umh_start:
.incbin "net/bpfilter/bpfilter_umh"
err = -ENOMEM;
goto err_unlock;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
fdb->added_by_external_learn = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
modified = true;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
+
if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
}
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
goto drop;
- skb_push(skb, ETH_HLEN);
br_drop_fake_rtable(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ skb->tstamp = 0;
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
net, sk, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
net = dev_net(indev);
} else {
if (unlikely(netpoll_tx_running(to->br->dev))) {
- if (!is_skb_forwardable(skb->dev, skb)) {
+ skb_push(skb, ETH_HLEN);
+ if (!is_skb_forwardable(skb->dev, skb))
kfree_skb(skb);
- } else {
- skb_push(skb, ETH_HLEN);
+ else
br_netpoll_send_skb(to, skb);
- }
return;
}
br_hook = NF_BR_LOCAL_OUT;
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
int ret;
- if (neigh->hh.hh_len) {
+ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
neigh_hh_bridge(&neigh->hh, skb);
skb->dev = nf_bridge->physindev;
ret = br_handle_frame_finish(net, sk, skb);
IPSTATS_MIB_INDISCARDS);
goto drop;
}
+ hdr = ipv6_hdr(skb);
}
if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
goto drop;
/* private vlan flags */
enum {
BR_VLFLAG_PER_PORT_STATS = BIT(0),
+ BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1),
};
/**
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
- u16 vid, u16 flags, struct netlink_ext_ack *extack)
+ struct net_bridge_vlan *v, u16 flags,
+ struct netlink_ext_ack *extack)
{
int err;
/* Try switchdev op first. In case it is not supported, fallback to
* 8021q add.
*/
- err = br_switchdev_port_vlan_add(dev, vid, flags, extack);
+ err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
if (err == -EOPNOTSUPP)
- return vlan_vid_add(dev, br->vlan_proto, vid);
+ return vlan_vid_add(dev, br->vlan_proto, v->vid);
+ v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
return err;
}
}
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
- u16 vid)
+ const struct net_bridge_vlan *v)
{
int err;
/* Try switchdev op first. In case it is not supported, fallback to
* 8021q del.
*/
- err = br_switchdev_port_vlan_del(dev, vid);
- if (err == -EOPNOTSUPP) {
- vlan_vid_del(dev, br->vlan_proto, vid);
- return 0;
- }
- return err;
+ err = br_switchdev_port_vlan_del(dev, v->vid);
+ if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
+ vlan_vid_del(dev, br->vlan_proto, v->vid);
+ return err == -EOPNOTSUPP ? 0 : err;
}
/* Returns a master vlan, if it didn't exist it gets created. In all cases a
* This ensures tagged traffic enters the bridge when
* promiscuous mode is disabled by br_manage_promisc().
*/
- err = __vlan_vid_add(dev, br, v->vid, flags, extack);
+ err = __vlan_vid_add(dev, br, v, flags, extack);
if (err)
goto out;
out_filt:
if (p) {
- __vlan_vid_del(dev, br, v->vid);
+ __vlan_vid_del(dev, br, v);
if (masterv) {
if (v->stats && masterv->stats != v->stats)
free_percpu(v->stats);
__vlan_delete_pvid(vg, v->vid);
if (p) {
- err = __vlan_vid_del(p->dev, p->br, v->vid);
+ err = __vlan_vid_del(p->dev, p->br, v);
if (err)
goto out;
} else {
tmp.name[sizeof(tmp.name) - 1] = 0;
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
- newinfo = vmalloc(sizeof(*newinfo) + countersize);
+ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
+ PAGE_KERNEL);
if (!newinfo)
return -ENOMEM;
if (countersize)
memset(newinfo->counters, 0, countersize);
- newinfo->entries = vmalloc(tmp.entries_size);
+ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
+ PAGE_KERNEL);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;
+ ip6h = ipv6_hdr(skb);
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
- /* check for checksum updates when the CAN frame has been modified */
+ /* Has the CAN frame been modified? */
if (modidx) {
- if (gwj->mod.csumfunc.crc8)
+ /* get available space for the processed CAN frame type */
+ int max_len = nskb->len - offsetof(struct can_frame, data);
+
+ /* dlc may have changed, make sure it fits to the CAN frame */
+ if (cf->can_dlc > max_len)
+ goto out_delete;
+
+ /* check for checksum updates in classic CAN length only */
+ if (gwj->mod.csumfunc.crc8) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
+
(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+ }
+
+ if (gwj->mod.csumfunc.xor) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
- if (gwj->mod.csumfunc.xor)
(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ }
}
/* clear the skb timestamp if not configured the other way */
gwj->dropped_frames++;
else
gwj->handled_frames++;
+
+ return;
+
+ out_delete:
+ /* delete frame due to misconfiguration */
+ gwj->deleted_frames++;
+ kfree_skb(nskb);
+ return;
}
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
Opt_nocephx_sign_messages,
Opt_tcp_nodelay,
Opt_notcp_nodelay,
+ Opt_abort_on_full,
};
static match_table_t opt_tokens = {
{Opt_nocephx_sign_messages, "nocephx_sign_messages"},
{Opt_tcp_nodelay, "tcp_nodelay"},
{Opt_notcp_nodelay, "notcp_nodelay"},
+ {Opt_abort_on_full, "abort_on_full"},
{-1, NULL}
};
opt->flags &= ~CEPH_OPT_TCP_NODELAY;
break;
+ case Opt_abort_on_full:
+ opt->flags |= CEPH_OPT_ABORT_ON_FULL;
+ break;
+
default:
BUG_ON(token);
}
}
EXPORT_SYMBOL(ceph_parse_options);
-int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
+ bool show_all)
{
struct ceph_options *opt = client->options;
size_t pos = m->count;
seq_puts(m, "nocephx_sign_messages,");
if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
seq_puts(m, "notcp_nodelay,");
+ if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL))
+ seq_puts(m, "abort_on_full,");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
seq_printf(m, "mount_timeout=%d,",
struct ceph_client *client = s->private;
int ret;
- ret = ceph_print_client_options(s, client);
+ ret = ceph_print_client_options(s, client, true);
if (ret)
return ret;
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
pool_full(osdc, req->r_t.base_oloc.pool))) {
dout("req %p full/pool_full\n", req);
- if (osdc->abort_on_full) {
+ if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
err = -ENOSPC;
} else {
pr_warn_ratelimited("FULL or reached pool quota\n");
{
bool victims = false;
- if (osdc->abort_on_full &&
+ if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
for_each_request(osdc, abort_on_full_fn, &victims);
}
#endif
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
void net_enable_timestamp(void)
{
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
int wanted;
while (1) {
void net_disable_timestamp(void)
{
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
int wanted;
while (1) {
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
- /* skb->mac_len is not set on normal egress */
- unsigned int mlen = skb->network_header - skb->mac_header;
+ unsigned int mlen = skb_network_offset(skb);
- __skb_pull(skb, mlen);
+ if (mlen) {
+ __skb_pull(skb, mlen);
- /* At ingress, the mac header has already been pulled once.
- * At egress, skb_pospull_rcsum has to be done in case that
- * the skb is originated from ingress (i.e. a forwarded skb)
- * to ensure that rcsum starts at net header.
- */
- if (!skb_at_tc_ingress(skb))
- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ /* At ingress, the mac header has already been pulled once.
+ * At egress, skb_postpull_rcsum has to be done in case that
+ * the skb is originated from ingress (i.e. a forwarded skb)
+ * to ensure that rcsum starts at net header.
+ */
+ if (!skb_at_tc_ingress(skb))
+ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ }
skb_pop_mac_header(skb);
skb_reset_mac_len(skb);
return flags & BPF_F_INGRESS ?
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
case SO_MAX_PACING_RATE: /* 32bit version */
+ if (val != ~0U)
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
sk->sk_rcvlowat = val ? : 1;
break;
case SO_MARK:
- sk->sk_mark = val;
+ if (sk->sk_mark != val) {
+ sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
break;
default:
ret = -EINVAL;
/* Only some options are supported */
switch (optname) {
case TCP_BPF_IW:
- if (val <= 0 || tp->data_segs_out > 0)
+ if (val <= 0 || tp->data_segs_out > tp->syn_data)
ret = -EINVAL;
else
tp->snd_cwnd = val;
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
- /* else: fall through */
+ /* else, fall through */
default:
return NULL;
}
lwt->name ? : "<unknown>");
ret = BPF_OK;
} else {
+ skb_reset_mac_header(skb);
ret = skb_do_redirect(skb);
if (ret == 0)
ret = BPF_REDIRECT;
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
buckets = kzalloc(size, GFP_ATOMIC);
- else
+ } else {
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
+ kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
+ }
if (!buckets) {
kfree(ret);
return NULL;
size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
struct neighbour __rcu **buckets = nht->hash_buckets;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
kfree(buckets);
- else
+ } else {
+ kmemleak_free(buckets);
free_pages((unsigned long)buckets, get_order(size));
+ }
kfree(nht);
}
if (neigh->ops->solicit)
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
- kfree_skb(skb);
+ consume_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */
unsigned long chunk;
struct sk_buff *skb;
struct page *page;
- gfp_t gfp_head;
int i;
*errcode = -EMSGSIZE;
if (npages > MAX_SKB_FRAGS)
return NULL;
- gfp_head = gfp_mask;
- if (gfp_head & __GFP_DIRECT_RECLAIM)
- gfp_head |= __GFP_RETRY_MAYFAIL;
-
*errcode = -ENOBUFS;
- skb = alloc_skb(header_len, gfp_head);
+ skb = alloc_skb(header_len, gfp_mask);
if (!skb)
return NULL;
// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bpfilter.h>
#include <uapi/linux/bpf.h>
#include <linux/wait.h>
#include <linux/kmod.h>
+#include <linux/fs.h>
+#include <linux/file.h>
-int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
- char __user *optval,
- unsigned int optlen, bool is_set);
-EXPORT_SYMBOL_GPL(bpfilter_process_sockopt);
+struct bpfilter_umh_ops bpfilter_ops;
+EXPORT_SYMBOL_GPL(bpfilter_ops);
+
+static void bpfilter_umh_cleanup(struct umh_info *info)
+{
+ mutex_lock(&bpfilter_ops.lock);
+ bpfilter_ops.stop = true;
+ fput(info->pipe_to_umh);
+ fput(info->pipe_from_umh);
+ info->pid = 0;
+ mutex_unlock(&bpfilter_ops.lock);
+}
static int bpfilter_mbox_request(struct sock *sk, int optname,
char __user *optval,
unsigned int optlen, bool is_set)
{
- if (!bpfilter_process_sockopt) {
- int err = request_module("bpfilter");
+ int err;
+ mutex_lock(&bpfilter_ops.lock);
+ if (!bpfilter_ops.sockopt) {
+ mutex_unlock(&bpfilter_ops.lock);
+ err = request_module("bpfilter");
+ mutex_lock(&bpfilter_ops.lock);
if (err)
- return err;
- if (!bpfilter_process_sockopt)
- return -ECHILD;
+ goto out;
+ if (!bpfilter_ops.sockopt) {
+ err = -ECHILD;
+ goto out;
+ }
+ }
+ if (bpfilter_ops.stop) {
+ err = bpfilter_ops.start();
+ if (err)
+ goto out;
}
- return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set);
+ err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
+out:
+ mutex_unlock(&bpfilter_ops.lock);
+ return err;
}
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
return bpfilter_mbox_request(sk, optname, optval, len, false);
}
+
+static int __init bpfilter_sockopt_init(void)
+{
+ mutex_init(&bpfilter_ops.lock);
+ bpfilter_ops.stop = true;
+ bpfilter_ops.info.cmdline = "bpfilter_umh";
+ bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup;
+
+ return 0;
+}
+
+module_init(bpfilter_sockopt_init);
if (fillargs.netnsid >= 0)
put_net(tgt_net);
- return err < 0 ? err : skb->len;
+ return skb->len ? : err;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
- flushed += fib_table_flush(net, tb);
+ flushed += fib_table_flush(net, tb, false);
}
if (flushed)
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
- fib_table_flush(net, tb);
+ fib_table_flush(net, tb, true);
fib_free_table(tb);
}
}
}
/* Caller must hold RTNL. */
-int fib_table_flush(struct net *net, struct fib_table *tb)
+int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
- tb->tb_id != fa->tb_id) {
+ if (!fi || tb->tb_id != fa->tb_id ||
+ (!(fi->fib_flags & RTNH_F_DEAD) &&
+ !fib_props[fa->fa_type].error)) {
+ slen = fa->fa_slen;
+ continue;
+ }
+
+ /* Do not flush error routes if network namespace is
+ * not being dismantled
+ */
+ if (!flush_all && fib_props[fa->fa_type].error) {
slen = fa->fa_slen;
continue;
}
{
int transport_offset = skb_transport_offset(skb);
struct guehdr *guehdr;
- size_t optlen;
+ size_t len, optlen;
int ret;
- if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ if (!pskb_may_pull(skb, len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
optlen = guehdr->hlen << 2;
+ if (!pskb_may_pull(skb, len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
if (validate_gue_flags(guehdr, optlen))
return -EINVAL;
* recursion. Besides, this kind of encapsulation can't even be
* configured currently. Discard this.
*/
- if (guehdr->proto_ctype == IPPROTO_UDP)
+ if (guehdr->proto_ctype == IPPROTO_UDP ||
+ guehdr->proto_ctype == IPPROTO_UDPLITE)
return -EOPNOTSUPP;
skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
dev->stats.tx_dropped++;
}
-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
- __be16 proto)
+static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info;
struct erspan_metadata *md;
struct rtable *rt = NULL;
bool truncate = false;
+ __be16 df, proto;
struct flowi4 fl;
int tunnel_hlen;
int version;
- __be16 df;
int nhoff;
int thoff;
if (version == 1) {
erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
ntohl(md->u.index), truncate, true);
+ proto = htons(ETH_P_ERSPAN);
} else if (version == 2) {
erspan_build_header_v2(skb,
ntohl(tunnel_id_to_key32(key->tun_id)),
md->u.md2.dir,
get_hwid(&md->u.md2),
truncate, true);
+ proto = htons(ETH_P_ERSPAN2);
} else {
goto err_free_rt;
}
gre_build_header(skb, 8, TUNNEL_SEQ,
- htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
+ proto, 0, htonl(tunnel->o_seqno++));
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
{
struct ip_tunnel *tunnel = netdev_priv(dev);
bool truncate = false;
+ __be16 proto;
if (!pskb_inet_may_pull(skb))
goto free_skb;
if (tunnel->collect_md) {
- erspan_fb_xmit(skb, dev, skb->protocol);
+ erspan_fb_xmit(skb, dev);
return NETDEV_TX_OK;
}
}
/* Push ERSPAN header */
- if (tunnel->erspan_ver == 1)
+ if (tunnel->erspan_ver == 1) {
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
truncate, true);
- else if (tunnel->erspan_ver == 2)
+ proto = htons(ETH_P_ERSPAN);
+ } else if (tunnel->erspan_ver == 2) {
erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
tunnel->dir, tunnel->hwid,
truncate, true);
- else
+ proto = htons(ETH_P_ERSPAN2);
+ } else {
goto free_skb;
+ }
tunnel->parms.o_flags &= ~TUNNEL_KEY;
- __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
+ __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
return NETDEV_TX_OK;
free_skb:
goto drop;
}
+ iph = ip_hdr(skb);
skb->transport_header = skb->network_header + iph->ihl*4;
/* Remove any debris in the socket control block */
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
+ __be16 _ports[2], *ports;
struct sockaddr_in sin;
- __be16 *ports;
- int end;
-
- end = skb_transport_offset(skb) + 4;
- if (end > 0 && !pskb_may_pull(skb, end))
- return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
- ports = (__be16 *)skb_transport_header(skb);
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (!ports)
+ return;
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
flags = msg->msg_flags;
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
- if (sk->sk_state != TCP_ESTABLISHED) {
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
err = -EINVAL;
goto out_err;
}
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits) {
dst_negative_advice(sk);
- } else if (!tp->syn_data && !tp->syn_fastopen) {
+ } else {
sk_rethink_txhash(sk);
}
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize)
+ if (hlen + cork->gso_size > cork->fragsize) {
+ kfree_skb(skb);
return -EINVAL;
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+ }
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
return -EINVAL;
- if (sk->sk_no_check_tx)
+ }
+ if (sk->sk_no_check_tx) {
+ kfree_skb(skb);
return -EINVAL;
+ }
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb)))
+ dst_xfrm(skb_dst(skb))) {
+ kfree_skb(skb);
return -EIO;
+ }
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
}
EXPORT_SYMBOL(udp_lib_rehash);
-static void udp_v4_rehash(struct sock *sk)
+void udp_v4_rehash(struct sock *sk)
{
u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
int udp_v4_get_port(struct sock *sk, unsigned short snum);
+void udp_v4_rehash(struct sock *sk);
int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
.sendpage = udp_sendpage,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
if (fillargs.netnsid >= 0)
put_net(tgt_net);
- return err < 0 ? err : skb->len;
+ return skb->len ? : err;
}
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
/* Check if the address belongs to the host. */
if (addr_type == IPV6_ADDR_MAPPED) {
+ struct net_device *dev = NULL;
int chk_addr_ret;
/* Binding to v4-mapped address on a v6-only socket
goto out;
}
+ rcu_read_lock();
+ if (sk->sk_bound_dev_if) {
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
/* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
- chk_addr_ret = inet_addr_type(net, v4addr);
+ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
+ rcu_read_unlock();
+
if (!inet_can_nonlocal_bind(net, inet) &&
v4addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
iph->daddr = fl6->daddr;
+ ip6_flow_hdr(iph, 0, 0);
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
}
if (np->rxopt.bits.rxorigdstaddr) {
struct sockaddr_in6 sin6;
- __be16 *ports;
- int end;
+ __be16 _ports[2], *ports;
- end = skb_transport_offset(skb) + 4;
- if (end <= 0 || pskb_may_pull(skb, end)) {
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (ports) {
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
- ports = (__be16 *)skb_transport_header(skb);
-
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
sin6.sin6_port = ports[1];
{
int transport_offset = skb_transport_offset(skb);
struct guehdr *guehdr;
- size_t optlen;
+ size_t len, optlen;
int ret;
- if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ if (!pskb_may_pull(skb, len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
optlen = guehdr->hlen << 2;
+ if (!pskb_may_pull(skb, len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
if (validate_gue_flags(guehdr, optlen))
return -EINVAL;
+ /* Handling exceptions for direct UDP encapsulation in GUE would lead to
+ * recursion. Besides, this kind of encapsulation can't even be
+ * configured currently. Discard this.
+ */
+ if (guehdr->proto_ctype == IPPROTO_UDP ||
+ guehdr->proto_ctype == IPPROTO_UDPLITE)
+ return -EOPNOTSUPP;
+
skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
opt, type, code, offset, info);
static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr)
{
- struct net *net = dev_net(skb->dev);
struct inet6_dev *idev = NULL;
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct sock *sk;
+ struct net *net;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
struct dst_entry *dst;
int iif = 0;
int addr_type = 0;
int len;
- u32 mark = IP6_REPLY_MARK(net, skb->mark);
+ u32 mark;
if ((u8 *)hdr < skb->head ||
(skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
return;
+ if (!skb->dev)
+ return;
+ net = dev_net(skb->dev);
+ mark = IP6_REPLY_MARK(net, skb->mark);
/*
* Make sure we respect the rules
* i.e. RFC 1885 2.4(e)
__u8 dsfield = false;
struct flowi6 fl6;
int err = -EINVAL;
+ __be16 proto;
__u32 mtu;
int nhoff;
int thoff;
}
/* Push GRE header. */
- gre_build_header(skb, 8, TUNNEL_SEQ,
- htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+ proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
+ : htons(ETH_P_ERSPAN2);
+ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
t->parms.fwmark = p->fwmark;
+ t->parms.erspan_ver = p->erspan_ver;
+ t->parms.index = p->index;
+ t->parms.dir = p->dir;
+ t->parms.hwid = p->hwid;
dst_cache_reset(&t->dst_cache);
}
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
struct __ip6_tnl_parm p;
- struct ip6_tnl *t;
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
if (IS_ERR(t))
struct list_head next;
};
-static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
-{
- struct rt6_nh *nh;
-
- list_for_each_entry(nh, rt6_nh_list, next) {
- pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
- &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
- nh->r_cfg.fc_ifindex);
- }
-}
-
static int ip6_route_info_append(struct net *net,
struct list_head *rt6_nh_list,
struct fib6_info *rt,
nh->fib6_info = NULL;
if (err) {
if (replace && nhn)
- ip6_print_replace_route_err(&rt6_nh_list);
+ NL_SET_ERR_MSG_MOD(extack,
+ "multipath route replace failed (check consistency of installed routes)");
err_nh = nh;
goto add_errout;
}
return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
-static void udp_v6_rehash(struct sock *sk)
+void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize)
+ if (hlen + cork->gso_size > cork->fragsize) {
+ kfree_skb(skb);
return -EINVAL;
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+ }
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
return -EINVAL;
- if (udp_sk(sk)->no_check6_tx)
+ }
+ if (udp_sk(sk)->no_check6_tx) {
+ kfree_skb(skb);
return -EINVAL;
+ }
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb)))
+ dst_xfrm(skb_dst(skb))) {
+ kfree_skb(skb);
return -EIO;
+ }
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
ipc6.opt = opt;
fl6.flowi6_proto = sk->sk_protocol;
- if (!ipv6_addr_any(daddr))
- fl6.daddr = *daddr;
- else
- fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+ fl6.daddr = *daddr;
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
}
}
+ if (ipv6_addr_any(&fl6.daddr))
+ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = false;
__be32, struct udp_table *);
int udp_v6_get_port(struct sock *sk, unsigned short snum);
+void udp_v6_rehash(struct sock *sk);
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
.recvmsg = udpv6_recvmsg,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif
if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
net_inc_ingress_queue();
#endif
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
#endif
BUG_ON(p == new_hooks);
if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
net_dec_ingress_queue();
#endif
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]);
#endif
} else {
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
struct dst_entry *dst = route->tuple[dir].dst;
ft->dir = dir;
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
- ft->iifidx = route->tuple[dir].ifindex;
- ft->oifidx = route->tuple[!dir].ifindex;
+ ft->iifidx = other_dst->dev->ifindex;
+ ft->oifidx = dst->dev->ifindex;
ft->dst_cache = dst;
}
struct net *net = sock_net(skb->sk);
unsigned int s_idx = cb->args[0];
const struct nft_rule *rule;
- int rc = 1;
list_for_each_entry_rcu(rule, &chain->rules, list) {
if (!nft_is_active(net, rule))
NLM_F_MULTI | NLM_F_APPEND,
table->family,
table, chain, rule) < 0)
- goto out_unfinished;
+ return 1;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
(*idx)++;
}
- rc = 0;
-out_unfinished:
- cb->args[0] = *idx;
- return rc;
+ return 0;
}
static int nf_tables_dump_rules(struct sk_buff *skb,
if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
continue;
- if (ctx && ctx->chain) {
+ if (ctx && ctx->table && ctx->chain) {
struct rhlist_head *list, *tmp;
list = rhltable_lookup(&table->chains_ht, ctx->chain,
}
done:
rcu_read_unlock();
+
+ cb->args[0] = idx;
return skb->len;
}
err5:
kfree(trans);
err4:
+ if (obj)
+ obj->use--;
kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_helper.h>
struct nft_flow_offload {
struct nft_flowtable *flowtable;
memset(&fl, 0, sizeof(fl));
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
break;
case NFPROTO_IPV6:
- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
break;
}
return -ENOENT;
route->tuple[dir].dst = this_dst;
- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
route->tuple[!dir].dst = other_dst;
- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
return 0;
}
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
+ const struct nf_conn_help *help;
enum ip_conntrack_info ctinfo;
struct nf_flow_route route;
struct flow_offload *flow;
goto out;
}
- if (test_bit(IPS_HELPER_BIT, &ct->status))
+ help = nfct_help(ct);
+ if (help)
goto out;
if (ctinfo == IP_CT_NEW ||
nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
if (flags & IP6_FH_F_FRAG) {
- if (frag_off)
+ if (frag_off) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
- else
- key->ip.frag = OVS_FRAG_TYPE_FIRST;
+ key->ip.proto = nexthdr;
+ return 0;
+ }
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
} else {
key->ip.frag = OVS_FRAG_TYPE_NONE;
}
return -EINVAL;
}
- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
attrs |= 1 << type;
a[type] = nla;
}
addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out;
+ goto out_put;
}
err = -ENXIO;
addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out;
+ goto out_unlock;
}
err = -ENXIO;
goto out_free;
} else if (reserve) {
skb_reserve(skb, -reserve);
- if (len < reserve)
+ if (len < reserve + sizeof(struct ipv6hdr) &&
+ dev->min_header_len != dev->hard_header_len)
skb_reset_network_header(skb);
}
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
- i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
+ i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
- i = ceil(op->op_count, max_sge);
+ i = DIV_ROUND_UP(op->op_count, max_sge);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
{
struct rds_message *rm;
unsigned int i;
- int num_sgs = ceil(total_len, PAGE_SIZE);
+ int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
int extra_bytes = num_sgs * sizeof(struct scatterlist);
int ret;
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
- rm->data.op_nents = ceil(total_len, PAGE_SIZE);
+ rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
if (!rm->data.op_sg) {
rds_message_put(rm);
}
#endif
-/* XXX is there one of these somewhere? */
-#define ceil(x, y) \
- ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
-
#define RDS_FRAG_SHIFT 12
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
size_t total_payload_len = payload_len, rdma_payload_len = 0;
bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
- int num_sgs = ceil(payload_len, PAGE_SIZE);
+ int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
int namelen;
struct rds_iov_vector_arr vct;
int ind;
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
/**
- * rxrpc_kernel_check_call - Check a call's state
- * @sock: The socket the call is on
- * @call: The call to check
- * @_compl: Where to store the completion state
- * @_abort_code: Where to store any abort code
- *
- * Allow a kernel service to query the state of a call and find out the manner
- * of its termination if it has completed. Returns -EINPROGRESS if the call is
- * still going, 0 if the call finished successfully, -ECONNABORTED if the call
- * was aborted and an appropriate error if the call failed in some other way.
- */
-int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
- enum rxrpc_call_completion *_compl, u32 *_abort_code)
-{
- if (call->state != RXRPC_CALL_COMPLETE)
- return -EINPROGRESS;
- smp_rmb();
- *_compl = call->completion;
- *_abort_code = call->abort_code;
- return call->error;
-}
-EXPORT_SYMBOL(rxrpc_kernel_check_call);
-
-/**
- * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
- * @sock: The socket the call is on
- * @call: The call to retry
- * @srx: The address of the peer to contact
- * @key: The security context to use (defaults to socket setting)
- *
- * Allow a kernel service to try resending a client call that failed due to a
- * network error to a new address. The Tx queue is maintained intact, thereby
- * relieving the need to re-encrypt any request data that has already been
- * buffered.
- */
-int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
- struct sockaddr_rxrpc *srx, struct key *key)
-{
- struct rxrpc_conn_parameters cp;
- struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- int ret;
-
- _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
-
- if (!key)
- key = rx->key;
- if (key && !key->payload.data[0])
- key = NULL; /* a no-security key */
-
- memset(&cp, 0, sizeof(cp));
- cp.local = rx->local;
- cp.key = key;
- cp.security_level = 0;
- cp.exclusive = false;
- cp.service_id = srx->srx_service;
-
- mutex_lock(&call->user_mutex);
-
- ret = rxrpc_prepare_call_for_retry(rx, call);
- if (ret == 0)
- ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
-
- mutex_unlock(&call->user_mutex);
- rxrpc_put_peer(cp.peer);
- _leave(" = %d", ret);
- return ret;
-}
-EXPORT_SYMBOL(rxrpc_kernel_retry_call);
-
-/**
* rxrpc_kernel_new_call_notification - Get notifications of new calls
* @sock: The socket to intercept received messages on
* @notify_new_call: Function to be called when new calls appear
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
- RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
};
/*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+ RXRPC_CALL_SUCCEEDED, /* - Normal termination */
+ RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
+ RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
+ RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
+ RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
+ NR__RXRPC_CALL_COMPLETIONS
+};
+
+/*
* Call Tx congestion management modes.
*/
enum rxrpc_congest_mode {
struct sockaddr_rxrpc *,
struct rxrpc_call_params *, gfp_t,
unsigned int);
-int rxrpc_retry_client_call(struct rxrpc_sock *,
- struct rxrpc_call *,
- struct rxrpc_conn_parameters *,
- struct sockaddr_rxrpc *,
- gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
-int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
}
/*
- * Retry a call to a new address. It is expected that the Tx queue of the call
- * will contain data previously packaged for an old call.
- */
-int rxrpc_retry_client_call(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
-{
- const void *here = __builtin_return_address(0);
- int ret;
-
- /* Set up or get a connection record and set the protocol parameters,
- * including channel number and call ID.
- */
- ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
- if (ret < 0)
- goto error;
-
- trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, NULL);
-
- rxrpc_start_call_timer(call);
-
- _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
-
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
- rxrpc_queue_call(call);
-
- _leave(" = 0");
- return 0;
-
-error:
- rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
- RX_CALL_DEAD, ret);
- trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
- here, ERR_PTR(ret));
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* Set up an incoming call. call->conn points to the connection.
* This is called in BH context and isn't allowed to fail.
*/
}
/*
- * Prepare a kernel service call for retry.
- */
-int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
-{
- const void *here = __builtin_return_address(0);
- int i;
- u8 last = 0;
-
- _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
-
- trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
- here, (const void *)call->flags);
-
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
- ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
- ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
- ASSERT(list_empty(&call->recvmsg_link));
-
- del_timer_sync(&call->timer);
-
- _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);
-
- if (call->conn)
- rxrpc_disconnect_call(call);
-
- if (rxrpc_is_service_call(call) ||
- !call->tx_phase ||
- call->tx_hard_ack != 0 ||
- call->rx_hard_ack != 0 ||
- call->rx_top != 0)
- return -EINVAL;
-
- call->state = RXRPC_CALL_UNINITIALISED;
- call->completion = RXRPC_CALL_SUCCEEDED;
- call->call_id = 0;
- call->cid = 0;
- call->cong_cwnd = 0;
- call->cong_extra = 0;
- call->cong_ssthresh = 0;
- call->cong_mode = 0;
- call->cong_dup_acks = 0;
- call->cong_cumul_acks = 0;
- call->acks_lowest_nak = 0;
-
- for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
- last |= call->rxtx_annotations[i];
- call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
- call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
- }
-
- _leave(" = 0");
- return 0;
-}
-
-/*
* release all the calls associated with a socket
*/
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
write_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
- call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
- else
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
write_unlock_bh(&call->state_lock);
rxrpc_see_call(call);
ASSERTCMP(seq, ==, call->tx_top + 1);
- if (last) {
+ if (last)
annotation |= RXRPC_TX_ANNO_LAST;
- set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
- }
/* We have to set the timestamp before queueing as the retransmit
* algorithm can see the packet as soon as we queue it.
call->tx_total_len -= copy;
}
+ /* check for the far side aborting the call or a network error
+ * occurring */
+ if (call->state == RXRPC_CALL_COMPLETE)
+ goto call_terminated;
+
/* add the packet to the send queue if it's now full */
if (sp->remain <= 0 ||
(msg_data_left(msg) == 0 && !more)) {
notify_end_tx);
skb = NULL;
}
-
- /* Check for the far side aborting the call or a network error
- * occurring. If this happens, save any packet that was under
- * construction so that in the case of a network error, the
- * call can be retried or redirected.
- */
- if (call->state == RXRPC_CALL_COMPLETE) {
- ret = call->error;
- goto out;
- }
} while (msg_data_left(msg) > 0);
success:
_leave(" = %d", ret);
return ret;
+call_terminated:
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ _leave(" = %d", call->error);
+ return call->error;
+
maybe_error:
if (copied)
goto success;
[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
+static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
+{
+ if (!p)
+ return;
+ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+ dst_release(&p->tcft_enc_metadata->dst);
+ kfree_rcu(p, rcu);
+}
+
static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
rcu_swap_protected(t->params, params_new,
lockdep_is_held(&t->tcf_lock));
spin_unlock_bh(&t->tcf_lock);
- if (params_new)
- kfree_rcu(params_new, rcu);
+ tunnel_key_release_params(params_new);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
struct tcf_tunnel_key_params *params;
params = rcu_dereference_protected(t->params, 1);
- if (params) {
- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
- dst_release(&params->tcft_enc_metadata->dst);
-
- kfree_rcu(params, rcu);
- }
+ tunnel_key_release_params(params);
}
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
- __be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 4;
const struct tcf_proto *orig_tp = tp;
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
+ __be16 protocol = tc_skb_protocol(skb);
int err;
if (tp->protocol != protocol &&
}
tp = first_tp;
- protocol = tc_skb_protocol(skb);
goto reclassify;
#endif
}
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *fold = *arg;
struct cls_fl_filter *fnew;
+ struct fl_flow_mask *mask;
struct nlattr **tb;
- struct fl_flow_mask mask = {};
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
- tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
- if (!tb)
+ mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
+ if (!mask)
return -ENOBUFS;
+ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+ if (!tb) {
+ err = -ENOBUFS;
+ goto errout_mask_alloc;
+ }
+
err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
fl_policy, NULL);
if (err < 0)
}
}
- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
+ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
tp->chain->tmplt_priv, extack);
if (err)
goto errout_idr;
- err = fl_check_assign_mask(head, fnew, fold, &mask);
+ err = fl_check_assign_mask(head, fnew, fold, mask);
if (err)
goto errout_idr;
}
kfree(tb);
+ kfree(mask);
return 0;
errout_mask:
kfree(fnew);
errout_tb:
kfree(tb);
+errout_mask_alloc:
+ kfree(mask);
return err;
}
if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
- unsigned int slen = 0;
+ unsigned int slen = 0, numsegs = 0;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
flow_queue_add(flow, segs);
sch->q.qlen++;
+ numsegs++;
slen += segs->len;
q->buffer_used += segs->truesize;
b->packets++;
sch->qstats.backlog += slen;
q->avg_window_bytes += slen;
- qdisc_tree_reduce_backlog(sch, 1, len);
+ qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
consume_skb(skb);
} else {
/* not splitting */
struct Qdisc *child,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
int err;
err = child->ops->enqueue(skb, child, to_free);
if (err != NET_XMIT_SUCCESS)
return err;
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
int err = 0;
+ bool first;
cl = drr_classify(skb, sch, &err);
if (cl == NULL) {
return err;
}
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
return err;
}
- if (cl->qdisc->q.qlen == 1) {
+ if (first) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return err;
}
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct dsmark_qdisc_data *p = qdisc_priv(sch);
int err;
return err;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct hfsc_class *cl;
int uninitialized_var(err);
+ bool first;
cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
return err;
}
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
return err;
}
- if (cl->qdisc->q.qlen == 1) {
- unsigned int len = qdisc_pkt_len(skb);
-
+ if (first) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC)
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
struct sk_buff **to_free)
{
int uninitialized_var(ret);
+ unsigned int len = qdisc_pkt_len(skb);
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_classify(skb, sch, &ret);
htb_activate(q, cl);
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *qdisc;
int ret;
ret = qdisc_enqueue(skb, qdisc, to_free);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb), gso_segs;
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
struct qfq_aggregate *agg;
int err = 0;
+ bool first;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
}
pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
- if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
+ if (unlikely(cl->agg->lmax < len)) {
pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
- cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
- err = qfq_change_agg(sch, cl, cl->agg->class_weight,
- qdisc_pkt_len(skb));
+ cl->agg->lmax, len, cl->common.classid);
+ err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
if (err) {
cl->qstats.drops++;
return qdisc_drop(skb, sch, to_free);
}
}
+ gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
return err;
}
- bstats_update(&cl->bstats, skb);
- qdisc_qstats_backlog_inc(sch, skb);
+ cl->bstats.bytes += len;
+ cl->bstats.packets += gso_segs;
+ sch->qstats.backlog += len;
++sch->q.qlen;
agg = cl->agg;
/* if the queue was not empty, then done here */
- if (cl->qdisc->q.qlen != 1) {
+ if (!first) {
if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
list_first_entry(&agg->active, struct qfq_class, alist)
- == cl && cl->deficit < qdisc_pkt_len(skb))
+ == cl && cl->deficit < len)
list_move_tail(&cl->alist, &agg->active);
return err;
struct sk_buff **to_free)
{
struct tbf_sched_data *q = qdisc_priv(sch);
+ unsigned int len = qdisc_pkt_len(skb);
int ret;
if (qdisc_pkt_len(skb) > q->max_size) {
return ret;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
- addr->a.v6.sin6_flowinfo = 0;
addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
spin_lock_bh(&net->sctp.local_addr_lock);
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
}
+
+ sk->sk_prot->unhash(sk);
+
if (smc->clcsock) {
if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
/* wake up clcsock accept */
smc_conn_free(&smc->conn);
release_sock(sk);
- sk->sk_prot->unhash(sk);
sock_put(sk); /* final sock_put */
out:
return rc;
static struct cred machine_cred = {
.usage = ATOMIC_INIT(1),
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ .magic = CRED_MAGIC,
+#endif
};
/*
cred_len = p++;
spin_lock(&ctx->gc_seq_lock);
- req->rq_seqno = ctx->gc_seq++;
+ req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
spin_unlock(&ctx->gc_seq_lock);
+ if (req->rq_seqno == MAXSEQ)
+ goto out_expired;
*p++ = htonl((u32) RPC_GSS_VERSION);
*p++ = htonl((u32) ctx->gc_proc);
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
- clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ goto out_expired;
} else if (maj_stat != 0) {
- printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
+ pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
+ task->tk_status = -EIO;
goto out_put_ctx;
}
p = xdr_encode_opaque(p, NULL, mic.len);
gss_put_ctx(ctx);
return p;
+out_expired:
+ clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ task->tk_status = -EKEYEXPIRED;
out_put_ctx:
gss_put_ctx(ctx);
return NULL;
xdr_buf_init(&req->rq_rcv_buf,
req->rq_rbuffer,
req->rq_rcvsize);
- req->rq_bytes_sent = 0;
p = rpc_encode_header(task);
- if (p == NULL) {
- printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
- rpc_exit(task, -EIO);
+ if (p == NULL)
return;
- }
encode = task->tk_msg.rpc_proc->p_encode;
if (encode == NULL)
/* Did the encode result in an error condition? */
if (task->tk_status != 0) {
/* Was the error nonfatal? */
- if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
+ switch (task->tk_status) {
+ case -EAGAIN:
+ case -ENOMEM:
rpc_delay(task, HZ >> 4);
- else
+ break;
+ case -EKEYEXPIRED:
+ task->tk_action = call_refresh;
+ break;
+ default:
rpc_exit(task, task->tk_status);
+ }
return;
}
*p++ = htonl(clnt->cl_vers); /* program version */
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
p = rpcauth_marshcred(task, p);
- req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
+ if (p)
+ req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
return p;
}
struct rpc_xprt *xprt = req->rq_xprt;
if (xprt_request_need_enqueue_transmit(task, req)) {
+ req->rq_bytes_sent = 0;
spin_lock(&xprt->queue_lock);
/*
* Requests that carry congestion control credits are added
INIT_LIST_HEAD(&req->rq_xmit2);
goto out;
}
- } else {
+ } else if (!req->rq_seqno) {
list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
if (pos->rq_task->tk_owner != task->tk_owner)
continue;
for (i = 0; i <= buf->rb_sc_last; i++) {
sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
if (!sc)
- goto out_destroy;
+ return -ENOMEM;
sc->sc_xprt = r_xprt;
buf->rb_sc_ctxs[i] = sc;
}
return 0;
-
-out_destroy:
- rpcrdma_sendctxs_destroy(buf);
- return -ENOMEM;
}
/* The sendctx queue is not guaranteed to have a size that is a
WQ_MEM_RECLAIM | WQ_HIGHPRI,
0,
r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
- if (!buf->rb_completion_wq)
+ if (!buf->rb_completion_wq) {
+ rc = -ENOMEM;
goto out;
+ }
return 0;
out:
#include <net/udp.h>
#include <net/tcp.h>
#include <linux/bvec.h>
+#include <linux/highmem.h>
#include <linux/uio.h>
#include <trace/events/sunrpc.h>
return sock_recvmsg(sock, msg, flags);
}
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static void
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
+{
+ struct bvec_iter bi = {
+ .bi_size = count,
+ };
+ struct bio_vec bv;
+
+ bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
+ for_each_bvec(bv, bvec, bi, bi)
+ flush_dcache_page(bv.bv_page);
+}
+#else
+static inline void
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
+{
+}
+#endif
+
static ssize_t
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
seek + buf->page_base);
if (ret <= 0)
goto sock_err;
+ xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
offset += ret - buf->page_base;
if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
goto out;
return limit;
}
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
return buf;
}
+static inline bool string_is_valid(char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_bearer_config *b;
+ int len;
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(b->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
return -EMSGSIZE;
{
char *name;
struct nlattr *bearer;
+ int len;
name = (char *)TLV_DATA(msg->req);
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
return -EMSGSIZE;
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
int err;
+ int len;
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
return err;
name = (char *)TLV_DATA(msg->req);
+
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
return 0;
struct nlattr *prop;
struct nlattr *media;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
if (!media)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
return -EMSGSIZE;
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
return -EMSGSIZE;
struct tipc_link_config *lc;
struct tipc_bearer *bearer;
struct tipc_media *media;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
media = tipc_media_find(lc->name);
if (media) {
cmd->doit = &__tipc_nl_media_set;
{
char *name;
struct nlattr *link;
+ int len;
name = (char *)TLV_DATA(msg->req);
if (!link)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
return -EMSGSIZE;
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+ return -EINVAL;
depth = ntohl(ntq->depth);
hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
TIPC_NL_PUBL_GET);
- if (!hdr)
+ if (!hdr) {
+ kfree_skb(args);
return -EMSGSIZE;
+ }
nest = nla_nest_start(args, TIPC_NLA_SOCK);
if (!nest) {
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (len && !TLV_OK(msg.req, len)) {
+ if (!len || !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
- if (ret > 0) {
+ if (ret == sizeof(s)) {
read_lock_bh(&sk->sk_callback_lock);
ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
* not know if the device has more tx queues than rx, or the opposite.
* This might also change during run time.
*/
-static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
- u16 queue_id)
+static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
+ u16 queue_id)
{
+ if (queue_id >= max_t(unsigned int,
+ dev->real_num_rx_queues,
+ dev->real_num_tx_queues))
+ return -EINVAL;
+
if (queue_id < dev->real_num_rx_queues)
dev->_rx[queue_id].umem = umem;
if (queue_id < dev->real_num_tx_queues)
dev->_tx[queue_id].umem = umem;
+
+ return 0;
}
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
goto out_rtnl_unlock;
}
- xdp_reg_umem_at_qid(dev, umem, queue_id);
+ err = xdp_reg_umem_at_qid(dev, umem, queue_id);
+ if (err)
+ goto out_rtnl_unlock;
+
umem->dev = dev;
umem->queue_id = queue_id;
if (force_copy)
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+#ifndef __ASM_GOTO_WORKAROUND_H
+#define __ASM_GOTO_WORKAROUND_H
+
+/* This brings in the asm_volatile_goto macro definition
+ * if it is enabled by the compiler and config options.
+ */
+#include <linux/types.h>
+
+#ifdef asm_volatile_goto
+#undef asm_volatile_goto
+#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+#endif
+
+#endif
/* Create cgroup /foo, get fd, and join it */
foo = create_and_get_cgroup(FOO);
- if (!foo)
+ if (foo < 0)
goto err;
if (join_cgroup(FOO))
/* Create cgroup /foo/bar, get fd, and join it */
bar = create_and_get_cgroup(BAR);
- if (!bar)
+ if (bar < 0)
goto err;
if (join_cgroup(BAR))
goto err;
cg1 = create_and_get_cgroup("/cg1");
- if (!cg1)
+ if (cg1 < 0)
goto err;
cg2 = create_and_get_cgroup("/cg1/cg2");
- if (!cg2)
+ if (cg2 < 0)
goto err;
cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
- if (!cg3)
+ if (cg3 < 0)
goto err;
cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
- if (!cg4)
+ if (cg4 < 0)
goto err;
cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
- if (!cg5)
+ if (cg5 < 0)
goto err;
if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
cg2 = create_and_get_cgroup(CGROUP_PATH);
- if (!cg2)
+ if (cg2 < 0)
goto err;
if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
return 1;
}
- ifindex = if_nametoindex(argv[1]);
+ ifindex = if_nametoindex(argv[optind]);
if (!ifindex) {
perror("if_nametoindex");
return 1;
HOSTCFLAGS_dropper.o += $(MFLAG)
HOSTCFLAGS_bpf-helper.o += $(MFLAG)
HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
+HOSTCFLAGS_user-trap.o += $(MFLAG)
HOSTLDLIBS_bpf-direct += $(MFLAG)
HOSTLDLIBS_bpf-fancy += $(MFLAG)
HOSTLDLIBS_dropper += $(MFLAG)
basetarget = $(basename $(notdir $@))
###
-# filename of first prerequisite with directory and extension stripped
-baseprereq = $(basename $(notdir $<))
-
-###
# Escape single quote for use in echo statements
escsq = $(subst $(squote),'\$(squote)',$1)
###
# filechk is used to check if the content of a generated file is updated.
# Sample usage:
-# define filechk_sample
-# echo $KERNELRELEASE
-# endef
-# version.h : Makefile
+#
+# filechk_sample = echo $(KERNELRELEASE)
+# version.h: FORCE
# $(call filechk,sample)
+#
# The rule defined shall write to stdout the content of the new file.
# The existing file will be compared with the new one.
# - If no file exist it is created
define filechk
$(Q)set -e; \
mkdir -p $(dir $@); \
- $(filechk_$(1)) > $@.tmp; \
+ { $(filechk_$(1)); } > $@.tmp; \
if [ -r $@ ] && cmp -s $@ $@.tmp; then \
rm -f $@.tmp; \
else \
include scripts/Kbuild.include
+# If the arch does not implement mandatory headers, fall back to asm-generic ones.
+mandatory-y := $(filter-out $(generated-y), $(mandatory-y))
+generic-y += $(foreach f, $(mandatory-y), $(if $(wildcard $(srctree)/$(src)/$(f)),,$(f)))
+
generic-y := $(addprefix $(obj)/, $(generic-y))
generated-y := $(addprefix $(obj)/, $(generated-y))
all-files := $(header-files) $(genhdr-files)
output-files := $(addprefix $(installdir)/, $(all-files))
-ifneq ($(mandatory-y),)
-missing := $(filter-out $(all-files),$(mandatory-y))
-ifneq ($(missing),)
-$(error Some mandatory headers ($(missing)) are missing in $(obj))
-endif
-endif
-
# Work out what needs to be removed
oldheaders := $(patsubst $(installdir)/%,%,$(wildcard $(installdir)/*.h))
unwanted := $(filter-out $(all-files),$(oldheaders))
# ---------------------------------------------------------------------------
quiet_cmd_gzip = GZIP $@
-cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \
- (rm -f $@ ; false)
+ cmd_gzip = cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@
# DTC
# ---------------------------------------------------------------------------
cmd_dtb_check = $(DT_CHECKER) -p $(DT_TMP_SCHEMA) $@ ;
define rule_dtc_dt_yaml
- $(call cmd_and_fixdep,dtc,yaml) \
- $(call echo-cmd,dtb_check) $(cmd_dtb_check)
+ $(call cmd_and_fixdep,dtc,yaml)
+ $(call cmd,dtb_check)
endef
$(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
quiet_cmd_bzip2 = BZIP2 $@
cmd_bzip2 = (cat $(filter-out FORCE,$^) | \
- bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
- (rm -f $@ ; false)
+ bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@
# Lzma
# ---------------------------------------------------------------------------
quiet_cmd_lzma = LZMA $@
cmd_lzma = (cat $(filter-out FORCE,$^) | \
- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
- (rm -f $@ ; false)
+ lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@
quiet_cmd_lzo = LZO $@
cmd_lzo = (cat $(filter-out FORCE,$^) | \
- lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
- (rm -f $@ ; false)
+ lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@
quiet_cmd_lz4 = LZ4 $@
cmd_lz4 = (cat $(filter-out FORCE,$^) | \
- lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
- (rm -f $@ ; false)
+ lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@
# U-Boot mkimage
# ---------------------------------------------------------------------------
UIMAGE_LOADADDR ?= arch_must_set_this
UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR)
UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)'
-UIMAGE_IN ?= $<
-UIMAGE_OUT ?= $@
-quiet_cmd_uimage = UIMAGE $(UIMAGE_OUT)
+quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \
-C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \
-T $(UIMAGE_TYPE) \
-a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \
- -n $(UIMAGE_NAME) -d $(UIMAGE_IN) $(UIMAGE_OUT)
+ -n $(UIMAGE_NAME) -d $< $@
# XZ
# ---------------------------------------------------------------------------
quiet_cmd_xzkern = XZKERN $@
cmd_xzkern = (cat $(filter-out FORCE,$^) | \
sh $(srctree)/scripts/xz_wrap.sh && \
- $(call size_append, $(filter-out FORCE,$^))) > $@ || \
- (rm -f $@ ; false)
+ $(call size_append, $(filter-out FORCE,$^))) > $@
quiet_cmd_xzmisc = XZMISC $@
cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
- xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
- (rm -f $@ ; false)
+ xz --check=crc32 --lzma2=dict=1MiB) > $@
# ASM offsets
# ---------------------------------------------------------------------------
# Use filechk to avoid rebuilds when a header changes, but the resulting file
# does not
define filechk_offsets
- ( \
echo "#ifndef $2"; \
echo "#define $2"; \
echo "/*"; \
echo ""; \
sed -ne $(sed-offsets) < $<; \
echo ""; \
- echo "#endif" )
+ echo "#endif"
endef
(T *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
* (T *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
- (T *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
(T@p *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
- x = (T)vmalloc(E1);
+ x = (T)vzalloc(E1);
|
-- x = dma_alloc_coherent(E2,E1,E3,E4);
-+ x = dma_zalloc_coherent(E2,E1,E3,E4);
-|
-- x = (T *)dma_alloc_coherent(E2,E1,E3,E4);
-+ x = dma_zalloc_coherent(E2,E1,E3,E4);
-|
-- x = (T)dma_alloc_coherent(E2,E1,E3,E4);
-+ x = (T)dma_zalloc_coherent(E2,E1,E3,E4);
-|
- x = kmalloc_node(E1,E2,E3);
+ x = kzalloc_node(E1,E2,E3);
|
x << r2.x;
@@
-msg="WARNING: dma_zalloc_coherent should be used for %s, instead of dma_alloc_coherent/memset" % (x)
+msg="WARNING: dma_alloc_coherent use in %s already zeroes out memory, so memset is not needed" % (x)
coccilib.report.print_report(p[0], msg)
//-----------------------------------------------------------------
iterator name hlist_for_each_entry_safe;
statement S;
position p1,p2;
+type T;
@@
(
|
&c->member
|
+T c;
+|
c = E
|
*c@p2
@r4 depends on !patch@
bool b;
position p2;
+identifier i;
constant c != {0,1};
@@
+(
+ b = i
+|
*b@p2 = c
+)
@script:python depends on org@
p << r1.p;
# Test for gcc 'asm goto' support
# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
-cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null
int main(void)
{
#if defined(__arm__) || defined(__aarch64__)
for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
const char *sym;
rtx body;
- rtx masked_sp;
+ rtx mask, masked_sp;
/*
* Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
* produces the address of the copy of the stack canary value
* stored in struct thread_info
*/
+ mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
masked_sp = gen_reg_rtx(Pmode);
emit_insn_before(gen_rtx_SET(masked_sp,
gen_rtx_AND(Pmode,
stack_pointer_rtx,
- GEN_INT(sp_mask))),
+ mask)),
insn);
SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
#define NO_GATE
#include "gcc-generate-rtl-pass.h"
+#if BUILDING_GCC_VERSION >= 9000
+static bool no(void)
+{
+ return false;
+}
+
+static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data)
+{
+ targetm.have_stack_protect_combined_set = no;
+ targetm.have_stack_protect_combined_test = no;
+}
+#endif
+
__visible int plugin_init(struct plugin_name_args *plugin_info,
struct plugin_gcc_version *version)
{
register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
NULL, &arm_pertask_ssp_rtl_pass_info);
+#if BUILDING_GCC_VERSION >= 9000
+ register_callback(plugin_info->base_name, PLUGIN_START_UNIT,
+ arm_pertask_ssp_start_unit, NULL);
+#endif
+
return 0;
}
printf("#include <asm/types.h>\n");
printf("#if BITS_PER_LONG == 64\n");
printf("#define PTR .quad\n");
- printf("#define ALGN .align 8\n");
+ printf("#define ALGN .balign 8\n");
printf("#else\n");
printf("#define PTR .long\n");
- printf("#define ALGN .align 4\n");
+ printf("#define ALGN .balign 4\n");
printf("#endif\n");
printf("\t.section .rodata, \"a\"\n");
# Generated files
#
*.moc
+*conf-cfg
#
# configuration programs
hostprogs-y += nconf
nconf-objs := nconf.o nconf.gui.o $(common-objs)
-HOSTLDLIBS_nconf = $(shell . $(obj)/.nconf-cfg && echo $$libs)
-HOSTCFLAGS_nconf.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
-HOSTCFLAGS_nconf.gui.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
+HOSTLDLIBS_nconf = $(shell . $(obj)/nconf-cfg && echo $$libs)
+HOSTCFLAGS_nconf.o = $(shell . $(obj)/nconf-cfg && echo $$cflags)
+HOSTCFLAGS_nconf.gui.o = $(shell . $(obj)/nconf-cfg && echo $$cflags)
-$(obj)/nconf.o $(obj)/nconf.gui.o: $(obj)/.nconf-cfg
+$(obj)/nconf.o $(obj)/nconf.gui.o: $(obj)/nconf-cfg
# mconf: Used for the menuconfig target based on lxdialog
hostprogs-y += mconf
lxdialog := checklist.o inputbox.o menubox.o textbox.o util.o yesno.o
mconf-objs := mconf.o $(addprefix lxdialog/, $(lxdialog)) $(common-objs)
-HOSTLDLIBS_mconf = $(shell . $(obj)/.mconf-cfg && echo $$libs)
+HOSTLDLIBS_mconf = $(shell . $(obj)/mconf-cfg && echo $$libs)
$(foreach f, mconf.o $(lxdialog), \
- $(eval HOSTCFLAGS_$f = $$(shell . $(obj)/.mconf-cfg && echo $$$$cflags)))
+ $(eval HOSTCFLAGS_$f = $$(shell . $(obj)/mconf-cfg && echo $$$$cflags)))
-$(obj)/mconf.o: $(obj)/.mconf-cfg
-$(addprefix $(obj)/lxdialog/, $(lxdialog)): $(obj)/.mconf-cfg
+$(obj)/mconf.o: $(obj)/mconf-cfg
+$(addprefix $(obj)/lxdialog/, $(lxdialog)): $(obj)/mconf-cfg
# qconf: Used for the xconfig target based on Qt
hostprogs-y += qconf
qconf-cxxobjs := qconf.o
qconf-objs := images.o $(common-objs)
-HOSTLDLIBS_qconf = $(shell . $(obj)/.qconf-cfg && echo $$libs)
-HOSTCXXFLAGS_qconf.o = $(shell . $(obj)/.qconf-cfg && echo $$cflags)
+HOSTLDLIBS_qconf = $(shell . $(obj)/qconf-cfg && echo $$libs)
+HOSTCXXFLAGS_qconf.o = $(shell . $(obj)/qconf-cfg && echo $$cflags)
-$(obj)/qconf.o: $(obj)/.qconf-cfg $(obj)/qconf.moc
+$(obj)/qconf.o: $(obj)/qconf-cfg $(obj)/qconf.moc
quiet_cmd_moc = MOC $@
- cmd_moc = $(shell . $(obj)/.qconf-cfg && echo $$moc) -i $< -o $@
+ cmd_moc = $(shell . $(obj)/qconf-cfg && echo $$moc) -i $< -o $@
-$(obj)/%.moc: $(src)/%.h $(obj)/.qconf-cfg
+$(obj)/%.moc: $(src)/%.h $(obj)/qconf-cfg
$(call cmd,moc)
# gconf: Used for the gconfig target based on GTK+
hostprogs-y += gconf
gconf-objs := gconf.o images.o $(common-objs)
-HOSTLDLIBS_gconf = $(shell . $(obj)/.gconf-cfg && echo $$libs)
-HOSTCFLAGS_gconf.o = $(shell . $(obj)/.gconf-cfg && echo $$cflags)
+HOSTLDLIBS_gconf = $(shell . $(obj)/gconf-cfg && echo $$libs)
+HOSTCFLAGS_gconf.o = $(shell . $(obj)/gconf-cfg && echo $$cflags)
-$(obj)/gconf.o: $(obj)/.gconf-cfg
+$(obj)/gconf.o: $(obj)/gconf-cfg
# check if necessary packages are available, and configure build flags
-define filechk_conf_cfg
- $(CONFIG_SHELL) $<
-endef
+filechk_conf_cfg = $(CONFIG_SHELL) $<
-$(obj)/.%conf-cfg: $(src)/%conf-cfg.sh FORCE
+$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE
$(call filechk,conf_cfg)
-clean-files += .*conf-cfg
+clean-files += *conf-cfg
%union
{
char *string;
- struct file *file;
struct symbol *symbol;
struct expr *expr;
struct menu *menu;
/* Cannot check for assembler */
static void add_retpoline(struct buffer *b)
{
- buf_printf(b, "\n#ifdef RETPOLINE\n");
+ buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
buf_printf(b, "#endif\n");
}
void security_cred_free(struct cred *cred)
{
+ /*
+ * There is a failure case in prepare_creds() that
+ * may result in a call here with ->security being NULL.
+ */
+ if (unlikely(cred->security == NULL))
+ return;
+
call_void_hook(cred_free, cred);
}
kfree(key);
if (datum) {
levdatum = datum;
- ebitmap_destroy(&levdatum->level->cat);
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
kfree(levdatum->level);
}
kfree(datum);
break;
case YAMA_SCOPE_RELATIONAL:
rcu_read_lock();
- if (!task_is_descendant(current, child) &&
+ if (!pid_alive(child))
+ rc = -EPERM;
+ if (!rc && !task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
- r->size, &r->bus_addr, GFP_KERNEL);
+ r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
+ r->size, &r->bus_addr, GFP_KERNEL);
if (!r->space)
return -ENOMEM;
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
int i;
+ if (!ins)
+ return 0;
+
snd_info_free_entry(ins->proc_sym_info_entry);
ins->proc_sym_info_entry = NULL;
case 0x10ec0295:
case 0x10ec0289:
case 0x10ec0299:
+ alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
break;
case 0x10ec0867:
}
}
+static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DISABLE_MIC_VREF,
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_FIXUP_DISABLE_DAC3,
ALC280_FIXUP_HP_HEADSET_MIC,
.chained = true,
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
+ [ALC225_FIXUP_DISABLE_MIC_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_mic_vref,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
{}
},
.chained = true,
- .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
},
[ALC280_FIXUP_HP_HEADSET_MIC] = {
.type = HDA_FIXUP_FUNC,
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
dbri->op = op;
dbri->irq = irq;
- dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma),
- &dbri->dma_dvma, GFP_KERNEL);
+ dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma),
+ &dbri->dma_dvma, GFP_KERNEL);
if (!dbri->dma)
return -ENOMEM;
h1 = snd_usb_find_csint_desc(host_iface->extra,
host_iface->extralen,
NULL, UAC_HEADER);
- if (!h1) {
+ if (!h1 || h1->bLength < sizeof(*h1)) {
dev_err(&dev->dev, "cannot find UAC_HEADER\n");
return -EINVAL;
}
struct uac_mixer_unit_descriptor *desc)
{
int mu_channels;
+ void *c;
- if (desc->bLength < 11)
+ if (desc->bLength < sizeof(*desc))
return -EINVAL;
if (!desc->bNrInPins)
return -EINVAL;
case UAC_VERSION_1:
case UAC_VERSION_2:
default:
+ if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
+ return 0; /* no bmControls -> skip */
mu_channels = uac_mixer_unit_bNrChannels(desc);
break;
case UAC_VERSION_3:
}
if (!mu_channels)
- return -EINVAL;
+ return 0;
+
+ c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
+ if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
+ return 0; /* no bmControls -> skip */
return mu_channels;
}
struct uac_mixer_unit_descriptor *d = p1;
err = uac_mixer_unit_get_channels(state, d);
- if (err < 0)
+ if (err <= 0)
return err;
term->channels = err;
if (state->mixer->protocol == UAC_VERSION_2) {
struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
+ if (d_v2->bLength < sizeof(*d_v2))
+ return -EINVAL;
control = UAC2_TE_CONNECTOR;
term_id = d_v2->bTerminalID;
bmctls = le16_to_cpu(d_v2->bmControls);
} else if (state->mixer->protocol == UAC_VERSION_3) {
struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
+ if (d_v3->bLength < sizeof(*d_v3))
+ return -EINVAL;
control = UAC3_TE_INSERTION;
term_id = d_v3->bTerminalID;
bmctls = le32_to_cpu(d_v3->bmControls);
if (err < 0)
continue;
/* no bmControls field (e.g. Maya44) -> ignore */
- if (desc->bLength <= 10 + input_pins)
+ if (!num_outs)
continue;
err = check_input_term(state, desc->baSourceID[pin], &iterm);
if (err < 0)
char *name)
{
struct uac_processing_unit_descriptor *desc = raw_desc;
- int num_ins = desc->bNrInPins;
+ int num_ins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
0, NULL, default_value_info
};
- if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
+ if (desc->bLength < 13) {
+ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+ return -EINVAL;
+ }
+
+ num_ins = desc->bNrInPins;
+ if (desc->bLength < 13 + num_ins ||
desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
}
}
},
+ {
+ .ifnum = -1
+ },
}
}
},
}
}
},
+ {
+ .ifnum = -1
+ },
}
}
},
* REG1: PLL binary search enable, soft mute enable.
*/
CM6206_REG1_PLLBIN_EN |
- CM6206_REG1_SOFT_MUTE_EN |
+ CM6206_REG1_SOFT_MUTE_EN,
/*
* REG2: enable output drivers,
* select front channels to the headphone output,
csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
if (!csep || csep->bLength < 7 ||
- csep->bDescriptorSubtype != UAC_EP_GENERAL) {
- usb_audio_warn(chip,
- "%u:%d : no or invalid class specific endpoint descriptor\n",
- iface_no, altsd->bAlternateSetting);
- return 0;
- }
+ csep->bDescriptorSubtype != UAC_EP_GENERAL)
+ goto error;
if (protocol == UAC_VERSION_1) {
attributes = csep->bmAttributes;
struct uac2_iso_endpoint_descriptor *csep2 =
(struct uac2_iso_endpoint_descriptor *) csep;
+ if (csep2->bLength < sizeof(*csep2))
+ goto error;
attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
/* emulate the endpoint attributes of a v1 device */
struct uac3_iso_endpoint_descriptor *csep3 =
(struct uac3_iso_endpoint_descriptor *) csep;
+ if (csep3->bLength < sizeof(*csep3))
+ goto error;
/* emulate the endpoint attributes of a v1 device */
if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
}
return attributes;
+
+ error:
+ usb_audio_warn(chip,
+ "%u:%d : no or invalid class specific endpoint descriptor\n",
+ iface_no, altsd->bAlternateSetting);
+ return 0;
}
/* find an input terminal descriptor (either UAC1 or UAC2) with the given
*/
static void *
snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
- int terminal_id)
+ int terminal_id, bool uac23)
{
struct uac2_input_terminal_descriptor *term = NULL;
+ size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
+ sizeof(struct uac_input_terminal_descriptor);
while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
term, UAC_INPUT_TERMINAL))) {
+ if (term->bLength < minlen)
+ continue;
if (term->bTerminalID == terminal_id)
return term;
}
while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
term, UAC_OUTPUT_TERMINAL))) {
- if (term->bTerminalID == terminal_id)
+ if (term->bLength >= sizeof(*term) &&
+ term->bTerminalID == terminal_id)
return term;
}
format = le16_to_cpu(as->wFormatTag); /* remember the format value */
iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
- as->bTerminalLink);
+ as->bTerminalLink,
+ false);
if (iterm) {
num_channels = iterm->bNrChannels;
chconfig = le16_to_cpu(iterm->wChannelConfig);
* to extract the clock
*/
input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
- as->bTerminalLink);
+ as->bTerminalLink,
+ true);
if (input_term) {
clock = input_term->bCSourceID;
if (!chconfig && (num_channels == input_term->bNrChannels))
* to extract the clock
*/
input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
- as->bTerminalLink);
+ as->bTerminalLink,
+ true);
if (input_term) {
clock = input_term->bCSourceID;
goto found_clock;
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
+ PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * This file contains the system call numbers.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
-#define _UAPI_ASM_POWERPC_UNISTD_H_
-
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_query_module 166
-#define __NR_poll 167
-#define __NR_nfsservctl 168
-#define __NR_setresgid 169
-#define __NR_getresgid 170
-#define __NR_prctl 171
-#define __NR_rt_sigreturn 172
-#define __NR_rt_sigaction 173
-#define __NR_rt_sigprocmask 174
-#define __NR_rt_sigpending 175
-#define __NR_rt_sigtimedwait 176
-#define __NR_rt_sigqueueinfo 177
-#define __NR_rt_sigsuspend 178
-#define __NR_pread64 179
-#define __NR_pwrite64 180
-#define __NR_chown 181
-#define __NR_getcwd 182
-#define __NR_capget 183
-#define __NR_capset 184
-#define __NR_sigaltstack 185
-#define __NR_sendfile 186
-#define __NR_getpmsg 187 /* some people actually want streams */
-#define __NR_putpmsg 188 /* some people actually want streams */
-#define __NR_vfork 189
-#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
-#define __NR_readahead 191
-#ifndef __powerpc64__ /* these are 32-bit only */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#endif
-#define __NR_pciconfig_read 198
-#define __NR_pciconfig_write 199
-#define __NR_pciconfig_iobase 200
-#define __NR_multiplexer 201
-#define __NR_getdents64 202
-#define __NR_pivot_root 203
-#ifndef __powerpc64__
-#define __NR_fcntl64 204
-#endif
-#define __NR_madvise 205
-#define __NR_mincore 206
-#define __NR_gettid 207
-#define __NR_tkill 208
-#define __NR_setxattr 209
-#define __NR_lsetxattr 210
-#define __NR_fsetxattr 211
-#define __NR_getxattr 212
-#define __NR_lgetxattr 213
-#define __NR_fgetxattr 214
-#define __NR_listxattr 215
-#define __NR_llistxattr 216
-#define __NR_flistxattr 217
-#define __NR_removexattr 218
-#define __NR_lremovexattr 219
-#define __NR_fremovexattr 220
-#define __NR_futex 221
-#define __NR_sched_setaffinity 222
-#define __NR_sched_getaffinity 223
-/* 224 currently unused */
-#define __NR_tuxcall 225
-#ifndef __powerpc64__
-#define __NR_sendfile64 226
-#endif
-#define __NR_io_setup 227
-#define __NR_io_destroy 228
-#define __NR_io_getevents 229
-#define __NR_io_submit 230
-#define __NR_io_cancel 231
-#define __NR_set_tid_address 232
-#define __NR_fadvise64 233
-#define __NR_exit_group 234
-#define __NR_lookup_dcookie 235
-#define __NR_epoll_create 236
-#define __NR_epoll_ctl 237
-#define __NR_epoll_wait 238
-#define __NR_remap_file_pages 239
-#define __NR_timer_create 240
-#define __NR_timer_settime 241
-#define __NR_timer_gettime 242
-#define __NR_timer_getoverrun 243
-#define __NR_timer_delete 244
-#define __NR_clock_settime 245
-#define __NR_clock_gettime 246
-#define __NR_clock_getres 247
-#define __NR_clock_nanosleep 248
-#define __NR_swapcontext 249
-#define __NR_tgkill 250
-#define __NR_utimes 251
-#define __NR_statfs64 252
-#define __NR_fstatfs64 253
-#ifndef __powerpc64__
-#define __NR_fadvise64_64 254
-#endif
-#define __NR_rtas 255
-#define __NR_sys_debug_setcontext 256
-/* Number 257 is reserved for vserver */
-#define __NR_migrate_pages 258
-#define __NR_mbind 259
-#define __NR_get_mempolicy 260
-#define __NR_set_mempolicy 261
-#define __NR_mq_open 262
-#define __NR_mq_unlink 263
-#define __NR_mq_timedsend 264
-#define __NR_mq_timedreceive 265
-#define __NR_mq_notify 266
-#define __NR_mq_getsetattr 267
-#define __NR_kexec_load 268
-#define __NR_add_key 269
-#define __NR_request_key 270
-#define __NR_keyctl 271
-#define __NR_waitid 272
-#define __NR_ioprio_set 273
-#define __NR_ioprio_get 274
-#define __NR_inotify_init 275
-#define __NR_inotify_add_watch 276
-#define __NR_inotify_rm_watch 277
-#define __NR_spu_run 278
-#define __NR_spu_create 279
-#define __NR_pselect6 280
-#define __NR_ppoll 281
-#define __NR_unshare 282
-#define __NR_splice 283
-#define __NR_tee 284
-#define __NR_vmsplice 285
-#define __NR_openat 286
-#define __NR_mkdirat 287
-#define __NR_mknodat 288
-#define __NR_fchownat 289
-#define __NR_futimesat 290
-#ifdef __powerpc64__
-#define __NR_newfstatat 291
-#else
-#define __NR_fstatat64 291
-#endif
-#define __NR_unlinkat 292
-#define __NR_renameat 293
-#define __NR_linkat 294
-#define __NR_symlinkat 295
-#define __NR_readlinkat 296
-#define __NR_fchmodat 297
-#define __NR_faccessat 298
-#define __NR_get_robust_list 299
-#define __NR_set_robust_list 300
-#define __NR_move_pages 301
-#define __NR_getcpu 302
-#define __NR_epoll_pwait 303
-#define __NR_utimensat 304
-#define __NR_signalfd 305
-#define __NR_timerfd_create 306
-#define __NR_eventfd 307
-#define __NR_sync_file_range2 308
-#define __NR_fallocate 309
-#define __NR_subpage_prot 310
-#define __NR_timerfd_settime 311
-#define __NR_timerfd_gettime 312
-#define __NR_signalfd4 313
-#define __NR_eventfd2 314
-#define __NR_epoll_create1 315
-#define __NR_dup3 316
-#define __NR_pipe2 317
-#define __NR_inotify_init1 318
-#define __NR_perf_event_open 319
-#define __NR_preadv 320
-#define __NR_pwritev 321
-#define __NR_rt_tgsigqueueinfo 322
-#define __NR_fanotify_init 323
-#define __NR_fanotify_mark 324
-#define __NR_prlimit64 325
-#define __NR_socket 326
-#define __NR_bind 327
-#define __NR_connect 328
-#define __NR_listen 329
-#define __NR_accept 330
-#define __NR_getsockname 331
-#define __NR_getpeername 332
-#define __NR_socketpair 333
-#define __NR_send 334
-#define __NR_sendto 335
-#define __NR_recv 336
-#define __NR_recvfrom 337
-#define __NR_shutdown 338
-#define __NR_setsockopt 339
-#define __NR_getsockopt 340
-#define __NR_sendmsg 341
-#define __NR_recvmsg 342
-#define __NR_recvmmsg 343
-#define __NR_accept4 344
-#define __NR_name_to_handle_at 345
-#define __NR_open_by_handle_at 346
-#define __NR_clock_adjtime 347
-#define __NR_syncfs 348
-#define __NR_sendmmsg 349
-#define __NR_setns 350
-#define __NR_process_vm_readv 351
-#define __NR_process_vm_writev 352
-#define __NR_finit_module 353
-#define __NR_kcmp 354
-#define __NR_sched_setattr 355
-#define __NR_sched_getattr 356
-#define __NR_renameat2 357
-#define __NR_seccomp 358
-#define __NR_getrandom 359
-#define __NR_memfd_create 360
-#define __NR_bpf 361
-#define __NR_execveat 362
-#define __NR_switch_endian 363
-#define __NR_userfaultfd 364
-#define __NR_membarrier 365
-#define __NR_mlock2 378
-#define __NR_copy_file_range 379
-#define __NR_preadv2 380
-#define __NR_pwritev2 381
-#define __NR_kexec_file_load 382
-#define __NR_statx 383
-#define __NR_pkey_alloc 384
-#define __NR_pkey_free 385
-#define __NR_pkey_mprotect 386
-#define __NR_rseq 387
-#define __NR_io_pgetevents 388
-
-#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
--- /dev/null
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
#endif
+#ifdef CONFIG_X86_SMAP
+# define DISABLE_SMAP 0
+#else
+# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
+#endif
+
#ifdef CONFIG_X86_INTEL_UMIP
# define DISABLE_UMIP 0
#else
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_MPX)
+#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
#ifndef _TOOLS_LINUX_ASM_X86_RMWcc
#define _TOOLS_LINUX_ASM_X86_RMWcc
-#ifdef CC_HAVE_ASM_GOTO
+#ifdef CONFIG_CC_HAS_ASM_GOTO
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
-#else /* !CC_HAVE_ASM_GOTO */
+#else /* !CONFIG_CC_HAS_ASM_GOTO */
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
-#endif /* CC_HAVE_ASM_GOTO */
+#endif /* CONFIG_CC_HAS_ASM_GOTO */
#endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
ifeq ($(feature-libbfd),1)
+ LIBS += -lbfd -ldl -lopcodes
+else ifeq ($(feature-libbfd-liberty),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty
+else ifeq ($(feature-libbfd-liberty-z),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty -lz
+endif
+
+ifneq ($(filter -lbfd,$(LIBS)),)
CFLAGS += -DHAVE_LIBBFD_SUPPORT
SRCS += $(BFD_SRCS)
-LIBS += -lbfd -lopcodes
endif
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
int bits_to_copy;
__u64 print_num;
- data += BITS_ROUNDDOWN_BYTES(bit_offset);
- bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
* BTF_INT_OFFSET() cannot exceed 64 bits.
*/
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
- btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw,
+ data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+ bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
is_plain_text);
}
}
jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
+ data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
if (bitfield_size) {
- btf_dumper_bitfield(bitfield_size, bit_offset,
- data, d->jw, d->is_plain_text);
+ btf_dumper_bitfield(bitfield_size,
+ BITS_PER_BYTE_MASKED(bit_offset),
+ data_off, d->jw, d->is_plain_text);
} else {
- data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
ret = btf_dumper_do_type(d, m[i].type,
BITS_PER_BYTE_MASKED(bit_offset),
data_off);
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
/*
* Simple streaming JSON writer
*
* This takes care of the annoying bits of JSON syntax like the commas
* after elements
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Stephen Hemminger <stephen@networkplumber.org>
*/
* This takes care of the annoying bits of JSON syntax like the commas
* after elements
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Stephen Hemminger <stephen@networkplumber.org>
*/
cplus-demangle \
hello \
libbabeltrace \
- liberty \
- liberty-z \
+ libbfd-liberty \
+ libbfd-liberty-z \
libunwind-debug-frame \
libunwind-debug-frame-arm \
libunwind-debug-frame-aarch64 \
test-libbfd.bin \
test-disassembler-four-args.bin \
test-reallocarray.bin \
- test-liberty.bin \
- test-liberty-z.bin \
+ test-libbfd-liberty.bin \
+ test-libbfd-liberty-z.bin \
test-cplus-demangle.bin \
test-libelf.bin \
test-libelf-getphdrnum.bin \
$(BUILD)
$(OUTPUT)test-libbfd.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
$(OUTPUT)test-disassembler-four-args.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
$(OUTPUT)test-reallocarray.bin:
$(BUILD)
-$(OUTPUT)test-liberty.bin:
+$(OUTPUT)test-libbfd-liberty.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
-$(OUTPUT)test-liberty-z.bin:
+$(OUTPUT)test-libbfd-liberty-z.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
$(OUTPUT)test-cplus-demangle.bin:
# (this improves performance and avoids hard-to-debug behaviour);
MAKEFLAGS += -r
-CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon
ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_kexec_file_load 294
+__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
#undef __NR_syscalls
-#define __NR_syscalls 294
+#define __NR_syscalls 295
/*
* 32 bit systems traditionally used different
#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
#elif defined(__ia64__)
#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
+#elif defined(__riscv)
+#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
int irq_seq;
} drm_i915_irq_wait_t;
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
#include <linux/ioctl.h>
#include <linux/types.h>
+/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
+#if !defined(__KERNEL__)
+#include <linux/mount.h>
+#endif
+
/*
* It's silly to have NR_OPEN bigger than NR_FILE, but you can change
* the file limit at runtime and only root can increase the per-process
#define NR_FILE 8192 /* this can well be larger on a larger system */
-
-/*
- * These are the fs-independent mount-flags: up to 32 flags are supported
- */
-#define MS_RDONLY 1 /* Mount read-only */
-#define MS_NOSUID 2 /* Ignore suid and sgid bits */
-#define MS_NODEV 4 /* Disallow access to device special files */
-#define MS_NOEXEC 8 /* Disallow program execution */
-#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
-#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
-#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
-#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
-#define MS_NOATIME 1024 /* Do not update access times. */
-#define MS_NODIRATIME 2048 /* Do not update directory access times */
-#define MS_BIND 4096
-#define MS_MOVE 8192
-#define MS_REC 16384
-#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
- MS_VERBOSE is deprecated. */
-#define MS_SILENT 32768
-#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
-#define MS_UNBINDABLE (1<<17) /* change to unbindable */
-#define MS_PRIVATE (1<<18) /* change to private */
-#define MS_SLAVE (1<<19) /* change to slave */
-#define MS_SHARED (1<<20) /* change to shared */
-#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
-#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
-#define MS_I_VERSION (1<<23) /* Update inode I_version field */
-#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
-#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
-
-/* These sb flags are internal to the kernel */
-#define MS_SUBMOUNT (1<<26)
-#define MS_NOREMOTELOCK (1<<27)
-#define MS_NOSEC (1<<28)
-#define MS_BORN (1<<29)
-#define MS_ACTIVE (1<<30)
-#define MS_NOUSER (1<<31)
-
-/*
- * Superblock flags that can be altered by MS_REMOUNT
- */
-#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
- MS_LAZYTIME)
-
-/*
- * Old magic mount flag and mask
- */
-#define MS_MGC_VAL 0xC0ED0000
-#define MS_MGC_MSK 0xffff0000
-
/*
* Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
*/
#define FS_POLICY_FLAGS_PAD_16 0x02
#define FS_POLICY_FLAGS_PAD_32 0x03
#define FS_POLICY_FLAGS_PAD_MASK 0x03
-#define FS_POLICY_FLAGS_VALID 0x03
+#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */
+#define FS_POLICY_FLAGS_VALID 0x07
/* Encryption algorithms */
#define FS_ENCRYPTION_MODE_INVALID 0
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_ADIANTUM 9
struct fscrypt_policy {
__u8 version;
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
+ IFLA_BR_MULTI_BOOLOPT,
__IFLA_BR_MAX,
};
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
+ IFLA_VXLAN_DF,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
__be16 high;
};
+enum ifla_vxlan_df {
+ VXLAN_DF_UNSET = 0,
+ VXLAN_DF_SET,
+ VXLAN_DF_INHERIT,
+ __VXLAN_DF_END,
+ VXLAN_DF_MAX = __VXLAN_DF_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
IFLA_GENEVE_TTL_INHERIT,
+ IFLA_GENEVE_DF,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+enum ifla_geneve_df {
+ GENEVE_DF_UNSET = 0,
+ GENEVE_DF_SET,
+ GENEVE_DF_INHERIT,
+ __GENEVE_DF_END,
+ GENEVE_DF_MAX = __GENEVE_DF_END - 1,
+};
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define IN_MULTICAST(a) IN_CLASSD(a)
-#define IN_MULTICAST_NET 0xF0000000
+#define IN_MULTICAST_NET 0xe0000000
-#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
-#define IN_BADCLASS(a) IN_EXPERIMENTAL((a))
+#define IN_BADCLASS(a) (((long int) (a)) == (long int)0xffffffff)
+#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
+
+#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define IN_CLASSE_NET 0xffffffff
+#define IN_CLASSE_NSHIFT 0
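
The in.h hunk above reworks the class-E handling: IN_MULTICAST_NET now names the real multicast network (224.0.0.0/4), only 255.255.255.255 is still flagged by IN_BADCLASS, and the rest of 240/4 is exposed through the new IN_CLASSE helpers. A minimal user-space sketch of the resulting checks, using local macro copies (the MY_ prefix is ours) so it builds against any libc:

  #include <stdio.h>
  #include <arpa/inet.h>

  /* Local copies of the reworked checks from the hunk above. */
  #define MY_IN_CLASSE(a)   ((((long int)(a)) & 0xf0000000) == 0xf0000000)
  #define MY_IN_BADCLASS(a) (((long int)(a)) == (long int)0xffffffff)

  int main(void)
  {
          /* 240.1.2.3 is class E but no longer "bad"; only the
           * limited-broadcast address 255.255.255.255 still is. */
          unsigned long a = ntohl(inet_addr("240.1.2.3"));

          printf("class E:  %d\n", MY_IN_CLASSE(a) ? 1 : 0);   /* 1 */
          printf("badclass: %d\n", MY_IN_BADCLASS(a) ? 1 : 0); /* 0 */
          return 0;
  }
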
/* Address to accept any incoming messages. */
#define INADDR_ANY ((unsigned long int) 0x00000000)
};
};
+/* for KVM_CLEAR_DIRTY_LOG */
+struct kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#define KVM_CAP_EXCEPTION_PAYLOAD 164
#define KVM_CAP_ARM_VM_IPA_SIZE 165
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_HYPERV_CPUID 167
#ifdef KVM_CAP_IRQ_ROUTING
#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
+
+/* Available with KVM_CAP_HYPERV_CPUID */
+#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+
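The intended flow for the new dirty-log interface (hedged here, since only the uapi side is visible in this diff) is: enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, harvest a bitmap with KVM_GET_DIRTY_LOG as before, then hand the processed pages back to the kernel with KVM_CLEAR_DIRTY_LOG so they get write-protected again. A minimal sketch, assuming vm_fd is a VM file descriptor with the capability enabled:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Re-protect num_pages pages starting at first_page in memslot 'slot';
   * 'bitmap' holds one bit per page, as copied out by KVM_GET_DIRTY_LOG. */
  static int clear_dirty_range(int vm_fd, __u32 slot, __u64 first_page,
                               __u32 num_pages, void *bitmap)
  {
          struct kvm_clear_dirty_log log;

          memset(&log, 0, sizeof(log));
          log.slot = slot;
          log.first_page = first_page;
          log.num_pages = num_pages;
          log.dirty_bitmap = bitmap;

          return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
  }
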
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
--- /dev/null
+#ifndef _UAPI_LINUX_MOUNT_H
+#define _UAPI_LINUX_MOUNT_H
+
+/*
+ * These are the fs-independent mount-flags: up to 32 flags are supported
+ *
+ * Usage of these is restricted within the kernel to core mount(2) code and
+ * callers of sys_mount() only. Filesystems should be using the SB_*
+ * equivalent instead.
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define MS_NODIRATIME 2048 /* Do not update directory access times */
+#define MS_BIND 4096
+#define MS_MOVE 8192
+#define MS_REC 16384
+#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
+ MS_VERBOSE is deprecated. */
+#define MS_SILENT 32768
+#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define MS_UNBINDABLE (1<<17) /* change to unbindable */
+#define MS_PRIVATE (1<<18) /* change to private */
+#define MS_SLAVE (1<<19) /* change to slave */
+#define MS_SHARED (1<<20) /* change to shared */
+#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
+#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
+#define MS_I_VERSION (1<<23) /* Update inode I_version field */
+#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
+#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define MS_SUBMOUNT (1<<26)
+#define MS_NOREMOTELOCK (1<<27)
+#define MS_NOSEC (1<<28)
+#define MS_BORN (1<<29)
+#define MS_ACTIVE (1<<30)
+#define MS_NOUSER (1<<31)
+
+/*
+ * Superblock flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
+ MS_LAZYTIME)
+
+/*
+ * Old magic mount flag and mask
+ */
+#define MS_MGC_VAL 0xC0ED0000
+#define MS_MGC_MSK 0xffff0000
+
+#endif /* _UAPI_LINUX_MOUNT_H */
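
The MS_* values are unchanged; they merely move into the new uapi/linux/mount.h. As a reminder of how the flags combine in practice, here is a minimal sketch of a recursive, read-only bind mount (the paths are placeholders) using the same flag values via glibc's <sys/mount.h>:

  #include <stdio.h>
  #include <sys/mount.h>

  int main(void)
  {
          /* Bind /srv/data under /mnt/ro-view, then remount the bind read-only:
           * MS_RDONLY cannot be applied in the same call as the initial bind. */
          if (mount("/srv/data", "/mnt/ro-view", NULL, MS_BIND | MS_REC, NULL) ||
              mount(NULL, "/mnt/ro-view", NULL,
                    MS_REMOUNT | MS_BIND | MS_RDONLY, NULL)) {
                  perror("mount");
                  return 1;
          }
          return 0;
  }
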
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_PKT_SCHED_H
+#define __LINUX_PKT_SCHED_H
+
+#include <linux/types.h>
+
+/* Logical priority bands not depending on specific packet scheduler.
+ Every scheduler will map them to real traffic classes, if it has
+ no more precise mechanism to classify packets.
+
+ These numbers have no special meaning; their coincidence with
+ obsolete IPv6 values is not accidental :-). Later IPv6 drafts
+ preferred full anarchy inspired by the diffserv group.
+
+ Note: TC_PRIO_BESTEFFORT is not the most unhappy class; as a rule
+ it is handled with more care than filler or even bulk traffic.
+ */
+
+#define TC_PRIO_BESTEFFORT 0
+#define TC_PRIO_FILLER 1
+#define TC_PRIO_BULK 2
+#define TC_PRIO_INTERACTIVE_BULK 4
+#define TC_PRIO_INTERACTIVE 6
+#define TC_PRIO_CONTROL 7
+
+#define TC_PRIO_MAX 15
+
+/* Generic queue statistics, available for all the elements.
+ Particular schedulers may have also their private records.
+ */
+
+struct tc_stats {
+ __u64 bytes; /* Number of enqueued bytes */
+ __u32 packets; /* Number of enqueued packets */
+ __u32 drops; /* Packets dropped because of lack of resources */
+ __u32 overlimits; /* Number of throttle events when this
+ * flow goes out of allocated bandwidth */
+ __u32 bps; /* Current flow byte rate */
+ __u32 pps; /* Current flow packet rate */
+ __u32 qlen;
+ __u32 backlog;
+};
+
+struct tc_estimator {
+ signed char interval;
+ unsigned char ewma_log;
+};
+
+/* "Handles"
+ ---------
+
+ All the traffic control objects have 32bit identifiers, or "handles".
+
+ They can be considered opaque numbers from the user API viewpoint,
+ but they actually consist of two fields: major and minor numbers,
+ which the kernel interprets specially and which applications may
+ also rely on, though this is not recommended.
+
+ F.e. qdisc handles always have minor number equal to zero,
+ classes (or flows) have major equal to parent qdisc major, and
+ minor uniquely identifying class inside qdisc.
+
+ Macros to manipulate handles:
+ */
+
+#define TC_H_MAJ_MASK (0xFFFF0000U)
+#define TC_H_MIN_MASK (0x0000FFFFU)
+#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
+#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
+#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
+
+#define TC_H_UNSPEC (0U)
+#define TC_H_ROOT (0xFFFFFFFFU)
+#define TC_H_INGRESS (0xFFFFFFF1U)
+#define TC_H_CLSACT TC_H_INGRESS
+
+#define TC_H_MIN_PRIORITY 0xFFE0U
+#define TC_H_MIN_INGRESS 0xFFF2U
+#define TC_H_MIN_EGRESS 0xFFF3U
+
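A worked example of the handle helpers above: tc's textual handle "1:2a" is major 1, minor 0x2a, i.e. TC_H_MAKE(1 << 16, 0x2a). A minimal sketch with local copies of the macros so it compiles on its own:

  #include <stdio.h>

  #define TC_H_MAJ_MASK (0xFFFF0000U)
  #define TC_H_MIN_MASK (0x0000FFFFU)
  #define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)
  #define TC_H_MIN(h) ((h) & TC_H_MIN_MASK)
  #define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

  int main(void)
  {
          unsigned int h = TC_H_MAKE(1U << 16, 0x2a); /* tc handle "1:2a" */

          /* prints: handle=0x0001002a maj=0x1 min=0x2a */
          printf("handle=0x%08x maj=0x%x min=0x%x\n",
                 h, TC_H_MAJ(h) >> 16, TC_H_MIN(h));
          return 0;
  }
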
+/* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+ TC_LINKLAYER_ETHERNET,
+ TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
+struct tc_ratespec {
+ unsigned char cell_log;
+ __u8 linklayer; /* lower 4 bits */
+ unsigned short overhead;
+ short cell_align;
+ unsigned short mpu;
+ __u32 rate;
+};
+
+#define TC_RTAB_SIZE 1024
+
+struct tc_sizespec {
+ unsigned char cell_log;
+ unsigned char size_log;
+ short cell_align;
+ int overhead;
+ unsigned int linklayer;
+ unsigned int mpu;
+ unsigned int mtu;
+ unsigned int tsize;
+};
+
+enum {
+ TCA_STAB_UNSPEC,
+ TCA_STAB_BASE,
+ TCA_STAB_DATA,
+ __TCA_STAB_MAX
+};
+
+#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
+
+/* FIFO section */
+
+struct tc_fifo_qopt {
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
+};
+
+/* SKBPRIO section */
+
+/*
+ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
+ * SKBPRIO_MAX_PRIORITY should be at least 64 so that skbprio can map the
+ * DS field of IPv4 and IPv6 headers one to one.
+ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
+ */
+
+#define SKBPRIO_MAX_PRIORITY 64
+
+struct tc_skbprio_qopt {
+ __u32 limit; /* Queue length in packets. */
+};
+
+/* PRIO section */
+
+#define TCQ_PRIO_BANDS 16
+#define TCQ_MIN_PRIO_BANDS 2
+
+struct tc_prio_qopt {
+ int bands; /* Number of bands */
+ __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
+};
+
+/* MULTIQ section */
+
+struct tc_multiq_qopt {
+ __u16 bands; /* Number of bands */
+ __u16 max_bands; /* Maximum number of queues */
+};
+
+/* PLUG section */
+
+#define TCQ_PLUG_BUFFER 0
+#define TCQ_PLUG_RELEASE_ONE 1
+#define TCQ_PLUG_RELEASE_INDEFINITE 2
+#define TCQ_PLUG_LIMIT 3
+
+struct tc_plug_qopt {
+ /* TCQ_PLUG_BUFFER: Insert a plug into the queue and
+ * buffer any incoming packets
+ * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
+ * to beginning of the next plug.
+ * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
+ * Stop buffering packets until the next TCQ_PLUG_BUFFER
+ * command is received (just act as a pass-thru queue).
+ * TCQ_PLUG_LIMIT: Increase/decrease queue size
+ */
+ int action;
+ __u32 limit;
+};
+
+/* TBF section */
+
+struct tc_tbf_qopt {
+ struct tc_ratespec rate;
+ struct tc_ratespec peakrate;
+ __u32 limit;
+ __u32 buffer;
+ __u32 mtu;
+};
+
+enum {
+ TCA_TBF_UNSPEC,
+ TCA_TBF_PARMS,
+ TCA_TBF_RTAB,
+ TCA_TBF_PTAB,
+ TCA_TBF_RATE64,
+ TCA_TBF_PRATE64,
+ TCA_TBF_BURST,
+ TCA_TBF_PBURST,
+ TCA_TBF_PAD,
+ __TCA_TBF_MAX,
+};
+
+#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
+
+
+/* TEQL section */
+
+/* TEQL does not require any parameters */
+
+/* SFQ section */
+
+struct tc_sfq_qopt {
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+};
+
+struct tc_sfqred_stats {
+ __u32 prob_drop; /* Early drops, below max threshold */
+ __u32 forced_drop; /* Early drops, after max threshold */
+ __u32 prob_mark; /* Marked packets, below max threshold */
+ __u32 forced_mark; /* Marked packets, after max threshold */
+ __u32 prob_mark_head; /* Marked packets, below max threshold */
+ __u32 forced_mark_head;/* Marked packets, after max threshold */
+};
+
+struct tc_sfq_qopt_v1 {
+ struct tc_sfq_qopt v0;
+ unsigned int depth; /* max number of packets per flow */
+ unsigned int headdrop;
+/* SFQRED parameters */
+ __u32 limit; /* HARD maximal flow queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags;
+ __u32 max_P; /* probability, high resolution */
+/* SFQRED stats */
+ struct tc_sfqred_stats stats;
+};
+
+
+struct tc_sfq_xstats {
+ __s32 allot;
+};
+
+/* RED section */
+
+enum {
+ TCA_RED_UNSPEC,
+ TCA_RED_PARMS,
+ TCA_RED_STAB,
+ TCA_RED_MAX_P,
+ __TCA_RED_MAX,
+};
+
+#define TCA_RED_MAX (__TCA_RED_MAX - 1)
+
+struct tc_red_qopt {
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags;
+#define TC_RED_ECN 1
+#define TC_RED_HARDDROP 2
+#define TC_RED_ADAPTATIVE 4
+};
+
+struct tc_red_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+};
+
+/* GRED section */
+
+#define MAX_DPs 16
+
+enum {
+ TCA_GRED_UNSPEC,
+ TCA_GRED_PARMS,
+ TCA_GRED_STAB,
+ TCA_GRED_DPS,
+ TCA_GRED_MAX_P,
+ TCA_GRED_LIMIT,
+ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
+ __TCA_GRED_MAX,
+};
+
+#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_ENTRY_UNSPEC,
+ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
+ __TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_UNSPEC,
+ TCA_GRED_VQ_PAD,
+ TCA_GRED_VQ_DP, /* u32 */
+ TCA_GRED_VQ_STAT_BYTES, /* u64 */
+ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
+ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_PDROP, /* u32 */
+ TCA_GRED_VQ_STAT_OTHER, /* u32 */
+ TCA_GRED_VQ_FLAGS, /* u32 */
+ __TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
+struct tc_gred_qopt {
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ __u32 DP; /* up to 2^32 DPs */
+ __u32 backlog;
+ __u32 qave;
+ __u32 forced;
+ __u32 early;
+ __u32 other;
+ __u32 pdrop;
+ __u8 Wlog; /* log(W) */
+ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
+ __u8 Scell_log; /* cell size for idle damping */
+ __u8 prio; /* prio of this VQ */
+ __u32 packets;
+ __u32 bytesin;
+};
+
+/* gred setup */
+struct tc_gred_sopt {
+ __u32 DPs;
+ __u32 def_DP;
+ __u8 grio;
+ __u8 flags;
+ __u16 pad1;
+};
+
+/* CHOKe section */
+
+enum {
+ TCA_CHOKE_UNSPEC,
+ TCA_CHOKE_PARMS,
+ TCA_CHOKE_STAB,
+ TCA_CHOKE_MAX_P,
+ __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+ __u32 limit; /* Hard queue length (packets) */
+ __u32 qth_min; /* Min average threshold (packets) */
+ __u32 qth_max; /* Max average threshold (packets) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags; /* see RED flags */
+};
+
+struct tc_choke_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+ __u32 matched; /* Drops due to flow match */
+};
+
+/* HTB section */
+#define TC_HTB_NUMPRIO 8
+#define TC_HTB_MAXDEPTH 8
+#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
+
+struct tc_htb_opt {
+ struct tc_ratespec rate;
+ struct tc_ratespec ceil;
+ __u32 buffer;
+ __u32 cbuffer;
+ __u32 quantum;
+ __u32 level; /* out only */
+ __u32 prio;
+};
+struct tc_htb_glob {
+ __u32 version; /* to match HTB/TC */
+ __u32 rate2quantum; /* bps->quantum divisor */
+ __u32 defcls; /* default class number */
+ __u32 debug; /* debug flags */
+
+ /* stats */
+ __u32 direct_pkts; /* count of non shaped packets */
+};
+enum {
+ TCA_HTB_UNSPEC,
+ TCA_HTB_PARMS,
+ TCA_HTB_INIT,
+ TCA_HTB_CTAB,
+ TCA_HTB_RTAB,
+ TCA_HTB_DIRECT_QLEN,
+ TCA_HTB_RATE64,
+ TCA_HTB_CEIL64,
+ TCA_HTB_PAD,
+ __TCA_HTB_MAX,
+};
+
+#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
+
+struct tc_htb_xstats {
+ __u32 lends;
+ __u32 borrows;
+ __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
+ __s32 tokens;
+ __s32 ctokens;
+};
+
+/* HFSC section */
+
+struct tc_hfsc_qopt {
+ __u16 defcls; /* default class */
+};
+
+struct tc_service_curve {
+ __u32 m1; /* slope of the first segment in bps */
+ __u32 d; /* x-projection of the first segment in us */
+ __u32 m2; /* slope of the second segment in bps */
+};
+
+struct tc_hfsc_stats {
+ __u64 work; /* total work done */
+ __u64 rtwork; /* work done by real-time criteria */
+ __u32 period; /* current period */
+ __u32 level; /* class level in hierarchy */
+};
+
+enum {
+ TCA_HFSC_UNSPEC,
+ TCA_HFSC_RSC,
+ TCA_HFSC_FSC,
+ TCA_HFSC_USC,
+ __TCA_HFSC_MAX,
+};
+
+#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
+
+
+/* CBQ section */
+
+#define TC_CBQ_MAXPRIO 8
+#define TC_CBQ_MAXLEVEL 8
+#define TC_CBQ_DEF_EWMA 5
+
+struct tc_cbq_lssopt {
+ unsigned char change;
+ unsigned char flags;
+#define TCF_CBQ_LSS_BOUNDED 1
+#define TCF_CBQ_LSS_ISOLATED 2
+ unsigned char ewma_log;
+ unsigned char level;
+#define TCF_CBQ_LSS_FLAGS 1
+#define TCF_CBQ_LSS_EWMA 2
+#define TCF_CBQ_LSS_MAXIDLE 4
+#define TCF_CBQ_LSS_MINIDLE 8
+#define TCF_CBQ_LSS_OFFTIME 0x10
+#define TCF_CBQ_LSS_AVPKT 0x20
+ __u32 maxidle;
+ __u32 minidle;
+ __u32 offtime;
+ __u32 avpkt;
+};
+
+struct tc_cbq_wrropt {
+ unsigned char flags;
+ unsigned char priority;
+ unsigned char cpriority;
+ unsigned char __reserved;
+ __u32 allot;
+ __u32 weight;
+};
+
+struct tc_cbq_ovl {
+ unsigned char strategy;
+#define TC_CBQ_OVL_CLASSIC 0
+#define TC_CBQ_OVL_DELAY 1
+#define TC_CBQ_OVL_LOWPRIO 2
+#define TC_CBQ_OVL_DROP 3
+#define TC_CBQ_OVL_RCLASSIC 4
+ unsigned char priority2;
+ __u16 pad;
+ __u32 penalty;
+};
+
+struct tc_cbq_police {
+ unsigned char police;
+ unsigned char __res1;
+ unsigned short __res2;
+};
+
+struct tc_cbq_fopt {
+ __u32 split;
+ __u32 defmap;
+ __u32 defchange;
+};
+
+struct tc_cbq_xstats {
+ __u32 borrows;
+ __u32 overactions;
+ __s32 avgidle;
+ __s32 undertime;
+};
+
+enum {
+ TCA_CBQ_UNSPEC,
+ TCA_CBQ_LSSOPT,
+ TCA_CBQ_WRROPT,
+ TCA_CBQ_FOPT,
+ TCA_CBQ_OVL_STRATEGY,
+ TCA_CBQ_RATE,
+ TCA_CBQ_RTAB,
+ TCA_CBQ_POLICE,
+ __TCA_CBQ_MAX,
+};
+
+#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
+
+/* dsmark section */
+
+enum {
+ TCA_DSMARK_UNSPEC,
+ TCA_DSMARK_INDICES,
+ TCA_DSMARK_DEFAULT_INDEX,
+ TCA_DSMARK_SET_TC_INDEX,
+ TCA_DSMARK_MASK,
+ TCA_DSMARK_VALUE,
+ __TCA_DSMARK_MAX,
+};
+
+#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
+
+/* ATM section */
+
+enum {
+ TCA_ATM_UNSPEC,
+ TCA_ATM_FD, /* file/socket descriptor */
+ TCA_ATM_PTR, /* pointer to descriptor - later */
+ TCA_ATM_HDR, /* LL header */
+ TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
+ TCA_ATM_ADDR, /* PVC address (for output only) */
+ TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
+ __TCA_ATM_MAX,
+};
+
+#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
+
+/* Network emulator */
+
+enum {
+ TCA_NETEM_UNSPEC,
+ TCA_NETEM_CORR,
+ TCA_NETEM_DELAY_DIST,
+ TCA_NETEM_REORDER,
+ TCA_NETEM_CORRUPT,
+ TCA_NETEM_LOSS,
+ TCA_NETEM_RATE,
+ TCA_NETEM_ECN,
+ TCA_NETEM_RATE64,
+ TCA_NETEM_PAD,
+ TCA_NETEM_LATENCY64,
+ TCA_NETEM_JITTER64,
+ TCA_NETEM_SLOT,
+ TCA_NETEM_SLOT_DIST,
+ __TCA_NETEM_MAX,
+};
+
+#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
+
+struct tc_netem_qopt {
+ __u32 latency; /* added delay (us) */
+ __u32 limit; /* fifo limit (packets) */
+ __u32 loss; /* random packet loss (0=none ~0=100%) */
+ __u32 gap; /* re-ordering gap (0 for none) */
+ __u32 duplicate; /* random packet dup (0=none ~0=100%) */
+ __u32 jitter; /* random jitter in latency (us) */
+};
+
+struct tc_netem_corr {
+ __u32 delay_corr; /* delay correlation */
+ __u32 loss_corr; /* packet loss correlation */
+ __u32 dup_corr; /* duplicate correlation */
+};
+
+struct tc_netem_reorder {
+ __u32 probability;
+ __u32 correlation;
+};
+
+struct tc_netem_corrupt {
+ __u32 probability;
+ __u32 correlation;
+};
+
+struct tc_netem_rate {
+ __u32 rate; /* byte/s */
+ __s32 packet_overhead;
+ __u32 cell_size;
+ __s32 cell_overhead;
+};
+
+struct tc_netem_slot {
+ __s64 min_delay; /* nsec */
+ __s64 max_delay;
+ __s32 max_packets;
+ __s32 max_bytes;
+ __s64 dist_delay; /* nsec */
+ __s64 dist_jitter; /* nsec */
+};
+
+enum {
+ NETEM_LOSS_UNSPEC,
+ NETEM_LOSS_GI, /* General Intuitive - 4 state model */
+ NETEM_LOSS_GE, /* Gilbert Elliot models */
+ __NETEM_LOSS_MAX
+};
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
+
+/* State transition probabilities for 4 state model */
+struct tc_netem_gimodel {
+ __u32 p13;
+ __u32 p31;
+ __u32 p32;
+ __u32 p14;
+ __u32 p23;
+};
+
+/* Gilbert-Elliot models */
+struct tc_netem_gemodel {
+ __u32 p;
+ __u32 r;
+ __u32 h;
+ __u32 k1;
+};
+
+#define NETEM_DIST_SCALE 8192
+#define NETEM_DIST_MAX 16384
+
+/* DRR */
+
+enum {
+ TCA_DRR_UNSPEC,
+ TCA_DRR_QUANTUM,
+ __TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats {
+ __u32 deficit;
+};
+
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+enum {
+ TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
+ TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
+ __TC_MQPRIO_HW_OFFLOAD_MAX
+};
+
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
+
+enum {
+ TC_MQPRIO_MODE_DCB,
+ TC_MQPRIO_MODE_CHANNEL,
+ __TC_MQPRIO_MODE_MAX
+};
+
+#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
+
+enum {
+ TC_MQPRIO_SHAPER_DCB,
+ TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
+ __TC_MQPRIO_SHAPER_MAX
+};
+
+#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
+
+struct tc_mqprio_qopt {
+ __u8 num_tc;
+ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
+ __u8 hw;
+ __u16 count[TC_QOPT_MAX_QUEUE];
+ __u16 offset[TC_QOPT_MAX_QUEUE];
+};
+
+#define TC_MQPRIO_F_MODE 0x1
+#define TC_MQPRIO_F_SHAPER 0x2
+#define TC_MQPRIO_F_MIN_RATE 0x4
+#define TC_MQPRIO_F_MAX_RATE 0x8
+
+enum {
+ TCA_MQPRIO_UNSPEC,
+ TCA_MQPRIO_MODE,
+ TCA_MQPRIO_SHAPER,
+ TCA_MQPRIO_MIN_RATE64,
+ TCA_MQPRIO_MAX_RATE64,
+ __TCA_MQPRIO_MAX,
+};
+
+#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
+
+/* SFB */
+
+enum {
+ TCA_SFB_UNSPEC,
+ TCA_SFB_PARMS,
+ __TCA_SFB_MAX,
+};
+
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
+
+/*
+ * Note: increment, decrement are Q0.16 fixed-point values.
+ */
+struct tc_sfb_qopt {
+ __u32 rehash_interval; /* delay between hash move, in ms */
+ __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
+ __u32 max; /* max len of qlen_min */
+ __u32 bin_size; /* maximum queue length per bin */
+ __u32 increment; /* probability increment, (d1 in Blue) */
+ __u32 decrement; /* probability decrement, (d2 in Blue) */
+ __u32 limit; /* max SFB queue length */
+ __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
+ __u32 penalty_burst;
+};
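Since increment and decrement are Q0.16 fixed-point values, a probability p is encoded as roughly p * 65536, capped at SFB_MAX_PROB. A minimal sketch of the conversion (the helper and macro names are ours):

  #include <linux/types.h>

  #define MY_SFB_MAX_PROB 0xFFFF

  /* Encode a probability in [0.0, 1.0] as the Q0.16 value used by
   * tc_sfb_qopt.increment / .decrement; e.g. 0.00050 -> 33. */
  static __u32 prob_to_q0_16(double p)
  {
          __u32 v = (__u32)(p * 65536.0 + 0.5);

          return v > MY_SFB_MAX_PROB ? MY_SFB_MAX_PROB : v;
  }
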
+
+struct tc_sfb_xstats {
+ __u32 earlydrop;
+ __u32 penaltydrop;
+ __u32 bucketdrop;
+ __u32 queuedrop;
+ __u32 childdrop; /* drops in child qdisc */
+ __u32 marked;
+ __u32 maxqlen;
+ __u32 maxprob;
+ __u32 avgprob;
+};
+
+#define SFB_MAX_PROB 0xFFFF
+
+/* QFQ */
+enum {
+ TCA_QFQ_UNSPEC,
+ TCA_QFQ_WEIGHT,
+ TCA_QFQ_LMAX,
+ __TCA_QFQ_MAX
+};
+
+#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
+
+struct tc_qfq_stats {
+ __u32 weight;
+ __u32 lmax;
+};
+
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ TCA_CODEL_CE_THRESHOLD,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 count; /* how many drops we've done since the last time we
+ * entered dropping state
+ */
+ __u32 lastcount; /* count at entry to dropping state */
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
+ __s32 drop_next; /* time to drop next packet */
+ __u32 drop_overlimit; /* number of times max qdisc packet limit was hit */
+ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
+ __u32 dropping; /* are we in dropping state ? */
+ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
+};
+
+/* FQ_CODEL */
+
+enum {
+ TCA_FQ_CODEL_UNSPEC,
+ TCA_FQ_CODEL_TARGET,
+ TCA_FQ_CODEL_LIMIT,
+ TCA_FQ_CODEL_INTERVAL,
+ TCA_FQ_CODEL_ECN,
+ TCA_FQ_CODEL_FLOWS,
+ TCA_FQ_CODEL_QUANTUM,
+ TCA_FQ_CODEL_CE_THRESHOLD,
+ TCA_FQ_CODEL_DROP_BATCH_SIZE,
+ TCA_FQ_CODEL_MEMORY_LIMIT,
+ __TCA_FQ_CODEL_MAX
+};
+
+#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
+
+enum {
+ TCA_FQ_CODEL_XSTATS_QDISC,
+ TCA_FQ_CODEL_XSTATS_CLASS,
+};
+
+struct tc_fq_codel_qd_stats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 drop_overlimit; /* number of times max qdisc
+ * packet limit was hit
+ */
+ __u32 ecn_mark; /* number of packets we ECN marked
+ * instead of being dropped
+ */
+ __u32 new_flow_count; /* number of times packets
+ * created a 'new flow'
+ */
+ __u32 new_flows_len; /* count of flows in new list */
+ __u32 old_flows_len; /* count of flows in old list */
+ __u32 ce_mark; /* packets above ce_threshold */
+ __u32 memory_usage; /* in bytes */
+ __u32 drop_overmemory;
+};
+
+struct tc_fq_codel_cl_stats {
+ __s32 deficit;
+ __u32 ldelay; /* in-queue delay seen by most recently
+ * dequeued packet
+ */
+ __u32 count;
+ __u32 lastcount;
+ __u32 dropping;
+ __s32 drop_next;
+};
+
+struct tc_fq_codel_xstats {
+ __u32 type;
+ union {
+ struct tc_fq_codel_qd_stats qdisc_stats;
+ struct tc_fq_codel_cl_stats class_stats;
+ };
+};
+
+/* FQ */
+
+enum {
+ TCA_FQ_UNSPEC,
+
+ TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
+
+ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
+
+ TCA_FQ_QUANTUM, /* RR quantum */
+
+ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
+
+ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
+
+ TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
+
+ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
+
+ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
+
+ TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
+
+ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
+ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+
+ __TCA_FQ_MAX
+};
+
+#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+
+struct tc_fq_qd_stats {
+ __u64 gc_flows;
+ __u64 highprio_packets;
+ __u64 tcp_retrans;
+ __u64 throttled;
+ __u64 flows_plimit;
+ __u64 pkts_too_long;
+ __u64 allocation_errors;
+ __s64 time_next_delayed_flow;
+ __u32 flows;
+ __u32 inactive_flows;
+ __u32 throttled_flows;
+ __u32 unthrottle_latency_ns;
+ __u64 ce_mark; /* packets above ce_threshold */
+};
+
+/* Heavy-Hitter Filter */
+
+enum {
+ TCA_HHF_UNSPEC,
+ TCA_HHF_BACKLOG_LIMIT,
+ TCA_HHF_QUANTUM,
+ TCA_HHF_HH_FLOWS_LIMIT,
+ TCA_HHF_RESET_TIMEOUT,
+ TCA_HHF_ADMIT_BYTES,
+ TCA_HHF_EVICT_TIMEOUT,
+ TCA_HHF_NON_HH_WEIGHT,
+ __TCA_HHF_MAX
+};
+
+#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
+
+struct tc_hhf_xstats {
+ __u32 drop_overlimit; /* number of times max qdisc packet limit
+ * was hit
+ */
+ __u32 hh_overlimit; /* number of times max heavy-hitters was hit */
+ __u32 hh_tot_count; /* number of captured heavy-hitters so far */
+ __u32 hh_cur_count; /* number of current heavy-hitters */
+};
+
+/* PIE */
+enum {
+ TCA_PIE_UNSPEC,
+ TCA_PIE_TARGET,
+ TCA_PIE_LIMIT,
+ TCA_PIE_TUPDATE,
+ TCA_PIE_ALPHA,
+ TCA_PIE_BETA,
+ TCA_PIE_ECN,
+ TCA_PIE_BYTEMODE,
+ __TCA_PIE_MAX
+};
+#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
+
+struct tc_pie_xstats {
+ __u32 prob; /* current probability */
+ __u32 delay; /* current delay in ms */
+ __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to pie_action */
+ __u32 overlimit; /* dropped due to lack of space in queue */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn*/
+};
+
+/* CBS */
+struct tc_cbs_qopt {
+ __u8 offload;
+ __u8 _pad[3];
+ __s32 hicredit;
+ __s32 locredit;
+ __s32 idleslope;
+ __s32 sendslope;
+};
+
+enum {
+ TCA_CBS_UNSPEC,
+ TCA_CBS_PARMS,
+ __TCA_CBS_MAX,
+};
+
+#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+
+
+/* ETF */
+struct tc_etf_qopt {
+ __s32 delta;
+ __s32 clockid;
+ __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON BIT(0)
+#define TC_ETF_OFFLOAD_ON BIT(1)
+};
+
+enum {
+ TCA_ETF_UNSPEC,
+ TCA_ETF_PARMS,
+ __TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+ TCA_CAKE_UNSPEC,
+ TCA_CAKE_PAD,
+ TCA_CAKE_BASE_RATE64,
+ TCA_CAKE_DIFFSERV_MODE,
+ TCA_CAKE_ATM,
+ TCA_CAKE_FLOW_MODE,
+ TCA_CAKE_OVERHEAD,
+ TCA_CAKE_RTT,
+ TCA_CAKE_TARGET,
+ TCA_CAKE_AUTORATE,
+ TCA_CAKE_MEMORY,
+ TCA_CAKE_NAT,
+ TCA_CAKE_RAW,
+ TCA_CAKE_WASH,
+ TCA_CAKE_MPU,
+ TCA_CAKE_INGRESS,
+ TCA_CAKE_ACK_FILTER,
+ TCA_CAKE_SPLIT_GSO,
+ __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
+
+enum {
+ __TCA_CAKE_STATS_INVALID,
+ TCA_CAKE_STATS_PAD,
+ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+ TCA_CAKE_STATS_MEMORY_LIMIT,
+ TCA_CAKE_STATS_MEMORY_USED,
+ TCA_CAKE_STATS_AVG_NETOFF,
+ TCA_CAKE_STATS_MIN_NETLEN,
+ TCA_CAKE_STATS_MAX_NETLEN,
+ TCA_CAKE_STATS_MIN_ADJLEN,
+ TCA_CAKE_STATS_MAX_ADJLEN,
+ TCA_CAKE_STATS_TIN_STATS,
+ TCA_CAKE_STATS_DEFICIT,
+ TCA_CAKE_STATS_COBALT_COUNT,
+ TCA_CAKE_STATS_DROPPING,
+ TCA_CAKE_STATS_DROP_NEXT_US,
+ TCA_CAKE_STATS_P_DROP,
+ TCA_CAKE_STATS_BLUE_TIMER_US,
+ __TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+ __TCA_CAKE_TIN_STATS_INVALID,
+ TCA_CAKE_TIN_STATS_PAD,
+ TCA_CAKE_TIN_STATS_SENT_PACKETS,
+ TCA_CAKE_TIN_STATS_SENT_BYTES64,
+ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+ TCA_CAKE_TIN_STATS_TARGET_US,
+ TCA_CAKE_TIN_STATS_INTERVAL_US,
+ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+ TCA_CAKE_TIN_STATS_WAY_MISSES,
+ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+ TCA_CAKE_TIN_STATS_BULK_FLOWS,
+ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+ __TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+ CAKE_FLOW_NONE = 0,
+ CAKE_FLOW_SRC_IP,
+ CAKE_FLOW_DST_IP,
+ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+ CAKE_FLOW_FLOWS,
+ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_MAX,
+};
+
+enum {
+ CAKE_DIFFSERV_DIFFSERV3 = 0,
+ CAKE_DIFFSERV_DIFFSERV4,
+ CAKE_DIFFSERV_DIFFSERV8,
+ CAKE_DIFFSERV_BESTEFFORT,
+ CAKE_DIFFSERV_PRECEDENCE,
+ CAKE_DIFFSERV_MAX
+};
+
+enum {
+ CAKE_ACK_NONE = 0,
+ CAKE_ACK_FILTER,
+ CAKE_ACK_AGGRESSIVE,
+ CAKE_ACK_MAX
+};
+
+enum {
+ CAKE_ATM_NONE = 0,
+ CAKE_ATM_ATM,
+ CAKE_ATM_PTM,
+ CAKE_ATM_MAX
+};
+
+
+/* TAPRIO */
+enum {
+ TC_TAPRIO_CMD_SET_GATES = 0x00,
+ TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+ TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+ TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+ __TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ * [TCA_TAPRIO_SCHED_ENTRY]
+ * [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ * [TCA_TAPRIO_SCHED_ENTRY_GATES]
+ * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+ TCA_TAPRIO_SCHED_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY,
+ __TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+enum {
+ TCA_TAPRIO_ATTR_UNSPEC,
+ TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
+ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+ TCA_TAPRIO_PAD,
+ __TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
+#endif
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+/* Reset arm64 pointer authentication keys */
+#define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1UL << 0)
+# define PR_PAC_APIBKEY (1UL << 1)
+# define PR_PAC_APDAKEY (1UL << 2)
+# define PR_PAC_APDBKEY (1UL << 3)
+# define PR_PAC_APGAKEY (1UL << 4)
+
#endif /* _LINUX_PRCTL_H */
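
A minimal sketch of the new option (arm64 only; elsewhere the prctl() fails with EINVAL). Passing 0 as the key mask asks for all of the task's pointer-authentication keys to be regenerated; the fallback defines are only there so the sketch builds against older headers:

  #include <stdio.h>
  #include <sys/prctl.h>

  #ifndef PR_PAC_RESET_KEYS
  #define PR_PAC_RESET_KEYS 54
  #define PR_PAC_APIAKEY (1UL << 0)
  #define PR_PAC_APIBKEY (1UL << 1)
  #endif

  int main(void)
  {
          /* 0 as the key mask means "reset all keys". */
          if (prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0))
                  perror("PR_PAC_RESET_KEYS");

          /* Or reset only the instruction keys: */
          if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY | PR_PAC_APIBKEY, 0, 0, 0))
                  perror("PR_PAC_RESET_KEYS (IA/IB)");
          return 0;
  }
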
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*****************************************************************************/
+
+/*
+ * usbdevice_fs.h -- USB device file system.
+ *
+ * Copyright (C) 2000
+ * Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * History:
+ * 0.1 04.01.2000 Created
+ */
+
+/*****************************************************************************/
+
+#ifndef _UAPI_LINUX_USBDEVICE_FS_H
+#define _UAPI_LINUX_USBDEVICE_FS_H
+
+#include <linux/types.h>
+#include <linux/magic.h>
+
+/* --------------------------------------------------------------------- */
+
+/* usbdevfs ioctl codes */
+
+struct usbdevfs_ctrltransfer {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+ __u32 timeout; /* in milliseconds */
+ void __user *data;
+};
+
+struct usbdevfs_bulktransfer {
+ unsigned int ep;
+ unsigned int len;
+ unsigned int timeout; /* in milliseconds */
+ void __user *data;
+};
+
+struct usbdevfs_setinterface {
+ unsigned int interface;
+ unsigned int altsetting;
+};
+
+struct usbdevfs_disconnectsignal {
+ unsigned int signr;
+ void __user *context;
+};
+
+#define USBDEVFS_MAXDRIVERNAME 255
+
+struct usbdevfs_getdriver {
+ unsigned int interface;
+ char driver[USBDEVFS_MAXDRIVERNAME + 1];
+};
+
+struct usbdevfs_connectinfo {
+ unsigned int devnum;
+ unsigned char slow;
+};
+
+#define USBDEVFS_URB_SHORT_NOT_OK 0x01
+#define USBDEVFS_URB_ISO_ASAP 0x02
+#define USBDEVFS_URB_BULK_CONTINUATION 0x04
+#define USBDEVFS_URB_NO_FSBR 0x20 /* Not used */
+#define USBDEVFS_URB_ZERO_PACKET 0x40
+#define USBDEVFS_URB_NO_INTERRUPT 0x80
+
+#define USBDEVFS_URB_TYPE_ISO 0
+#define USBDEVFS_URB_TYPE_INTERRUPT 1
+#define USBDEVFS_URB_TYPE_CONTROL 2
+#define USBDEVFS_URB_TYPE_BULK 3
+
+struct usbdevfs_iso_packet_desc {
+ unsigned int length;
+ unsigned int actual_length;
+ unsigned int status;
+};
+
+struct usbdevfs_urb {
+ unsigned char type;
+ unsigned char endpoint;
+ int status;
+ unsigned int flags;
+ void __user *buffer;
+ int buffer_length;
+ int actual_length;
+ int start_frame;
+ union {
+ int number_of_packets; /* Only used for isoc urbs */
+ unsigned int stream_id; /* Only used with bulk streams */
+ };
+ int error_count;
+ unsigned int signr; /* signal to be sent on completion,
+ or 0 if none should be sent. */
+ void __user *usercontext;
+ struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+};
+
+/* ioctls for talking directly to drivers */
+struct usbdevfs_ioctl {
+ int ifno; /* interface 0..N ; negative numbers reserved */
+ int ioctl_code; /* MUST encode size + direction of data so the
+ * macros in <asm/ioctl.h> give correct values */
+ void __user *data; /* param buffer (in, or out) */
+};
+
+/* You can do most things with hubs just through control messages,
+ * except find out what device connects to what port. */
+struct usbdevfs_hub_portinfo {
+ char nports; /* number of downstream ports in this hub */
+ char port [127]; /* e.g. port 3 connects to device 27 */
+};
+
+/* System and bus capability flags */
+#define USBDEVFS_CAP_ZERO_PACKET 0x01
+#define USBDEVFS_CAP_BULK_CONTINUATION 0x02
+#define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04
+#define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08
+#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
+#define USBDEVFS_CAP_MMAP 0x20
+#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40
+
+/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
+
+/* disconnect-and-claim if the driver matches the driver field */
+#define USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER 0x01
+/* disconnect-and-claim except when the driver matches the driver field */
+#define USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER 0x02
+
+struct usbdevfs_disconnect_claim {
+ unsigned int interface;
+ unsigned int flags;
+ char driver[USBDEVFS_MAXDRIVERNAME + 1];
+};
+
+struct usbdevfs_streams {
+ unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */
+ unsigned int num_eps;
+ unsigned char eps[0];
+};
+
+/*
+ * USB_SPEED_* values returned by USBDEVFS_GET_SPEED are defined in
+ * linux/usb/ch9.h
+ */
+
+#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer)
+#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
+#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer)
+#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32)
+#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int)
+#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface)
+#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int)
+#define USBDEVFS_GETDRIVER _IOW('U', 8, struct usbdevfs_getdriver)
+#define USBDEVFS_SUBMITURB _IOR('U', 10, struct usbdevfs_urb)
+#define USBDEVFS_SUBMITURB32 _IOR('U', 10, struct usbdevfs_urb32)
+#define USBDEVFS_DISCARDURB _IO('U', 11)
+#define USBDEVFS_REAPURB _IOW('U', 12, void *)
+#define USBDEVFS_REAPURB32 _IOW('U', 12, __u32)
+#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *)
+#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32)
+#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal)
+#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32)
+#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int)
+#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int)
+#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo)
+#define USBDEVFS_IOCTL _IOWR('U', 18, struct usbdevfs_ioctl)
+#define USBDEVFS_IOCTL32 _IOWR('U', 18, struct usbdevfs_ioctl32)
+#define USBDEVFS_HUB_PORTINFO _IOR('U', 19, struct usbdevfs_hub_portinfo)
+#define USBDEVFS_RESET _IO('U', 20)
+#define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int)
+#define USBDEVFS_DISCONNECT _IO('U', 22)
+#define USBDEVFS_CONNECT _IO('U', 23)
+#define USBDEVFS_CLAIM_PORT _IOR('U', 24, unsigned int)
+#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int)
+#define USBDEVFS_GET_CAPABILITIES _IOR('U', 26, __u32)
+#define USBDEVFS_DISCONNECT_CLAIM _IOR('U', 27, struct usbdevfs_disconnect_claim)
+#define USBDEVFS_ALLOC_STREAMS _IOR('U', 28, struct usbdevfs_streams)
+#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams)
+#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32)
+#define USBDEVFS_GET_SPEED _IO('U', 31)
+
+#endif /* _UAPI_LINUX_USBDEVICE_FS_H */
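
A minimal sketch of USBDEVFS_CONTROL, issuing a standard GET_DESCRIPTOR(device) request on an already-opened /dev/bus/usb/BBB/DDD node (the file descriptor and buffer come from the caller):

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/usbdevice_fs.h>

  /* Returns the number of bytes transferred, or -1 with errno set. */
  static int get_device_descriptor(int usbfd, unsigned char *buf, unsigned int len)
  {
          struct usbdevfs_ctrltransfer ct;

          memset(&ct, 0, sizeof(ct));
          ct.bRequestType = 0x80;      /* IN | standard | device */
          ct.bRequest     = 0x06;      /* GET_DESCRIPTOR */
          ct.wValue       = 0x0100;    /* descriptor type 1 (device), index 0 */
          ct.wIndex       = 0;
          ct.wLength      = len;
          ct.timeout      = 1000;      /* milliseconds */
          ct.data         = buf;

          return ioctl(usbfd, USBDEVFS_CONTROL, &ct);
  }
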
* device configuration.
*/
+#include <linux/vhost_types.h>
#include <linux/types.h>
-#include <linux/compiler.h>
#include <linux/ioctl.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio_ring.h>
-
-struct vhost_vring_state {
- unsigned int index;
- unsigned int num;
-};
-
-struct vhost_vring_file {
- unsigned int index;
- int fd; /* Pass -1 to unbind from file. */
-
-};
-
-struct vhost_vring_addr {
- unsigned int index;
- /* Option flags. */
- unsigned int flags;
- /* Flag values: */
- /* Whether log address is valid. If set enables logging. */
-#define VHOST_VRING_F_LOG 0
-
- /* Start of array of descriptors (virtually contiguous) */
- __u64 desc_user_addr;
- /* Used structure address. Must be 32 bit aligned */
- __u64 used_user_addr;
- /* Available structure address. Must be 16 bit aligned */
- __u64 avail_user_addr;
- /* Logging support. */
- /* Log writes to used structure, at offset calculated from specified
- * address. Address must be 32 bit aligned. */
- __u64 log_guest_addr;
-};
-
-/* no alignment requirement */
-struct vhost_iotlb_msg {
- __u64 iova;
- __u64 size;
- __u64 uaddr;
-#define VHOST_ACCESS_RO 0x1
-#define VHOST_ACCESS_WO 0x2
-#define VHOST_ACCESS_RW 0x3
- __u8 perm;
-#define VHOST_IOTLB_MISS 1
-#define VHOST_IOTLB_UPDATE 2
-#define VHOST_IOTLB_INVALIDATE 3
-#define VHOST_IOTLB_ACCESS_FAIL 4
- __u8 type;
-};
-
-#define VHOST_IOTLB_MSG 0x1
-#define VHOST_IOTLB_MSG_V2 0x2
-
-struct vhost_msg {
- int type;
- union {
- struct vhost_iotlb_msg iotlb;
- __u8 padding[64];
- };
-};
-
-struct vhost_msg_v2 {
- __u32 type;
- __u32 reserved;
- union {
- struct vhost_iotlb_msg iotlb;
- __u8 padding[64];
- };
-};
-
-struct vhost_memory_region {
- __u64 guest_phys_addr;
- __u64 memory_size; /* bytes */
- __u64 userspace_addr;
- __u64 flags_padding; /* No flags are currently specified. */
-};
-
-/* All region addresses and sizes must be 4K aligned. */
-#define VHOST_PAGE_SIZE 0x1000
-
-struct vhost_memory {
- __u32 nregions;
- __u32 padding;
- struct vhost_memory_region regions[0];
-};
/* ioctls */
* device. This can be used to stop the ring (e.g. for migration). */
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
-/* Feature bits */
-/* Log all write descriptors. Can be changed while device is active. */
-#define VHOST_F_LOG_ALL 26
-/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
-#define VHOST_NET_F_VIRTIO_NET_HDR 27
-
-/* VHOST_SCSI specific definitions */
-
-/*
- * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
- *
- * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
- * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
- * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target.
- * All the targets under vhost_wwpn can be seen and used by guset.
- */
-
-#define VHOST_SCSI_ABI_VERSION 1
-
-struct vhost_scsi_target {
- int abi_version;
- char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */
- unsigned short vhost_tpgt;
- unsigned short reserved;
-};
+/* VHOST_SCSI specific defines */
#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
libbpf_version.h
FEATURE-DUMP.libbpf
+test_libbpf
The format of the version script and ways to handle ABI changes, including
incompatible ones, are described in detail in [1].
+Stand-alone build
+=================
+
+Under https://github.com/libbpf/libbpf there is a (semi-)automated
+mirror of the mainline kernel's version of libbpf for stand-alone builds.
+
+However, all changes to libbpf's code base must be upstreamed through
+the mainline kernel tree.
+
+License
+=======
+
+libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.
+
Links
=====
return syscall(__NR_bpf, cmd, attr, size);
}
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+{
+ int fd;
+
+ do {
+ fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+ } while (fd < 0 && errno == EAGAIN);
+
+ return fd;
+}
+
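With the wrapper above, every BPF_PROG_LOAD issued through libbpf is retried while the kernel returns EAGAIN, so callers no longer need their own loop. A minimal sketch of such a caller, assuming the usual libbpf headers (bpf/bpf.h) are on the include path:

  #include <linux/bpf.h>
  #include <bpf/bpf.h>

  /* Load a trivial "return 0" socket filter; the EAGAIN retry now
   * happens inside libbpf rather than here. */
  static int load_trivial_prog(void)
  {
          struct bpf_insn insns[] = {
                  { .code = BPF_ALU64 | BPF_MOV | BPF_K,
                    .dst_reg = BPF_REG_0, .imm = 0 },     /* r0 = 0 */
                  { .code = BPF_JMP | BPF_EXIT },         /* exit   */
          };

          return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
                                  sizeof(insns) / sizeof(insns[0]),
                                  "GPL", 0, NULL, 0);
  }
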
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
memcpy(attr.prog_name, load_attr->name,
min(name_len, BPF_OBJ_NAME_LEN - 1));
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
return fd;
break;
}
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
goto done;
attr.log_size = log_buf_sz;
attr.log_level = 1;
log_buf[0] = 0;
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
free(finfo);
free(linfo);
attr.kern_version = kern_version;
attr.prog_flags = prog_flags;
- return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ return sys_bpf_prog_load(&attr, sizeof(attr));
}
int bpf_map_update_elem(int fd, const void *key, const void *value,
}
/**
- * tep_is_file_bigendian - get if the file is in big endian order
+ * tep_file_bigendian - get if the file is in big endian order
* @pevent: a handle to the tep_handle
*
* This returns if the file is in big endian order
* If @pevent is NULL, 0 is returned.
*/
-int tep_is_file_bigendian(struct tep_handle *pevent)
+int tep_file_bigendian(struct tep_handle *pevent)
{
if(pevent)
return pevent->file_bigendian;
#ifndef _PARSE_EVENTS_INT_H
#define _PARSE_EVENTS_INT_H
-struct cmdline;
+struct tep_cmdline;
struct cmdline_list;
struct func_map;
struct func_list;
int long_size;
int page_size;
- struct cmdline *cmdlines;
+ struct tep_cmdline *cmdlines;
struct cmdline_list *cmdlist;
int cmdline_count;
return calloc(1, sizeof(struct tep_print_arg));
}
-struct cmdline {
+struct tep_cmdline {
char *comm;
int pid;
};
static int cmdline_cmp(const void *a, const void *b)
{
- const struct cmdline *ca = a;
- const struct cmdline *cb = b;
+ const struct tep_cmdline *ca = a;
+ const struct tep_cmdline *cb = b;
if (ca->pid < cb->pid)
return -1;
{
struct cmdline_list *cmdlist = pevent->cmdlist;
struct cmdline_list *item;
- struct cmdline *cmdlines;
+ struct tep_cmdline *cmdlines;
int i;
cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
static const char *find_cmdline(struct tep_handle *pevent, int pid)
{
- const struct cmdline *comm;
- struct cmdline key;
+ const struct tep_cmdline *comm;
+ struct tep_cmdline key;
if (!pid)
return "<idle>";
*/
int tep_pid_is_registered(struct tep_handle *pevent, int pid)
{
- const struct cmdline *comm;
- struct cmdline key;
+ const struct tep_cmdline *comm;
+ struct tep_cmdline key;
if (!pid)
return 1;
* we must add this pid. This is much slower than when cmdlines
* are added before the array is initialized.
*/
-static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
+static int add_new_comm(struct tep_handle *pevent,
+ const char *comm, int pid, bool override)
{
- struct cmdline *cmdlines = pevent->cmdlines;
- const struct cmdline *cmdline;
- struct cmdline key;
+ struct tep_cmdline *cmdlines = pevent->cmdlines;
+ struct tep_cmdline *cmdline;
+ struct tep_cmdline key;
+ char *new_comm;
if (!pid)
return 0;
cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
sizeof(*pevent->cmdlines), cmdline_cmp);
if (cmdline) {
- errno = EEXIST;
- return -1;
+ if (!override) {
+ errno = EEXIST;
+ return -1;
+ }
+ new_comm = strdup(comm);
+ if (!new_comm) {
+ errno = ENOMEM;
+ return -1;
+ }
+ free(cmdline->comm);
+ cmdline->comm = new_comm;
+
+ return 0;
}
cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
return 0;
}
-/**
- * tep_register_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
- * @comm: the command line to register
- * @pid: the pid to map the command line to
- *
- * This adds a mapping to search for command line names with
- * a given pid. The comm is duplicated.
- */
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
+static int _tep_register_comm(struct tep_handle *pevent,
+ const char *comm, int pid, bool override)
{
struct cmdline_list *item;
if (pevent->cmdlines)
- return add_new_comm(pevent, comm, pid);
+ return add_new_comm(pevent, comm, pid, override);
item = malloc(sizeof(*item));
if (!item)
return 0;
}
+/**
+ * tep_register_comm - register a pid / comm mapping
+ * @pevent: handle for the pevent
+ * @comm: the command line to register
+ * @pid: the pid to map the command line to
+ *
+ * This adds a mapping to search for command line names with
+ * a given pid. The comm is duplicated. If a command with the same pid
+ * already exists, -1 is returned and errno is set to EEXIST.
+ */
+int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
+{
+ return _tep_register_comm(pevent, comm, pid, false);
+}
+
+/**
+ * tep_override_comm - register a pid / comm mapping
+ * @pevent: handle for the pevent
+ * @comm: the command line to register
+ * @pid: the pid to map the command line to
+ *
+ * This adds a mapping to search for command line names with
+ * a given pid. The comm is duplicated. If a command with the same pid
+ * already exists, the command string is updated with the new one.
+ */
+int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid)
+{
+ if (!pevent->cmdlines && cmdline_init(pevent)) {
+ errno = ENOMEM;
+ return -1;
+ }
+ return _tep_register_comm(pevent, comm, pid, true);
+}
+
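A minimal usage sketch of the two entry points (tep is an initialized struct tep_handle pointer): tep_register_comm() keeps the first mapping and fails with EEXIST on a duplicate pid, while the new tep_override_comm() replaces the stored string:

  #include <errno.h>
  #include <stdio.h>
  #include "event-parse.h"

  static void record_comm(struct tep_handle *tep, const char *comm, int pid)
  {
          if (tep_register_comm(tep, comm, pid) < 0 && errno == EEXIST) {
                  /* pid already known: replace the old command string */
                  if (tep_override_comm(tep, comm, pid) < 0)
                          perror("tep_override_comm");
          }
  }
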
int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
{
pevent->trace_clock = strdup(trace_clock);
}
/**
- * tep_data_event_from_type - find the event by a given type
- * @pevent: a handle to the pevent
- * @type: the type of the event.
- *
- * This returns the event form a given @type;
- */
-struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type)
-{
- return tep_find_event(pevent, type);
-}
-
-/**
* tep_data_pid - parse the PID from record
* @pevent: a handle to the pevent
* @rec: the record to parse
return comm;
}
-static struct cmdline *
-pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *next)
+static struct tep_cmdline *
+pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)next;
while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
cmdlist = cmdlist->next;
- return (struct cmdline *)cmdlist;
+ return (struct tep_cmdline *)cmdlist;
}
/**
* next pid.
* Also, it does a linear search, so it may be slow.
*/
-struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
- struct cmdline *next)
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+ struct tep_cmdline *next)
{
- struct cmdline *cmdline;
+ struct tep_cmdline *cmdline;
/*
* If the cmdlines have not been converted yet, then use
* Returns the pid for a give cmdline. If @cmdline is NULL, then
* -1 is returned.
*/
-int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline)
+int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
*
* If @id is >= 0, then it is used to find the event.
* else @sys_name and @event_name are used.
+ *
+ * Returns:
+ * TEP_REGISTER_SUCCESS_OVERWRITE if an existing handler is overwritten
+ * TEP_REGISTER_SUCCESS if a new handler is registered successfully
+ * negative TEP_ERRNO_... in case of an error
+ *
*/
int tep_register_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
event->handler = func;
event->context = context;
- return 0;
+ return TEP_REGISTER_SUCCESS_OVERWRITE;
not_found:
/* Save for later use. */
pevent->handlers = handle;
handle->context = context;
- return -1;
+ return TEP_REGISTER_SUCCESS;
}
static int handle_matches(struct event_handler *handler, int id,
{
struct tep_handle *pevent = calloc(1, sizeof(*pevent));
- if (pevent)
+ if (pevent) {
pevent->ref_count = 1;
+ pevent->host_bigendian = tep_host_bigendian();
+ }
return pevent;
}
tep_func_resolver_t *func, void *priv);
void tep_reset_function_resolver(struct tep_handle *pevent);
int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
+int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid);
int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
int tep_register_function(struct tep_handle *pevent, char *name,
unsigned long long addr, char *mod);
struct tep_event *event, const char *name,
struct tep_record *record, int err);
+enum tep_reg_handler {
+ TEP_REGISTER_SUCCESS = 0,
+ TEP_REGISTER_SUCCESS_OVERWRITE,
+};
+
int tep_register_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context);
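Callers that previously treated any non-zero return from tep_register_event_handler() as an error now need to check for the distinct success codes. A minimal sketch (my_handler is a hypothetical tep_event_handler_func supplied by the caller):

  #include <stdio.h>
  #include "event-parse.h"

  static int register_sched_switch(struct tep_handle *tep,
                                   tep_event_handler_func my_handler)
  {
          int ret = tep_register_event_handler(tep, -1, "sched", "sched_switch",
                                               my_handler, NULL);

          if (ret == TEP_REGISTER_SUCCESS_OVERWRITE)
                  fprintf(stderr, "replaced an existing sched_switch handler\n");

          return ret < 0 ? ret : 0;  /* negative TEP_ERRNO_* on failure */
  }
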
void tep_data_lat_fmt(struct tep_handle *pevent,
struct trace_seq *s, struct tep_record *record);
int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
-struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type);
int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
-struct cmdline;
-struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
- struct cmdline *next);
-int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline);
+struct tep_cmdline;
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+ struct tep_cmdline *next);
+int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline);
void tep_print_field(struct trace_seq *s, void *data,
struct tep_format_field *field);
void tep_set_long_size(struct tep_handle *pevent, int long_size);
int tep_get_page_size(struct tep_handle *pevent);
void tep_set_page_size(struct tep_handle *pevent, int _page_size);
-int tep_is_file_bigendian(struct tep_handle *pevent);
+int tep_file_bigendian(struct tep_handle *pevent);
void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
int tep_is_host_bigendian(struct tep_handle *pevent);
void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
* We can only use the structure if file is of the same
* endianness.
*/
- if (tep_is_file_bigendian(event->pevent) ==
+ if (tep_file_bigendian(event->pevent) ==
tep_is_host_bigendian(event->pevent)) {
trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
* @fmt: printf format string
*
* It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
+ * space, otherwise the number of characters printed, or a
+ * negative value in case of an error.
*
* The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
goto try_again;
}
- s->len += ret;
+ if (ret > 0)
+ s->len += ret;
- return 1;
+ return ret;
}
/**
* @s: trace sequence descriptor
* @fmt: printf format string
*
+ * It returns 0 if the trace oversizes the buffer's free
+ * space, otherwise the number of characters printed, or a
+ * negative value in case of an error.
+ *
* The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
* trace_seq_printf is used to store strings into a special
goto try_again;
}
- s->len += ret;
+ if (ret > 0)
+ s->len += ret;
- return len;
+ return ret;
}
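With the change above, trace_seq_printf()/trace_seq_vprintf() behave like vsnprintf()-style writers: 0 when the buffer has no free space, the number of characters appended on success, and a negative value on a formatting error. A minimal sketch of a caller that propagates the result (seq_add_pid is a hypothetical helper):

  #include "trace-seq.h"

  static int seq_add_pid(struct trace_seq *s, int pid)
  {
          int ret = trace_seq_printf(s, "pid=%d ", pid);

          if (ret <= 0)          /* 0: no room left, < 0: format error */
                  return -1;

          return ret;            /* characters appended */
  }
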
/**
ifeq ($(feature-libbfd), 1)
EXTLIBS += -lbfd
+else
+ # we are on a system that requires -liberty and (maybe) -lz
+ # to link against -lbfd; test each case individually here
# call all detections now so we get correct
# status in VF output
- $(call feature_check,liberty)
- $(call feature_check,liberty-z)
- $(call feature_check,cplus-demangle)
+ $(call feature_check,libbfd-liberty)
+ $(call feature_check,libbfd-liberty-z)
- ifeq ($(feature-liberty), 1)
- EXTLIBS += -liberty
+ ifeq ($(feature-libbfd-liberty), 1)
+ EXTLIBS += -lbfd -liberty
else
- ifeq ($(feature-liberty-z), 1)
- EXTLIBS += -liberty -lz
+ ifeq ($(feature-libbfd-liberty-z), 1)
+ EXTLIBS += -lbfd -liberty -lz
endif
endif
endif
else
ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
EXTLIBS += -liberty
- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
else
- ifneq ($(feature-libbfd), 1)
- ifneq ($(feature-liberty), 1)
- ifneq ($(feature-liberty-z), 1)
- # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
- # or any of 'bfd iberty z' trinity
- ifeq ($(feature-cplus-demangle), 1)
- EXTLIBS += -liberty
- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
- else
- msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
- CFLAGS += -DNO_DEMANGLE
- endif
- endif
+ ifeq ($(filter -liberty,$(EXTLIBS)),)
+ $(call feature_check,cplus-demangle)
+
+ # we have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+ # nor any of the 'bfd iberty z' trinity
+ ifeq ($(feature-cplus-demangle), 1)
+ EXTLIBS += -liberty
+ else
+ msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
+ CFLAGS += -DNO_DEMANGLE
endif
endif
endif
+
+ ifneq ($(filter -liberty,$(EXTLIBS)),)
+ CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+ endif
endif
ifneq ($(filter -lbfd,$(EXTLIBS)),)
$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl)
$(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@
+usbdevfs_ioctl_array := $(beauty_ioctl_outdir)/usbdevfs_ioctl_array.c
+usbdevfs_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/usbdevfs_ioctl.sh
+
+$(usbdevfs_ioctl_array): $(linux_uapi_dir)/usbdevice_fs.h $(usbdevfs_ioctl_tbl)
+ $(Q)$(SHELL) '$(usbdevfs_ioctl_tbl)' $(linux_uapi_dir) > $@
+
x86_arch_prctl_code_array := $(beauty_outdir)/x86_arch_prctl_code_array.c
x86_arch_prctl_code_tbl := $(srctree)/tools/perf/trace/beauty/x86_arch_prctl.sh
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
+# Create python binding output directory if not already present
+_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
+
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
$(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
$(PYTHON_WORD) util/setup.py \
--quiet build_ext; \
- mkdir -p $(OUTPUT)python && \
cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
please_set_SHELL_PATH_to_a_more_modern_shell:
$(mount_flags_array) \
$(perf_ioctl_array) \
$(prctl_option_array) \
+ $(usbdevfs_ioctl_array) \
$(x86_arch_prctl_code_array) \
$(rename_flags_array) \
$(arch_errno_name_array)
$(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
ifndef NO_PERF_READ_VDSO32
-$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-vdso-map.c
+$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-map.c
$(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
endif
ifndef NO_PERF_READ_VDSOX32
-$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c
+$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-map.c
$(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
endif
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
$(OUTPUT)$(prctl_option_array) \
+ $(OUTPUT)$(usbdevfs_ioctl_array) \
$(OUTPUT)$(x86_arch_prctl_code_array) \
$(OUTPUT)$(rename_flags_array) \
$(OUTPUT)$(arch_errno_name_array)
libperf-y += regs_load.o
libperf-y += dwarf-unwind.o
+libperf-y += vectors-page.o
libperf-y += arch-tests.o
},
#endif
{
+ .desc = "Vectors page",
+ .func = test__vectors_page,
+ },
+ {
.func = NULL,
},
};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+#include <linux/compiler.h>
+
+#include "debug.h"
+#include "tests/tests.h"
+#include "util/find-map.c"
+
+#define VECTORS__MAP_NAME "[vectors]"
+
+int test__vectors_page(struct test *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ void *start, *end;
+
+ if (find_map(&start, &end, VECTORS__MAP_NAME)) {
+ pr_err("%s not found, is CONFIG_KUSER_HELPERS enabled?\n",
+ VECTORS__MAP_NAME);
+ return TEST_FAIL;
+ }
+
+ return TEST_OK;
+}
out := $(OUTPUT)arch/powerpc/include/generated/asm
header32 := $(out)/syscalls_32.c
header64 := $(out)/syscalls_64.c
-sysdef := $(srctree)/tools/arch/powerpc/include/uapi/asm/unistd.h
-sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls/
+syskrn := $(srctree)/arch/powerpc/kernel/syscalls/syscall.tbl
+sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls
+sysdef := $(sysprf)/syscall.tbl
systbl := $(sysprf)/mksyscalltbl
# Create output directory if not already present
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
$(header64): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '64' '$(CC)' $(sysdef) > $@
+ @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ (diff -B $(sysdef) $(syskrn) >/dev/null) \
+ || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
+ $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@
$(header32): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '32' '$(CC)' $(sysdef) > $@
+ @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ (diff -B $(sysdef) $(syskrn) >/dev/null) \
+ || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
+ $(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@
clean::
$(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64)
# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
wordsize=$1
-gcc=$2
-input=$3
+SYSCALL_TBL=$2
-if ! test -r $input; then
+if ! test -r $SYSCALL_TBL; then
echo "Could not read input file" >&2
exit 1
fi
create_table()
{
local wordsize=$1
- local max_nr
+ local max_nr nr abi sc discard
+ max_nr=-1
+ nr=0
echo "static const char *syscalltbl_powerpc_${wordsize}[] = {"
- while read sc nr; do
- printf '\t[%d] = "%s",\n' $nr $sc
- max_nr=$nr
+ while read nr abi sc discard; do
+ if [ "$max_nr" -lt "$nr" ]; then
+ printf '\t[%d] = "%s",\n' $nr $sc
+ max_nr=$nr
+ fi
done
echo '};'
echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr"
}
-$gcc -m${wordsize} -E -dM -x c $input \
- |sed -ne 's/^#define __NR_//p' \
- |sort -t' ' -k2 -nu \
+grep -E "^[[:digit:]]+[[:space:]]+(common|spu|nospu|${wordsize})" $SYSCALL_TBL \
+ |sort -k1 -n \
|create_table ${wordsize}
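For reference, a minimal sketch of what the rewritten mksyscalltbl now emits for wordsize 64 when fed the syscall.tbl added below (first few entries only; the MAX_ID define is elided):

static const char *syscalltbl_powerpc_64[] = {
	[0] = "restart_syscall",
	[1] = "exit",
	[2] = "fork",
	[3] = "read",
};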
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for powerpc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, spu, nospu, 64, or 32 for this file.
+#
+0 nospu restart_syscall sys_restart_syscall
+1 nospu exit sys_exit
+2 nospu fork ppc_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 nospu execve sys_execve compat_sys_execve
+12 common chdir sys_chdir
+13 common time sys_time compat_sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown
+17 common break sys_ni_syscall
+18 32 oldstat sys_stat sys_ni_syscall
+18 64 oldstat sys_ni_syscall
+18 spu oldstat sys_ni_syscall
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 nospu mount sys_mount compat_sys_mount
+22 32 umount sys_oldumount
+22 64 umount sys_ni_syscall
+22 spu umount sys_ni_syscall
+23 common setuid sys_setuid
+24 common getuid sys_getuid
+25 common stime sys_stime compat_sys_stime
+26 nospu ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 32 oldfstat sys_fstat sys_ni_syscall
+28 64 oldfstat sys_ni_syscall
+28 spu oldfstat sys_ni_syscall
+29 nospu pause sys_pause
+30 nospu utime sys_utime compat_sys_utime
+31 common stty sys_ni_syscall
+32 common gtty sys_ni_syscall
+33 common access sys_access
+34 common nice sys_nice
+35 common ftime sys_ni_syscall
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times compat_sys_times
+44 common prof sys_ni_syscall
+45 common brk sys_brk
+46 common setgid sys_setgid
+47 common getgid sys_getgid
+48 nospu signal sys_signal
+49 common geteuid sys_geteuid
+50 common getegid sys_getegid
+51 nospu acct sys_acct
+52 nospu umount2 sys_umount
+53 common lock sys_ni_syscall
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+56 common mpx sys_ni_syscall
+57 common setpgid sys_setpgid
+58 common ulimit sys_ni_syscall
+59 32 oldolduname sys_olduname
+59 64 oldolduname sys_ni_syscall
+59 spu oldolduname sys_ni_syscall
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 nospu ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 32 sigaction sys_sigaction compat_sys_sigaction
+67 64 sigaction sys_ni_syscall
+67 spu sigaction sys_ni_syscall
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid
+71 common setregid sys_setregid
+72 32 sigsuspend sys_sigsuspend
+72 64 sigsuspend sys_ni_syscall
+72 spu sigsuspend sys_ni_syscall
+73 32 sigpending sys_sigpending compat_sys_sigpending
+73 64 sigpending sys_ni_syscall
+73 spu sigpending sys_ni_syscall
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+76 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit
+76 64 getrlimit sys_ni_syscall
+76 spu getrlimit sys_ni_syscall
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 common getgroups sys_getgroups
+81 common setgroups sys_setgroups
+82 32 select ppc_select sys_ni_syscall
+82 64 select sys_ni_syscall
+82 spu select sys_ni_syscall
+83 common symlink sys_symlink
+84 32 oldlstat sys_lstat sys_ni_syscall
+84 64 oldlstat sys_ni_syscall
+84 spu oldlstat sys_ni_syscall
+85 common readlink sys_readlink
+86 nospu uselib sys_uselib
+87 nospu swapon sys_swapon
+88 nospu reboot sys_reboot
+89 32 readdir sys_old_readdir compat_sys_old_readdir
+89 64 readdir sys_ni_syscall
+89 spu readdir sys_ni_syscall
+90 common mmap sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+98 common profil sys_ni_syscall
+99 nospu statfs sys_statfs compat_sys_statfs
+100 nospu fstatfs sys_fstatfs compat_sys_fstatfs
+101 common ioperm sys_ni_syscall
+102 common socketcall sys_socketcall compat_sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common stat sys_newstat compat_sys_newstat
+107 common lstat sys_newlstat compat_sys_newlstat
+108 common fstat sys_newfstat compat_sys_newfstat
+109 32 olduname sys_uname
+109 64 olduname sys_ni_syscall
+109 spu olduname sys_ni_syscall
+110 common iopl sys_ni_syscall
+111 common vhangup sys_vhangup
+112 common idle sys_ni_syscall
+113 common vm86 sys_ni_syscall
+114 common wait4 sys_wait4 compat_sys_wait4
+115 nospu swapoff sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+117 nospu ipc sys_ipc compat_sys_ipc
+118 common fsync sys_fsync
+119 32 sigreturn sys_sigreturn compat_sys_sigreturn
+119 64 sigreturn sys_ni_syscall
+119 spu sigreturn sys_ni_syscall
+120 nospu clone ppc_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common modify_ldt sys_ni_syscall
+124 common adjtimex sys_adjtimex compat_sys_adjtimex
+125 common mprotect sys_mprotect
+126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+126 64 sigprocmask sys_ni_syscall
+126 spu sigprocmask sys_ni_syscall
+127 common create_module sys_ni_syscall
+128 nospu init_module sys_init_module
+129 nospu delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 nospu quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 32 personality sys_personality ppc64_personality
+136 64 personality ppc64_personality
+136 spu personality ppc64_personality
+137 common afs_syscall sys_ni_syscall
+138 common setfsuid sys_setfsuid
+139 common setfsgid sys_setfsgid
+140 common _llseek sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 common _newselect sys_select compat_sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv compat_sys_readv
+146 common writev sys_writev compat_sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 nospu _sysctl sys_sysctl compat_sys_sysctl
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep compat_sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid
+165 common getresuid sys_getresuid
+166 common query_module sys_ni_syscall
+167 common poll sys_poll
+168 common nfsservctl sys_ni_syscall
+169 common setresgid sys_setresgid
+170 common getresgid sys_getresgid
+171 common prctl sys_prctl
+172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+179 common pread64 sys_pread64 compat_sys_pread64
+180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+181 common chown sys_chown
+182 common getcwd sys_getcwd
+183 common capget sys_capget
+184 common capset sys_capset
+185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack
+186 32 sendfile sys_sendfile compat_sys_sendfile
+186 64 sendfile sys_sendfile64
+186 spu sendfile sys_sendfile64
+187 common getpmsg sys_ni_syscall
+188 common putpmsg sys_ni_syscall
+189 nospu vfork ppc_vfork
+190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
+191 common readahead sys_readahead compat_sys_readahead
+192 32 mmap2 sys_mmap2 compat_sys_mmap2
+193 32 truncate64 sys_truncate64 compat_sys_truncate64
+194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+195 32 stat64 sys_stat64
+196 32 lstat64 sys_lstat64
+197 32 fstat64 sys_fstat64
+198 nospu pciconfig_read sys_pciconfig_read
+199 nospu pciconfig_write sys_pciconfig_write
+200 nospu pciconfig_iobase sys_pciconfig_iobase
+201 common multiplexer sys_ni_syscall
+202 common getdents64 sys_getdents64
+203 common pivot_root sys_pivot_root
+204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+205 common madvise sys_madvise
+206 common mincore sys_mincore
+207 common gettid sys_gettid
+208 common tkill sys_tkill
+209 common setxattr sys_setxattr
+210 common lsetxattr sys_lsetxattr
+211 common fsetxattr sys_fsetxattr
+212 common getxattr sys_getxattr
+213 common lgetxattr sys_lgetxattr
+214 common fgetxattr sys_fgetxattr
+215 common listxattr sys_listxattr
+216 common llistxattr sys_llistxattr
+217 common flistxattr sys_flistxattr
+218 common removexattr sys_removexattr
+219 common lremovexattr sys_lremovexattr
+220 common fremovexattr sys_fremovexattr
+221 common futex sys_futex compat_sys_futex
+222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+# 224 unused
+225 common tuxcall sys_ni_syscall
+226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64
+227 common io_setup sys_io_setup compat_sys_io_setup
+228 common io_destroy sys_io_destroy
+229 common io_getevents sys_io_getevents compat_sys_io_getevents
+230 common io_submit sys_io_submit compat_sys_io_submit
+231 common io_cancel sys_io_cancel
+232 nospu set_tid_address sys_set_tid_address
+233 common fadvise64 sys_fadvise64 ppc32_fadvise64
+234 nospu exit_group sys_exit_group
+235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+236 common epoll_create sys_epoll_create
+237 common epoll_ctl sys_epoll_ctl
+238 common epoll_wait sys_epoll_wait
+239 common remap_file_pages sys_remap_file_pages
+240 common timer_create sys_timer_create compat_sys_timer_create
+241 common timer_settime sys_timer_settime compat_sys_timer_settime
+242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+243 common timer_getoverrun sys_timer_getoverrun
+244 common timer_delete sys_timer_delete
+245 common clock_settime sys_clock_settime compat_sys_clock_settime
+246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
+247 common clock_getres sys_clock_getres compat_sys_clock_getres
+248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+249 32 swapcontext ppc_swapcontext ppc32_swapcontext
+249 64 swapcontext ppc64_swapcontext
+249 spu swapcontext sys_ni_syscall
+250 common tgkill sys_tgkill
+251 common utimes sys_utimes compat_sys_utimes
+252 common statfs64 sys_statfs64 compat_sys_statfs64
+253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+254 32 fadvise64_64 ppc_fadvise64_64
+254 spu fadvise64_64 sys_ni_syscall
+255 common rtas sys_rtas
+256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
+256 64 sys_debug_setcontext sys_ni_syscall
+256 spu sys_debug_setcontext sys_ni_syscall
+# 257 reserved for vserver
+258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
+259 nospu mbind sys_mbind compat_sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+262 nospu mq_open sys_mq_open compat_sys_mq_open
+263 nospu mq_unlink sys_mq_unlink
+264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+266 nospu mq_notify sys_mq_notify compat_sys_mq_notify
+267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+268 nospu kexec_load sys_kexec_load compat_sys_kexec_load
+269 nospu add_key sys_add_key
+270 nospu request_key sys_request_key
+271 nospu keyctl sys_keyctl compat_sys_keyctl
+272 nospu waitid sys_waitid compat_sys_waitid
+273 nospu ioprio_set sys_ioprio_set
+274 nospu ioprio_get sys_ioprio_get
+275 nospu inotify_init sys_inotify_init
+276 nospu inotify_add_watch sys_inotify_add_watch
+277 nospu inotify_rm_watch sys_inotify_rm_watch
+278 nospu spu_run sys_spu_run
+279 nospu spu_create sys_spu_create
+280 nospu pselect6 sys_pselect6 compat_sys_pselect6
+281 nospu ppoll sys_ppoll compat_sys_ppoll
+282 common unshare sys_unshare
+283 common splice sys_splice
+284 common tee sys_tee
+285 common vmsplice sys_vmsplice compat_sys_vmsplice
+286 common openat sys_openat compat_sys_openat
+287 common mkdirat sys_mkdirat
+288 common mknodat sys_mknodat
+289 common fchownat sys_fchownat
+290 common futimesat sys_futimesat compat_sys_futimesat
+291 32 fstatat64 sys_fstatat64
+291 64 newfstatat sys_newfstatat
+291 spu newfstatat sys_newfstatat
+292 common unlinkat sys_unlinkat
+293 common renameat sys_renameat
+294 common linkat sys_linkat
+295 common symlinkat sys_symlinkat
+296 common readlinkat sys_readlinkat
+297 common fchmodat sys_fchmodat
+298 common faccessat sys_faccessat
+299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+301 common move_pages sys_move_pages compat_sys_move_pages
+302 common getcpu sys_getcpu
+303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+304 common utimensat sys_utimensat compat_sys_utimensat
+305 common signalfd sys_signalfd compat_sys_signalfd
+306 common timerfd_create sys_timerfd_create
+307 common eventfd sys_eventfd
+308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
+309 nospu fallocate sys_fallocate compat_sys_fallocate
+310 nospu subpage_prot sys_subpage_prot
+311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+313 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+314 common eventfd2 sys_eventfd2
+315 common epoll_create1 sys_epoll_create1
+316 common dup3 sys_dup3
+317 common pipe2 sys_pipe2
+318 nospu inotify_init1 sys_inotify_init1
+319 common perf_event_open sys_perf_event_open
+320 common preadv sys_preadv compat_sys_preadv
+321 common pwritev sys_pwritev compat_sys_pwritev
+322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+323 nospu fanotify_init sys_fanotify_init
+324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+325 common prlimit64 sys_prlimit64
+326 common socket sys_socket
+327 common bind sys_bind
+328 common connect sys_connect
+329 common listen sys_listen
+330 common accept sys_accept
+331 common getsockname sys_getsockname
+332 common getpeername sys_getpeername
+333 common socketpair sys_socketpair
+334 common send sys_send
+335 common sendto sys_sendto
+336 common recv sys_recv compat_sys_recv
+337 common recvfrom sys_recvfrom compat_sys_recvfrom
+338 common shutdown sys_shutdown
+339 common setsockopt sys_setsockopt compat_sys_setsockopt
+340 common getsockopt sys_getsockopt compat_sys_getsockopt
+341 common sendmsg sys_sendmsg compat_sys_sendmsg
+342 common recvmsg sys_recvmsg compat_sys_recvmsg
+343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+344 common accept4 sys_accept4
+345 common name_to_handle_at sys_name_to_handle_at
+346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+348 common syncfs sys_syncfs
+349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+350 common setns sys_setns
+351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+353 nospu finit_module sys_finit_module
+354 nospu kcmp sys_kcmp
+355 common sched_setattr sys_sched_setattr
+356 common sched_getattr sys_sched_getattr
+357 common renameat2 sys_renameat2
+358 common seccomp sys_seccomp
+359 common getrandom sys_getrandom
+360 common memfd_create sys_memfd_create
+361 common bpf sys_bpf
+362 nospu execveat sys_execveat compat_sys_execveat
+363 32 switch_endian sys_ni_syscall
+363 64 switch_endian ppc_switch_endian
+363 spu switch_endian sys_ni_syscall
+364 common userfaultfd sys_userfaultfd
+365 common membarrier sys_membarrier
+378 nospu mlock2 sys_mlock2
+379 nospu copy_file_range sys_copy_file_range
+380 common preadv2 sys_preadv2 compat_sys_preadv2
+381 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+382 nospu kexec_file_load sys_kexec_file_load
+383 nospu statx sys_statx
+384 nospu pkey_alloc sys_pkey_alloc
+385 nospu pkey_free sys_pkey_free
+386 nospu pkey_mprotect sys_pkey_mprotect
+387 nospu rseq sys_rseq
+388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
[PERF_REG_POWERPC_TRAP] = "trap",
[PERF_REG_POWERPC_DAR] = "dar",
[PERF_REG_POWERPC_DSISR] = "dsisr",
- [PERF_REG_POWERPC_SIER] = "sier"
+ [PERF_REG_POWERPC_SIER] = "sier",
+ [PERF_REG_POWERPC_MMCRA] = "mmcra"
};
static inline const char *perf_reg_name(int id)
SMPL_REG(dar, PERF_REG_POWERPC_DAR),
SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
SMPL_REG(sier, PERF_REG_POWERPC_SIER),
+ SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
SMPL_REG_END
};
struct hist_entry he;
};
-static char const *coalesce_default = "pid,iaddr";
+static char const *coalesce_default = "iaddr";
struct perf_c2c {
struct perf_tool tool;
return hpp_list__parse(&c2c_hists->list, output, sort);
}
-#define DISPLAY_LINE_LIMIT 0.0005
+#define DISPLAY_LINE_LIMIT 0.001
static bool he__display(struct hist_entry *he, struct c2c_stats *stats)
{
/*
 * Print final block up to sample
+ *
+ * Due to pipeline delays the LBRs might be missing a branch
+ * or two, which can result in very large or negative blocks
+ * between final branch and sample. When this happens just
+ * continue walking after the last TO until we hit a branch.
*/
start = br->entries[0].to;
end = sample->ip;
+ if (end < start) {
+ /* Missing jump. Scan 128 bytes for the next branch */
+ end = start + 128;
+ }
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (len <= 0) {
machine, thread, &x.is64bit, &x.cpumode, false);
if (len <= 0)
goto out;
-
printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
dump_insn(&x, sample->ip, buffer, len, NULL));
if (PRINT_FIELD(SRCCODE))
dump_insn(&x, start + off, buffer + off, len - off, &ilen));
if (ilen == 0)
break;
+ if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) {
+ /*
+ * Hit a missing branch. Just stop.
+ */
+ printed += fprintf(fp, "\t... not reaching sample ...\n");
+ break;
+ }
if (PRINT_FIELD(SRCCODE))
print_srccode(thread, x.cpumode, start + off);
}
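A brief worked example of the missing-jump handling above, with purely illustrative addresses:

/*
 * If the last LBR entry's TO is 0x1000 but sample->ip is 0x0f80, then
 * end < start, so end is bumped to start + 128 = 0x1080.  Disassembly
 * then walks forward from 0x1000 and stops at the first instruction for
 * which arch_is_branch() returns true, printing the
 * "... not reaching sample ..." marker.
 */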
struct addr_location *al, FILE *fp)
{
struct perf_event_attr *attr = &evsel->attr;
- size_t depth = thread_stack__depth(thread);
+ size_t depth = thread_stack__depth(thread, sample->cpu);
const char *name = NULL;
static int spacing;
int len = 0;
struct thread *thread,
struct addr_location *al)
{
- int depth = thread_stack__depth(thread);
+ int depth = thread_stack__depth(thread, sample->cpu);
if (!symbol_conf.graph_function)
return true;
break;
}
}
- wait4(child_pid, &status, 0, &stat_config.ru_data);
+ if (child_pid != -1)
+ wait4(child_pid, &status, 0, &stat_config.ru_data);
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
- if (!perf_hpp_list.sym) {
- if (callchain->enabled) {
- ui__error("Selected -g but \"sym\" not present in --sort/-s.");
- return -EINVAL;
- }
- } else if (callchain->mode != CHAIN_NONE) {
+ if (callchain->mode != CHAIN_NONE) {
if (callchain_register_param(callchain) < 0) {
ui__error("Can't register callchain params.\n");
return -EINVAL;
#include <linux/stringify.h>
#include <linux/time64.h>
#include <fcntl.h>
+#include <sys/sysmacros.h>
#include "sane_ctype.h"
} stats;
unsigned int max_stack;
unsigned int min_stack;
- bool sort_events;
+ int raw_augmented_syscalls_args_size;
bool raw_augmented_syscalls;
+ bool sort_events;
bool not_ev_qualifier;
bool live;
bool full_time;
return -ENOENT;
}
-static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel)
+static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel, struct perf_evsel *tp)
{
struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
- if (evsel->priv != NULL) { /* field, sizeof_field, offsetof_field */
- if (__tp_field__init_uint(&sc->id, sizeof(long), sizeof(long long), evsel->needs_swap))
+ if (evsel->priv != NULL) {
+ struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
+ if (syscall_id == NULL)
+ syscall_id = perf_evsel__field(tp, "__syscall_nr");
+ if (syscall_id == NULL)
+ goto out_delete;
+ if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
goto out_delete;
return 0;
char *name;
} filename;
struct {
- int max;
- char **table;
- } paths;
+ int max;
+ struct file *table;
+ } files;
struct intlist *syscall_stats;
};
struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
if (ttrace)
- ttrace->paths.max = -1;
+ ttrace->files.max = -1;
ttrace->syscall_stats = intlist__new(NULL);
static const size_t trace__entry_str_size = 2048;
-static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
+static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
- struct thread_trace *ttrace = thread__priv(thread);
+ if (fd > ttrace->files.max) {
+ struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
- if (fd > ttrace->paths.max) {
- char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
-
- if (npath == NULL)
- return -1;
+ if (nfiles == NULL)
+ return NULL;
- if (ttrace->paths.max != -1) {
- memset(npath + ttrace->paths.max + 1, 0,
- (fd - ttrace->paths.max) * sizeof(char *));
+ if (ttrace->files.max != -1) {
+ memset(nfiles + ttrace->files.max + 1, 0,
+ (fd - ttrace->files.max) * sizeof(struct file));
} else {
- memset(npath, 0, (fd + 1) * sizeof(char *));
+ memset(nfiles, 0, (fd + 1) * sizeof(struct file));
}
- ttrace->paths.table = npath;
- ttrace->paths.max = fd;
+ ttrace->files.table = nfiles;
+ ttrace->files.max = fd;
}
- ttrace->paths.table[fd] = strdup(pathname);
+ return ttrace->files.table + fd;
+}
- return ttrace->paths.table[fd] != NULL ? 0 : -1;
+struct file *thread__files_entry(struct thread *thread, int fd)
+{
+ return thread_trace__files_entry(thread__priv(thread), fd);
+}
+
+static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
+{
+ struct thread_trace *ttrace = thread__priv(thread);
+ struct file *file = thread_trace__files_entry(ttrace, fd);
+
+ if (file != NULL) {
+ struct stat st;
+ if (stat(pathname, &st) == 0)
+ file->dev_maj = major(st.st_rdev);
+ file->pathname = strdup(pathname);
+ if (file->pathname)
+ return 0;
+ }
+
+ return -1;
}
static int thread__read_fd_path(struct thread *thread, int fd)
if (fd < 0)
return NULL;
- if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
+ if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
if (!trace->live)
return NULL;
++trace->stats.proc_getname;
return NULL;
}
- return ttrace->paths.table[fd];
+ return ttrace->files.table[fd].pathname;
}
size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
struct thread_trace *ttrace = thread__priv(arg->thread);
- if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
- zfree(&ttrace->paths.table[fd]);
+ if (ttrace && fd >= 0 && fd <= ttrace->files.max)
+ zfree(&ttrace->files.table[fd].pathname);
return printed;
}
{
struct thread_trace *ttrace;
size_t printed;
+ int len;
if (trace->failure_only || trace->current == NULL)
return 0;
return 0;
printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
- printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str);
- ttrace->entry_pending = false;
+ printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
+
+ if (len < trace->args_alignment - 4)
+ printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
+
+ printed += fprintf(trace->output, " ...\n");
+ ttrace->entry_pending = false;
++trace->nr_events_printed;
return printed;
return printed;
}
-static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
void *augmented_args = NULL;
/*
* For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
- * and there we get all 6 syscall args plus the tracepoint common
- * fields (sizeof(long)) and the syscall_nr (another long). So we check
- * if that is the case and if so don't look after the sc->args_size,
- * but always after the full raw_syscalls:sys_enter payload, which is
- * fixed.
+ * and there we get all 6 syscall args plus the tracepoint common fields
+ * that gets calculated at the start and the syscall_nr (another long).
+	 * that get calculated at the start and the syscall_nr (another long).
+ * sc->args_size but always after the full raw_syscalls:sys_enter payload,
+ * which is fixed.
*
* We'll revisit this later to pass s->args_size to the BPF augmenter
* (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
* use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
* traffic to just what is needed for each syscall.
*/
- int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
+ int args_size = raw_augmented_args_size ?: sc->args_size;
*augmented_args_size = sample->raw_size - args_size;
if (*augmented_args_size > 0)
* here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
*/
if (evsel != trace->syscalls.events.sys_enter)
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
fprintf(trace->output, "%s", msg);
err = 0;
if (ttrace->entry_pending) {
printed = fprintf(trace->output, "%s", ttrace->entry_str);
} else {
- fprintf(trace->output, " ... [");
+ printed += fprintf(trace->output, " ... [");
color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
- fprintf(trace->output, "]: %s()", sc->name);
+ printed += 9;
+ printed += fprintf(trace->output, "]: %s()", sc->name);
}
printed++; /* the closing ')' */
{
if (trace->syscalls.map)
return trace__set_ev_qualifier_bpf_filter(trace);
- return trace__set_ev_qualifier_tp_filter(trace);
+ if (trace->syscalls.events.sys_enter)
+ return trace__set_ev_qualifier_tp_filter(trace);
+ return 0;
}
static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
* syscall.
*/
if (trace.syscalls.events.augmented) {
- evsel = trace.syscalls.events.augmented;
-
- if (perf_evsel__init_augmented_syscall_tp(evsel) ||
- perf_evsel__init_augmented_syscall_tp_args(evsel))
- goto out;
- evsel->handler = trace__sys_enter;
-
evlist__for_each_entry(trace.evlist, evsel) {
bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
goto init_augmented_syscall_tp;
}
+ if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
+ struct perf_evsel *augmented = trace.syscalls.events.augmented;
+ if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
+ perf_evsel__init_augmented_syscall_tp_args(augmented))
+ goto out;
+ augmented->handler = trace__sys_enter;
+ }
+
if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+ struct syscall_tp *sc;
init_augmented_syscall_tp:
- perf_evsel__init_augmented_syscall_tp(evsel);
+ if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
+ goto out;
+ sc = evsel->priv;
+ /*
+ * For now with BPF raw_augmented we hook into
+ * raw_syscalls:sys_enter and there we get all
+ * 6 syscall args plus the tracepoint common
+ * fields and the syscall_nr (another long).
+ * So we check if that is the case and if so
+ * don't look after the sc->args_size but
+ * always after the full raw_syscalls:sys_enter
+ * payload, which is fixed.
+ *
+ * We'll revisit this later to pass
+ * s->args_size to the BPF augmenter (now
+ * tools/perf/examples/bpf/augmented_raw_syscalls.c,
+ * so that it copies only what we need for each
+ * syscall, like what happens when we use
+ * syscalls:sys_enter_NAME, so that we reduce
+ * the kernel/userspace traffic to just what is
+ * needed for each syscall.
+ */
+ if (trace.raw_augmented_syscalls)
+ trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
perf_evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
}
include/uapi/linux/kcmp.h
include/uapi/linux/kvm.h
include/uapi/linux/in.h
+include/uapi/linux/mount.h
include/uapi/linux/perf_event.h
include/uapi/linux/prctl.h
include/uapi/linux/sched.h
include/uapi/linux/stat.h
+include/uapi/linux/usbdevice_fs.h
include/uapi/linux/vhost.h
include/uapi/sound/asound.h
include/linux/bits.h
arch/powerpc/include/uapi/asm/errno.h
arch/sparc/include/uapi/asm/errno.h
arch/x86/include/uapi/asm/errno.h
-arch/powerpc/include/uapi/asm/unistd.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/__fls.h
#define VDSO__MAP_NAME "[vdso]"
/*
- * Include definition of find_vdso_map() also used in util/vdso.c for
+ * Include definition of find_map() also used in util/vdso.c for
* building perf.
*/
-#include "util/find-vdso-map.c"
+#include "util/find-map.c"
int main(void)
{
void *start, *end;
size_t size, written;
- if (find_vdso_map(&start, &end))
+ if (find_map(&start, &end, VDSO__MAP_NAME))
return 1;
size = end - start;
local verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
fi
}
struct thread *thread);
#endif
#endif
+
+#if defined(__arm__)
+int test__vectors_page(struct test *test, int subtest);
+#endif
+
#endif /* TESTS_H */
struct trace;
struct thread;
+struct file {
+ char *pathname;
+ int dev_maj;
+};
+
+struct file *thread__files_entry(struct thread *thread, int fd);
+
struct strarrays {
int nr_entries;
struct strarray **entries;
return scnprintf(bf, size, "(%#x, %#x, %#x)", 0xAE, nr, dir);
}
+static size_t ioctl__scnprintf_usbdevfs_cmd(int nr, int dir, char *bf, size_t size)
+{
+#include "trace/beauty/generated/ioctl/usbdevfs_ioctl_array.c"
+ static DEFINE_STRARRAY(usbdevfs_ioctl_cmds, "");
+
+ if (nr < strarray__usbdevfs_ioctl_cmds.nr_entries && strarray__usbdevfs_ioctl_cmds.entries[nr] != NULL)
+ return scnprintf(bf, size, "USBDEVFS_%s", strarray__usbdevfs_ioctl_cmds.entries[nr]);
+
+ return scnprintf(bf, size, "(%c, %#x, %#x)", 'U', nr, dir);
+}
+
static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, bool show_prefix)
{
const char *prefix = "_IOC_";
return printed + scnprintf(bf + printed, size - printed, ", %#x, %#x, %#x)", type, nr, sz);
}
+#ifndef USB_DEVICE_MAJOR
+#define USB_DEVICE_MAJOR 189
+#endif // USB_DEVICE_MAJOR
+
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
+ unsigned int fd = syscall_arg__val(arg, 0);
+ struct file *file = thread__files_entry(arg->thread, fd);
+
+ if (file != NULL) {
+ if (file->dev_maj == USB_DEVICE_MAJOR)
+ return ioctl__scnprintf_usbdevfs_cmd(_IOC_NR(cmd), _IOC_DIR(cmd), bf, size);
+ }
return ioctl__scnprintf_cmd(cmd, bf, size, arg->show_string_prefix);
}
}
P_MMAP_PROT(READ);
- P_MMAP_PROT(EXEC);
P_MMAP_PROT(WRITE);
+ P_MMAP_PROT(EXEC);
P_MMAP_PROT(SEM);
P_MMAP_PROT(GROWSDOWN);
P_MMAP_PROT(GROWSUP);
printf "static const char *mount_flags[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
-egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \
+egrep $regex ${header_dir}/mount.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \
sed -r "s/$regex/\2 \2 \1/g" | sort -n | \
xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*'
-egrep $regex ${header_dir}/fs.h | \
+egrep $regex ${header_dir}/mount.h | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[%s + 1] = \"%s\",\n"
printf "};\n"
[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *prctl_options[] = {\n"
-regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*'
+regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*'
egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
sed -r "s/$regex/\2 \1/g" | \
sort -n | xargs printf "\t[%s] = \"%s\",\n"
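A sketch of an entry the relaxed regex now generates that the old PR_([GS]ET\w+) pattern missed; the numeric value comes from current prctl.h and is shown purely for illustration:

static const char *prctl_options[] = {
	[47] = "CAP_AMBIENT",	/* PR_CAP_AMBIENT, previously skipped */
};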
static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
- const char *prefix = "SECOMP_SET_MODE_";
+ const char *prefix = "SECCOMP_SET_MODE_";
int op = arg->val;
size_t printed = 0;
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
- const char *prefix = "SECOMP_FILTER_FLAG_";
+ const char *prefix = "SECCOMP_FILTER_FLAG_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
--- /dev/null
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
+printf "static const char *usbdevfs_ioctl_cmds[] = {\n"
+regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
+egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n\n"
+printf "#if 0\n"
+printf "static const char *usbdevfs_ioctl_32_cmds[] = {\n"
+regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
+egrep $regex ${header_dir}/usbdevice_fs.h | egrep 'USBDEVFS_\w+32[[:space:]]' | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n"
+printf "#endif\n"
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
+ " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand",
opts->objdump_path ?: "objdump",
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
opts->show_asm_raw ? "" : "--no-show-raw",
- opts->annotate_src ? "-S" : "",
- symfs_filename, symfs_filename);
+ opts->annotate_src ? "-S" : "");
if (err < 0) {
pr_err("Failure allocating memory for the command to run\n");
close(stdout_fd[0]);
dup2(stdout_fd[1], 1);
close(stdout_fd[1]);
- execl("/bin/sh", "sh", "-c", command, NULL);
+ execl("/bin/sh", "sh", "-c", command, "--", symfs_filename,
+ NULL);
perror(command);
exit(-1);
}
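A sketch of what the child ends up running after this change; passing symfs_filename as a positional parameter keeps the shell from re-interpreting characters in the path:

/*
 * execl("/bin/sh", "sh", "-c", command, "--", symfs_filename, NULL)
 * behaves like:
 *
 *   sh -c 'objdump ... -C "$1" ... | grep -v "$1:" | expand' -- <symfs_filename>
 *
 * "--" becomes $0 inside the shell and symfs_filename becomes $1, so the
 * file name is expanded by the shell rather than pasted into the command
 * string as before.
 */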
cnode->cycles_count += node->branch_flags.cycles;
cnode->iter_count += node->nr_loop_iter;
cnode->iter_cycles += node->iter_cycles;
+ cnode->from_count++;
}
}
static int branch_from_str(char *bf, int bfsize,
u64 branch_count,
u64 cycles_count, u64 iter_count,
- u64 iter_cycles)
+ u64 iter_cycles, u64 from_count)
{
int printed = 0, i = 0;
- u64 cycles;
+ u64 cycles, v = 0;
cycles = cycles_count / branch_count;
if (cycles) {
bf + printed, bfsize - printed);
}
- if (iter_count) {
- printed += count_pri64_printf(i++, "iter",
- iter_count,
- bf + printed, bfsize - printed);
+ if (iter_count && from_count) {
+ v = iter_count / from_count;
+ if (v) {
+ printed += count_pri64_printf(i++, "iter",
+ v, bf + printed, bfsize - printed);
- printed += count_pri64_printf(i++, "avg_cycles",
- iter_cycles / iter_count,
- bf + printed, bfsize - printed);
+ printed += count_pri64_printf(i++, "avg_cycles",
+ iter_cycles / iter_count,
+ bf + printed, bfsize - printed);
+ }
}
if (i)
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
+ u64 from_count,
struct branch_type_stat *brtype_stat)
{
int printed;
predicted_count, abort_count, brtype_stat);
} else {
printed = branch_from_str(bf, bfsize, branch_count,
- cycles_count, iter_count, iter_cycles);
+ cycles_count, iter_count, iter_cycles,
+ from_count);
}
if (!printed)
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
+ u64 from_count,
struct branch_type_stat *brtype_stat)
{
char str[256];
counts_str_build(str, sizeof(str), branch_count,
predicted_count, abort_count, cycles_count,
- iter_count, iter_cycles, brtype_stat);
+ iter_count, iter_cycles, from_count, brtype_stat);
if (fp)
return fprintf(fp, "%s", str);
u64 branch_count, predicted_count;
u64 abort_count, cycles_count;
u64 iter_count, iter_cycles;
+ u64 from_count;
branch_count = clist->branch_count;
predicted_count = clist->predicted_count;
cycles_count = clist->cycles_count;
iter_count = clist->iter_count;
iter_cycles = clist->iter_cycles;
+ from_count = clist->from_count;
return callchain_counts_printf(fp, bf, bfsize, branch_count,
predicted_count, abort_count,
cycles_count, iter_count, iter_cycles,
- &clist->brtype_stat);
+ from_count, &clist->brtype_stat);
}
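A quick arithmetic check of the per-source averaging introduced above (numbers are illustrative): if a branch source was seen 10 times (from_count) and accumulated 250 loop iterations (iter_count) over 1000 iteration cycles (iter_cycles), the output now reports iter = 250 / 10 = 25 and avg_cycles = 1000 / 250 = 4, whereas the raw iter_count of 250 was printed before.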
static void free_callchain_node(struct callchain_node *node)
bool has_children;
};
u64 branch_count;
+ u64 from_count;
u64 predicted_count;
u64 abort_count;
u64 cycles_count;
*lenp = 0;
return "?";
}
+
+__weak
+int arch_is_branch(const unsigned char *buf __maybe_unused,
+ size_t len __maybe_unused,
+ int x86_64 __maybe_unused)
+{
+ return 0;
+}
const char *dump_insn(struct perf_insn *x, u64 ip,
u8 *inbuf, int inlen, int *lenp);
+int arch_is_branch(const unsigned char *buf, size_t len, int x86_64);
+
#endif
// SPDX-License-Identifier: GPL-2.0
-static int find_vdso_map(void **start, void **end)
+static int find_map(void **start, void **end, const char *name)
{
FILE *maps;
char line[128];
maps = fopen("/proc/self/maps", "r");
if (!maps) {
- fprintf(stderr, "vdso: cannot open maps\n");
+ fprintf(stderr, "cannot open maps\n");
return -1;
}
if (m < 0)
continue;
- if (!strncmp(&line[m], VDSO__MAP_NAME,
- sizeof(VDSO__MAP_NAME) - 1))
+ if (!strncmp(&line[m], name, strlen(name)))
found = 1;
}
continue;
intel_bts_get_branch_type(btsq, branch);
if (btsq->bts->synth_opts.thread_stack)
- thread_stack__event(thread, btsq->sample_flags,
+ thread_stack__event(thread, btsq->cpu, btsq->sample_flags,
le64_to_cpu(branch->from),
le64_to_cpu(branch->to),
btsq->intel_pt_insn.length,
!btsq->bts->synth_opts.thread_stack && thread &&
(!old_buffer || btsq->bts->sampling_mode ||
(btsq->bts->snapshot_mode && !buffer->consecutive)))
- thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);
+ thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1);
err = intel_bts_process_buffer(btsq, buffer, thread);
return 0;
}
+int arch_is_branch(const unsigned char *buf, size_t len, int x86_64)
+{
+ struct intel_pt_insn in;
+ if (intel_pt_get_insn(buf, len, x86_64, &in) < 0)
+ return -1;
+ return in.branch != INTEL_PT_BR_NO_BRANCH;
+}
+
const char *dump_insn(struct perf_insn *x, uint64_t ip __maybe_unused,
u8 *inbuf, int inlen, int *lenp)
{
intel_pt_prep_b_sample(pt, ptq, event, sample);
if (pt->synth_opts.callchain) {
- thread_stack__sample(ptq->thread, ptq->chain,
+ thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
pt->synth_opts.callchain_sz + 1,
sample->ip, pt->kernel_start);
sample->callchain = ptq->chain;
return 0;
if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
- thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
+ thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip,
state->to_ip, ptq->insn_len,
state->trace_nr);
else
- thread_stack__set_trace_nr(ptq->thread, state->trace_nr);
+ thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
if (pt->sample_branches) {
err = intel_pt_synth_branch_sample(ptq);
{
int i;
- iter->nr_loop_iter = nr;
+ iter->nr_loop_iter++;
iter->cycles = 0;
for (i = 0; i < nr; i++)
file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
- file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, NULL, NULL, NULL, 1);
+ file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1,
+ NULL, NULL, NULL, 0);
#endif
if (file == NULL)
goto free_list;
return machine__findnew_thread(&session->machines.host, -1, pid);
}
+/*
+ * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
+ * So here a single thread is created for that, but actually there is a separate
+ * idle task per cpu, so there should be one 'struct thread' per cpu, but there
+ * is only 1. That causes problems for some tools, requiring workarounds. For
+ * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
+ */
int perf_session__register_idle_thread(struct perf_session *session)
{
struct thread *thread;
return ret;
}
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
- va_end(ap_saved);
if (len > strbuf_avail(sb)) {
pr_debug("this should not happen, your vsnprintf is broken");
va_end(ap_saved);
static bool symbol__is_idle(const char *name)
{
const char * const idle_symbols[] = {
+ "arch_cpu_idle",
"cpu_idle",
"cpu_startup_entry",
"intel_idle",
#include <linux/rbtree.h>
#include <linux/list.h>
+#include <linux/log2.h>
#include <errno.h>
#include "thread.h"
#include "event.h"
* @last_time: last timestamp
* @crp: call/return processor
* @comm: current comm
+ * @arr_sz: size of array if this is the first element of an array
*/
struct thread_stack {
struct thread_stack_entry *stack;
u64 last_time;
struct call_return_processor *crp;
struct comm *comm;
+ unsigned int arr_sz;
};
+/*
+ * Assume pid == tid == 0 identifies the idle task as defined by
+ * perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
+ * and therefore requires a stack for each cpu.
+ */
+static inline bool thread_stack__per_cpu(struct thread *thread)
+{
+ return !(thread->tid || thread->pid_);
+}
+
static int thread_stack__grow(struct thread_stack *ts)
{
struct thread_stack_entry *new_stack;
return 0;
}
-static struct thread_stack *thread_stack__new(struct thread *thread,
- struct call_return_processor *crp)
+static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
+ struct call_return_processor *crp)
{
- struct thread_stack *ts;
-
- ts = zalloc(sizeof(struct thread_stack));
- if (!ts)
- return NULL;
+ int err;
- if (thread_stack__grow(ts)) {
- free(ts);
- return NULL;
- }
+ err = thread_stack__grow(ts);
+ if (err)
+ return err;
if (thread->mg && thread->mg->machine)
ts->kernel_start = machine__kernel_start(thread->mg->machine);
ts->kernel_start = 1ULL << 63;
ts->crp = crp;
+ return 0;
+}
+
+static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
+ struct call_return_processor *crp)
+{
+ struct thread_stack *ts = thread->ts, *new_ts;
+ unsigned int old_sz = ts ? ts->arr_sz : 0;
+ unsigned int new_sz = 1;
+
+ if (thread_stack__per_cpu(thread) && cpu > 0)
+ new_sz = roundup_pow_of_two(cpu + 1);
+
+ if (!ts || new_sz > old_sz) {
+ new_ts = calloc(new_sz, sizeof(*ts));
+ if (!new_ts)
+ return NULL;
+ if (ts)
+ memcpy(new_ts, ts, old_sz * sizeof(*ts));
+ new_ts->arr_sz = new_sz;
+ zfree(&thread->ts);
+ thread->ts = new_ts;
+ ts = new_ts;
+ }
+
+ if (thread_stack__per_cpu(thread) && cpu > 0 &&
+ (unsigned int)cpu < ts->arr_sz)
+ ts += cpu;
+
+ if (!ts->stack &&
+ thread_stack__init(ts, thread, crp))
+ return NULL;
+
return ts;
}
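A worked example of the sizing above, assuming an idle-task (pid == tid == 0) sample arriving on cpu 5:

/*
 * new_sz = roundup_pow_of_two(5 + 1) = 8, so thread->ts is reallocated as
 * an 8-element array (existing entries copied over, arr_sz recorded in
 * element 0) and the stack actually used is thread->ts[5].
 */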
+static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
+{
+ struct thread_stack *ts = thread->ts;
+
+ if (cpu < 0)
+ cpu = 0;
+
+ if (!ts || (unsigned int)cpu >= ts->arr_sz)
+ return NULL;
+
+ ts += cpu;
+
+ if (!ts->stack)
+ return NULL;
+
+ return ts;
+}
+
+static inline struct thread_stack *thread__stack(struct thread *thread,
+ int cpu)
+{
+ if (!thread)
+ return NULL;
+
+ if (thread_stack__per_cpu(thread))
+ return thread__cpu_stack(thread, cpu);
+
+ return thread->ts;
+}
+
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
bool trace_end)
{
int thread_stack__flush(struct thread *thread)
{
- if (thread->ts)
- return __thread_stack__flush(thread, thread->ts);
+ struct thread_stack *ts = thread->ts;
+ unsigned int pos;
+ int err = 0;
- return 0;
+ if (ts) {
+ for (pos = 0; pos < ts->arr_sz; pos++) {
+ int ret = __thread_stack__flush(thread, ts + pos);
+
+ if (ret)
+ err = ret;
+ }
+ }
+
+ return err;
}
-int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
+int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
u64 to_ip, u16 insn_len, u64 trace_nr)
{
+ struct thread_stack *ts = thread__stack(thread, cpu);
+
if (!thread)
return -EINVAL;
- if (!thread->ts) {
- thread->ts = thread_stack__new(thread, NULL);
- if (!thread->ts) {
+ if (!ts) {
+ ts = thread_stack__new(thread, cpu, NULL);
+ if (!ts) {
pr_warning("Out of memory: no thread stack\n");
return -ENOMEM;
}
- thread->ts->trace_nr = trace_nr;
+ ts->trace_nr = trace_nr;
}
/*
* the stack might be completely invalid. Better to report nothing than
* to report something misleading, so flush the stack.
*/
- if (trace_nr != thread->ts->trace_nr) {
- if (thread->ts->trace_nr)
- __thread_stack__flush(thread, thread->ts);
- thread->ts->trace_nr = trace_nr;
+ if (trace_nr != ts->trace_nr) {
+ if (ts->trace_nr)
+ __thread_stack__flush(thread, ts);
+ ts->trace_nr = trace_nr;
}
/* Stop here if thread_stack__process() is in use */
- if (thread->ts->crp)
+ if (ts->crp)
return 0;
if (flags & PERF_IP_FLAG_CALL) {
ret_addr = from_ip + insn_len;
if (ret_addr == to_ip)
return 0; /* Zero-length calls are excluded */
- return thread_stack__push(thread->ts, ret_addr,
+ return thread_stack__push(ts, ret_addr,
flags & PERF_IP_FLAG_TRACE_END);
} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
/*
* address, so try to pop that. Also, do not expect a call made
* when the trace ended, to return, so pop that.
*/
- thread_stack__pop(thread->ts, to_ip);
- thread_stack__pop_trace_end(thread->ts);
+ thread_stack__pop(ts, to_ip);
+ thread_stack__pop_trace_end(ts);
} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
- thread_stack__pop(thread->ts, to_ip);
+ thread_stack__pop(ts, to_ip);
}
return 0;
}
-void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
+void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
{
- if (!thread || !thread->ts)
+ struct thread_stack *ts = thread__stack(thread, cpu);
+
+ if (!ts)
return;
- if (trace_nr != thread->ts->trace_nr) {
- if (thread->ts->trace_nr)
- __thread_stack__flush(thread, thread->ts);
- thread->ts->trace_nr = trace_nr;
+ if (trace_nr != ts->trace_nr) {
+ if (ts->trace_nr)
+ __thread_stack__flush(thread, ts);
+ ts->trace_nr = trace_nr;
}
}
+static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
+{
+ __thread_stack__flush(thread, ts);
+ zfree(&ts->stack);
+}
+
+static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
+{
+ unsigned int arr_sz = ts->arr_sz;
+
+ __thread_stack__free(thread, ts);
+ memset(ts, 0, sizeof(*ts));
+ ts->arr_sz = arr_sz;
+}
+
void thread_stack__free(struct thread *thread)
{
- if (thread->ts) {
- __thread_stack__flush(thread, thread->ts);
- zfree(&thread->ts->stack);
+ struct thread_stack *ts = thread->ts;
+ unsigned int pos;
+
+ if (ts) {
+ for (pos = 0; pos < ts->arr_sz; pos++)
+ __thread_stack__free(thread, ts + pos);
zfree(&thread->ts);
}
}
return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}
-void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
+void thread_stack__sample(struct thread *thread, int cpu,
+ struct ip_callchain *chain,
size_t sz, u64 ip, u64 kernel_start)
{
+ struct thread_stack *ts = thread__stack(thread, cpu);
u64 context = callchain_context(ip, kernel_start);
u64 last_context;
size_t i, j;
chain->ips[0] = context;
chain->ips[1] = ip;
- if (!thread || !thread->ts) {
+ if (!ts) {
chain->nr = 2;
return;
}
last_context = context;
- for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
- ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
+ for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
+ ip = ts->stack[ts->cnt - j].ret_addr;
context = callchain_context(ip, kernel_start);
if (context != last_context) {
if (i >= sz - 1)
return 1;
}
-static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
+static int thread_stack__bottom(struct thread_stack *ts,
struct perf_sample *sample,
struct addr_location *from_al,
struct addr_location *to_al, u64 ref)
if (!cp)
return -ENOMEM;
- return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
+ return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
true, false);
}
struct addr_location *to_al, u64 ref,
struct call_return_processor *crp)
{
- struct thread_stack *ts = thread->ts;
+ struct thread_stack *ts = thread__stack(thread, sample->cpu);
int err = 0;
- if (ts) {
- if (!ts->crp) {
- /* Supersede thread_stack__event() */
- thread_stack__free(thread);
- thread->ts = thread_stack__new(thread, crp);
- if (!thread->ts)
- return -ENOMEM;
- ts = thread->ts;
- ts->comm = comm;
- }
- } else {
- thread->ts = thread_stack__new(thread, crp);
- if (!thread->ts)
+ if (ts && !ts->crp) {
+ /* Supersede thread_stack__event() */
+ thread_stack__reset(thread, ts);
+ ts = NULL;
+ }
+
+ if (!ts) {
+ ts = thread_stack__new(thread, sample->cpu, crp);
+ if (!ts)
return -ENOMEM;
- ts = thread->ts;
ts->comm = comm;
}
/* If the stack is empty, put the current symbol on the stack */
if (!ts->cnt) {
- err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
- ref);
+ err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
if (err)
return err;
}
return err;
}
-size_t thread_stack__depth(struct thread *thread)
+size_t thread_stack__depth(struct thread *thread, int cpu)
{
- if (!thread->ts)
+ struct thread_stack *ts = thread__stack(thread, cpu);
+
+ if (!ts)
return 0;
- return thread->ts->cnt;
+ return ts->cnt;
}
void *data;
};
-int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
+int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
u64 to_ip, u16 insn_len, u64 trace_nr);
-void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
-void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
+void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
+void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
size_t sz, u64 ip, u64 kernel_start);
int thread_stack__flush(struct thread *thread);
void thread_stack__free(struct thread *thread);
-size_t thread_stack__depth(struct thread *thread);
+size_t thread_stack__depth(struct thread *thread, int cpu);
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
#include "debug.h"
/*
- * Include definition of find_vdso_map() also used in perf-read-vdso.c for
+ * Include definition of find_map() also used in perf-read-vdso.c for
* building perf-read-vdso32 and perf-read-vdsox32.
*/
-#include "find-vdso-map.c"
+#include "find-map.c"
#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
if (vdso_file->found)
return vdso_file->temp_file_name;
- if (vdso_file->error || find_vdso_map(&start, &end))
+ if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME))
return NULL;
size = end - start;
endif
turbostat : turbostat.c
-CFLAGS += -Wall
-CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
-CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
+override CFLAGS += -Wall
+override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
+override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
%: %.c
@mkdir -p $(BUILD_OUTPUT)
- $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@
+ $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS)
.PHONY : clean
clean :
endif
x86_energy_perf_policy : x86_energy_perf_policy.c
-CFLAGS += -Wall
-CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
+override CFLAGS += -Wall
+override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
%: %.c
@mkdir -p $(BUILD_OUTPUT)
- $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@
+ $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS)
.PHONY : clean
clean :
test_netcnt
test_section_names
test_tcpnotify_user
+test_libbpf
test_flow_dissector.sh \
test_xdp_vlan.sh
-TEST_PROGS_EXTENDED := with_addr.sh
+TEST_PROGS_EXTENDED := with_addr.sh \
+ with_tunnels.sh \
+ tcp_client.py \
+ tcp_server.py
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
* This function creates a cgroup under the top level workdir and returns the
* file descriptor. It is idempotent.
*
- * On success, it returns the file descriptor. On failure it returns 0.
+ * On success, it returns the file descriptor. On failure it returns -1.
* If there is a failure, it prints the error to stderr.
*/
int create_and_get_cgroup(const char *path)
format_cgroup_path(cgroup_path, path);
if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
- return 0;
+ return -1;
}
fd = open(cgroup_path, O_RDONLY);
if (fd < 0) {
log_err("Opening Cgroup");
- return 0;
+ return -1;
}
return fd;
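Because the helper now returns -1 on failure (the usual file-descriptor convention) instead of 0, callers must check for a negative value, as the updated call sites later in this patch do. A minimal caller sketch, assuming the create_and_get_cgroup()/join_cgroup() prototypes from the selftests' cgroup helpers; setup_test_cgroup() is only an illustrative wrapper:

#include <stdio.h>
#include <unistd.h>

int create_and_get_cgroup(const char *path);	/* from cgroup_helpers.h */
int join_cgroup(const char *path);		/* from cgroup_helpers.h */

static int setup_test_cgroup(const char *path)
{
	int cgroup_fd = create_and_get_cgroup(path);

	if (cgroup_fd < 0) {	/* was "!cgroup_fd" before this change */
		fprintf(stderr, "Failed to create test cgroup\n");
		return -1;
	}
	if (join_cgroup(path)) {
		close(cgroup_fd);
		return -1;
	}
	return cgroup_fd;
}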
ENUM_TWO,
ENUM_THREE,
} aenum;
+ uint32_t ui32b;
+ uint32_t bits2c:2;
};
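The two new members account for the BTF encoding updates below: the struct grows from 8 to 10 members and from 32 to 40 bytes, with ui32b at bit offset 224 and the 2-bit bits2c bitfield at bit offset 256. An illustrative reconstruction of the value struct, with offsets taken from the encodings that follow (the typedef name is made up for this sketch):

#include <stdint.h>

typedef enum { ENUM_ZERO, ENUM_ONE, ENUM_TWO, ENUM_THREE } pprint_aenum_t;

struct pprint_mapv_sketch {
	uint32_t ui32;			/* bits   0..31  */
	uint16_t ui16;			/* bits  32..47, then a 2-byte hole */
	int32_t  si32;			/* bits  64..95  */
	uint32_t unused_bits2a:2;	/* bits  96..97  */
	uint32_t bits28:28;		/* bits  98..125 */
	uint32_t unused_bits2b:2;	/* bits 126..127 */
	union {
		uint64_t ui64;
		uint8_t  ui8a[8];
	};				/* bits 128..191 */
	pprint_aenum_t aenum;		/* bits 192..223 */
	uint32_t ui32b;			/* bits 224..255 (new) */
	uint32_t bits2c:2;		/* bits 256..257 (new) */
};					/* sizeof == 40, the new BTF size */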
static struct btf_raw_test pprint_test_template[] = {
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */
BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
+ BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */
+ BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */
BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
+ BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
+ BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */
BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
+ BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
+ BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
/* typedef unsigned int ___int */ /* [17] */
BTF_TYPEDEF_ENC(NAME_TBD, 18),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
v->unused_bits2b = 3;
v->ui64 = i;
v->aenum = i & 0x03;
+ v->ui32b = 4;
+ v->bits2c = 1;
v = (void *)v + rounded_value_size;
}
}
nexpected_line = snprintf(expected_line, sizeof(expected_line),
"%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
- "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n",
+ "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+ "%u,0x%x}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
cmapv->ui32, cmapv->si32,
cmapv->ui8a[2], cmapv->ui8a[3],
cmapv->ui8a[4], cmapv->ui8a[5],
cmapv->ui8a[6], cmapv->ui8a[7],
- pprint_enum_str[cmapv->aenum]);
+ pprint_enum_str[cmapv->aenum],
+ cmapv->ui32b,
+ cmapv->bits2c);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (!cgroup_fd) {
+ if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (!cgroup_fd) {
+ if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (!cgroup_fd) {
+ if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
+ int retry = 1;
+retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
goto out;
previous_key = key;
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
+ int retry = 1;
+retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
return;
previous_key = key;
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
goto err;
cgfd = create_and_get_cgroup(CGROUP_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CGROUP_PATH))
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
#define SRC6_IP "::1"
#define SRC6_REWRITE_IP "::6"
+#define WILDCARD6_IP "::"
#define SERV6_PORT 6060
#define SERV6_REWRITE_PORT 6666
static int bind6_prog_load(const struct sock_addr_test *test);
static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
static struct sock_addr_test tests[] = {
/* bind */
SYSCALL_ENOTSUPP,
},
{
+ "sendmsg6: set dst IP = [::] (BSD'ism)",
+ sendmsg6_rw_wildcard_prog_load,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ "sendmsg6: preserve dst IP = [::] (BSD'ism)",
+ sendmsg_allow_prog_load,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ WILDCARD6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_PORT,
+ SRC6_IP,
+ SUCCESS,
+ },
+ {
"sendmsg6: deny call",
sendmsg_deny_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
return load_path(test, CONNECT6_PROG_PATH);
}
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
+ int32_t rc)
{
struct bpf_insn insns[] = {
- /* return 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* return rc */
+ BPF_MOV64_IMM(BPF_REG_0, rc),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
+
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in dst4_rw_addr;
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
}
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
+}
+
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
{
return load_path(test, SENDMSG6_PROG_PATH);
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
goto err;
cg_fd = create_and_get_cgroup(cg_path);
- if (!cg_fd)
+ if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))
goto err;
cg_fd = create_and_get_cgroup(cg_path);
- if (!cg_fd)
+ if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))
.retval = 1,
},
{
+ "map access: mixing value pointer and scalar, 1",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+ .retval = 0,
+ },
+ {
+ "map access: mixing value pointer and scalar, 2",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different maps or paths",
+ .retval = 0,
+ },
+ {
+ "sanitation: alu with different scalars",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 0x100000,
+ },
+ {
"map access: value_ptr += known scalar, upper oob arith, test 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
lag_unlink_slaves_test
lag_dev_deletion_test
vlan_interface_uppers_test
+ bridge_extern_learn_test
devlink_reload_test
"
NUM_NETIFS=2
ip link del dev br0
}
+bridge_extern_learn_test()
+{
+ # Test that externally learned entries added from user space are
+ # marked as offloaded
+ RET=0
+
+ ip link add name br0 type bridge
+ ip link set dev $swp1 master br0
+
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn
+
+ bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload
+ check_err $? "fdb entry not marked as offloaded when should"
+
+ log_test "externally learned fdb entry"
+
+ ip link del dev br0
+}
+
devlink_reload_test()
{
# Test that after executing all the above configuration tests, a
log_test "vlan-aware - failed enslavement to vlan-aware bridge"
+ bridge vlan del vid 10 dev vxlan20
+ bridge vlan add vid 20 dev vxlan20 pvid untagged
+
+ # Test that offloading of an unsupported tunnel fails when it is
+ # triggered by addition of VLAN to a local port
+ RET=0
+
+ # TOS must be set to inherit for offload; any other value makes the
+ # tunnel unsupported
+ ip link set dev vxlan10 type vxlan tos 42
+
+ ip link set dev $swp1 master br0
+ bridge vlan add vid 10 dev $swp1 &> /dev/null
+ check_fail $?
+
+ log_test "vlan-aware - failed vlan addition to a local port"
+
+ ip link set dev vxlan10 type vxlan tos inherit
+
ip link del dev vxlan20
ip link del dev vxlan10
ip link del dev br0
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
NUM_NETIFS=4
CHECK_TC="yes"
source lib.sh
flood_test $swp2 $h1 $h2
}
+vlan_deletion()
+{
+ # Test that the deletion of a VLAN on a bridge port does not affect
+ # the PVID VLAN
+ log_info "Add and delete a VLAN on bridge port $swp1"
+
+ bridge vlan add vid 10 dev $swp1
+ bridge vlan del vid 10 dev $swp1
+
+ ping_ipv4
+ ping_ipv6
+}
+
+extern_learn()
+{
+ local mac=de:ad:be:ef:13:37
+ local ageing_time
+
+ # Test that externally learned FDB entries can roam, but not age out
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "Did not find FDB entry when should"
+
+ # Wait for 10 seconds after the ageing time to make sure the FDB entry
+ # was not aged out
+ ageing_time=$(bridge_ageing_time_get br0)
+ sleep $((ageing_time + 10))
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry was aged out when should not"
+
+ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
+
+ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry did not roam when should"
+
+ log_test "Externally learned FDB entry - ageing & roaming"
+
+ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
+ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
+}
+
trap cleanup EXIT
setup_prepare
RET=0
tc filter add dev $h1 ingress pref 77 prot ip \
- flower ip_tos $decapped_tos action pass
+ flower ip_tos $decapped_tos action drop
sleep 1
vxlan_encapped_ping_test v2 v1 192.0.2.17 \
$orig_inner_tos $orig_outer_tos \
{
struct ip *iphdr = (struct ip *)ip_frame;
struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
+ const bool ipv4 = !ipv6;
int res;
int offset;
int frag_len;
iphdr->ip_sum = 0;
}
+ /* Occasionally test in-order fragments. */
+ if (!cfg_overlap && (rand() % 100 < 15)) {
+ offset = 0;
+ while (offset < (UDP_HLEN + payload_len)) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ return;
+ }
+
+ /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
+ if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
+ (payload_len > 9 * max_frag_len)) {
+ offset = 6 * max_frag_len;
+ while (offset < (UDP_HLEN + payload_len)) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ offset = 3 * max_frag_len;
+ while (offset < 6 * max_frag_len) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ offset = 0;
+ while (offset < 3 * max_frag_len) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ return;
+ }
+
/* Odd fragments. */
offset = max_frag_len;
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
+ /* IPv4 ignores duplicates, so randomly send a duplicate. */
+ if (ipv4 && (1 == rand() % 100))
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
if (cfg_overlap) {
/* Send an extra random fragment. */
- offset = rand() % (UDP_HLEN + payload_len - 1);
- /* sendto() returns EINVAL if offset + frag_len is too small. */
if (ipv6) {
struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
+ /* sendto() returns EINVAL if offset + frag_len is too small. */
+ offset = rand() % (UDP_HLEN + payload_len - 1);
frag_len = max_frag_len + rand() % 256;
/* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
frag_len &= ~0x7;
ip6hdr->ip6_plen = htons(frag_len);
frag_len += IP6_HLEN;
} else {
- frag_len = IP4_HLEN + UDP_HLEN + rand() % 256;
+ /* In IPv4, duplicates and some fragments completely inside
+ * previously sent fragments are dropped/ignored, so a random
+ * offset and frag_len can result in a dropped fragment rather
+ * than a dropped queue/packet. Hence we hard-code offset and
+ * frag_len.
+ *
+ * See ade446403bfb ("net: ipv4: do not handle duplicate
+ * fragments as overlapping").
+ */
+ if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
+ /* not enough payload to play with random offset and frag_len. */
+ offset = 8;
+ frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
+ } else {
+ offset = rand() % (payload_len / 2);
+ frag_len = 2 * max_frag_len + 1 + rand() % 256;
+ }
iphdr->ip_off = htons(offset / 8 | IP4_MF);
iphdr->ip_len = htons(frag_len);
}
res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
if (res < 0)
- error(1, errno, "sendto overlap");
+ error(1, errno, "sendto overlap: %d", frag_len);
if (res != frag_len)
error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
frag_counter++;
offset = 0;
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
+ /* IPv4 ignores duplicates, so randomly send a duplicate. */
+ if (ipv4 && (1 == rand() % 100))
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
}
static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
{
int fd_tx_raw, fd_rx_udp;
- struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 };
+ /* Frag queue timeout is set to one second in the calling script;
+ * socket timeout should be just a bit longer to avoid tests interfering
+ * with each other.
+ */
+ struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
int idx;
int min_frag_len = ipv6 ? 1280 : 8;
payload_len += (rand() % 4096)) {
if (cfg_verbose)
printf("payload_len: %d\n", payload_len);
- max_frag_len = min_frag_len;
- do {
+
+ if (cfg_overlap) {
+ /* With overlaps, one send/receive pair below takes
+ * at least one second (== timeout) to run, so there
+ * is not enough test time to run a nested loop:
+ * the full overlap test takes 20-30 seconds.
+ */
+ max_frag_len = min_frag_len +
+ rand() % (1500 - FRAG_HLEN - min_frag_len);
send_udp_frags(fd_tx_raw, addr, alen, ipv6);
recv_validate_udp(fd_rx_udp);
- max_frag_len += 8 * (rand() % 8);
- } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len);
+ } else {
+ /* Without overlaps, each packet reassembly (== one
+ * send/receive pair below) takes very little time to
+ * run, so we can easily afford more thorough testing
+ * with a nested loop: the full non-overlap test takes
+ * less than one second.
+ */
+ max_frag_len = min_frag_len;
+ do {
+ send_udp_frags(fd_tx_raw, addr, alen, ipv6);
+ recv_validate_udp(fd_rx_udp);
+ max_frag_len += 8 * (rand() % 8);
+ } while (max_frag_len < (1500 - FRAG_HLEN) &&
+ max_frag_len <= payload_len);
+ }
}
/* Cleanup. */
setup() {
ip netns add "${NETNS}"
ip -netns "${NETNS}" link set lo up
+
ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1
+
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
+
+ # DST cache can get full with a lot of frags, with GC not keeping up with the test.
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
}
cleanup() {
echo "ipv4 defrag"
ip netns exec "${NETNS}" ./ip_defrag -4
-
echo "ipv4 defrag with overlaps"
ip netns exec "${NETNS}" ./ip_defrag -4o
echo "ipv6 defrag with overlaps"
ip netns exec "${NETNS}" ./ip_defrag -6o
+echo "all tests done"
cm->cmsg_type == IP_RECVERR) ||
(cm->cmsg_level == SOL_IPV6 &&
cm->cmsg_type == IPV6_RECVERR) ||
- (cm->cmsg_level = SOL_PACKET &&
+ (cm->cmsg_level == SOL_PACKET &&
cm->cmsg_type == PACKET_TX_TIMESTAMP)) {
serr = (void *) CMSG_DATA(cm);
if (serr->ee_errno != ENOMSG ||
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
"matchCount": "0",
"teardown": []
},
"cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
"matchCount": "0",
"teardown": []
},
"cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
"matchCount": "0",
"teardown": []
},
"cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 11",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
},
{
"id": "fac3",
- "name": "Create valid ife encode action with index at 32-bit maximnum",
+ "name": "Create valid ife encode action with index at 32-bit maximum",
"category": [
"actions",
"ife"
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4294967295",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife decode pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife decode pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife decode continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife decode drop index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4294967295999",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
"matchCount": "0",
"teardown": []
},
"cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
"matchCount": "0",
"teardown": []
},
"cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
"cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
"matchCount": "0",
"teardown": []
},
"cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
"matchCount": "0",
"teardown": []
},
]
},
{
- "id": "ba4e",
- "name": "Add tunnel_key set action with missing mandatory id parameter",
- "category": [
- "actions",
- "tunnel_key"
- ],
- "setup": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ],
- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
- "expExitCode": "255",
- "verifyCmd": "$TC actions list action tunnel_key",
- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
- "matchCount": "0",
- "teardown": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ]
- },
- {
"id": "a5e0",
"name": "Add tunnel_key set action with invalid src_ip parameter",
"category": [
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 4",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"
BINDIR=usr/bin
WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
-CFLAGS+= -O1 ${WARNFLAGS}
+override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
# Add "-fstack-protector" only if toolchain supports it.
-CFLAGS+= $(call cc-option,-fstack-protector)
+override CFLAGS+= $(call cc-option,-fstack-protector-strong)
CC?= $(CROSS_COMPILE)gcc
PKG_CONFIG?= pkg-config
-CFLAGS+=-D VERSION=\"$(VERSION)\"
+override CFLAGS+=-D VERSION=\"$(VERSION)\"
LDFLAGS+=
TARGET=tmon
$(PKG_CONFIG) --libs $(STATIC) panel ncurses 2> /dev/null || \
echo -lpanel -lncurses)
-CFLAGS += $(shell $(PKG_CONFIG) --cflags $(STATIC) panelw ncursesw 2> /dev/null || \
+override CFLAGS += $(shell $(PKG_CONFIG) --cflags $(STATIC) panelw ncursesw 2> /dev/null || \
$(PKG_CONFIG) --cflags $(STATIC) panel ncurses 2> /dev/null)
OBJS = tmon.o tui.o sysfs.o pid.o
* Example use:
* cat /sys/kernel/debug/page_owner > page_owner_full.txt
* grep -v ^PFN page_owner_full.txt > page_owner.txt
- * ./sort page_owner.txt sorted_page_owner.txt
+ * ./page_owner_sort page_owner.txt sorted_page_owner.txt
+ *
+ * See Documentation/vm/page_owner.rst
*/
#include <stdio.h>
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int as_id, id, n;
+ int as_id, id;
gfn_t offset;
- unsigned long i;
+ unsigned long i, n;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
return -ENOENT;
n = kvm_dirty_bitmap_bytes(memslot);
+
+ if (log->first_page > memslot->npages ||
+ log->num_pages > memslot->npages - log->first_page)
+ return -EINVAL;
+
*flush = false;
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
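The added bounds check validates a user-controlled (first_page, num_pages) range without doing the addition first, which could wrap around and slip past a naive "first + count <= total" test. A standalone sketch of the same pattern, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the check above: reject the range unless both parts fit. */
static bool page_range_ok(uint64_t first_page, uint64_t num_pages,
			  uint64_t npages)
{
	return first_page <= npages && num_pages <= npages - first_page;
}

int main(void)
{
	/* Naive "10 + num_pages <= 100" would wrap and accept this range. */
	printf("%d\n", page_range_ok(10, UINT64_MAX - 5, 100));	/* prints 0 */
	printf("%d\n", page_range_ok(10, 90, 100));		/* prints 1 */
	return 0;
}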