min_vecs argument set to this limit, and the PCI core will return -ENOSPC
if it can't meet the minimum number of vectors.
-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt. If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
vectors, use the following function:
capped to the supported limit, so there is no need to query the number of
vectors supported beforehand:
- nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES);
if (nvec < 0)
goto out_err;
number to pci_alloc_irq_vectors() function as both 'min_vecs' and
'max_vecs' parameters:
- ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+ ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
if (ret < 0)
goto out_err;
the single MSI mode for a device. It could be done by passing two 1s as
'min_vecs' and 'max_vecs':
- ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
goto out_err;
Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:
- nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+ nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nvec < 0)
goto out_err;
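For illustration only, a minimal sketch of how these calls might fit together
in a driver; the handler name (my_handler), the device cookie (my_dev) and the
request for up to 8 vectors are hypothetical:

	int i, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number */
		int ret = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
				      "my_device", my_dev);

		if (ret) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), my_dev);
			pci_free_irq_vectors(pdev);
			return ret;
		}
	}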
+ User Manual
http://dl.linux-sunxi.org/A13/A13%20User%20Manual%20-%20v1.2%20%282013-01-08%29.pdf
+ - Next Thing Co GR8 (sun5i)
+
* Dual ARM Cortex-A7 based SoCs
- Allwinner A20 (sun7i)
+ User Manual
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
| ARM | Cortex-A57 | #852523 | N/A |
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+| ARM | Cortex-A72 | #853709 | N/A |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
This file allows to turn off the disk entropy contribution. Default
value of this file is '1'(on).
+dax (RO)
+--------
+This file indicates whether the device supports Direct Access (DAX),
+used by CPU-addressable storage to bypass the pagecache. It shows '1'
+if true, '0' if not.
+
discard_granularity (RO)
-----------------------
This shows the size of internal allocation of the device in bytes, if
-------------------
This is the hardware sector size of the device, in bytes.
+io_poll (RW)
+------------
+When read, this file shows the total number of block IO polls and how
+many returned success. Writing '0' to this file will disable polling
+for this device. Writing any non-zero value will enable this feature.
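For example, polling could be disabled from the shell with:
	# echo 0 > /sys/block/<dev>/queue/io_poll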
+
iostats (RW)
-------------
This file is used to control (on/off) the iostats accounting of the
setting from "write back" to "write through", since that will also
eliminate cache flushes issued by the kernel.
+write_same_max_bytes (RO)
+-------------------------
+This is the number of bytes the device can write in a single write-same
+command. A value of '0' means write-same is not supported by this
+device.
+
Jens Axboe <jens.axboe@oracle.com>, February 2009
todo_include_todos = False
primary_domain = 'C'
-highlight_language = 'C'
+highlight_language = 'guess'
# -- Options for HTML output ----------------------------------------------
allwinner,sun8i-a83t
allwinner,sun8i-h3
allwinner,sun9i-a80
+ nextthing,gr8
- interrupts: Interrupt number for McPDM
- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated to the McPDM
-- clocks: phandle for the pdmclk provider, likely <&twl6040>
-- clock-names: Must be "pdmclk"
Example:
interrupt-parent = <&gic>;
ti,hwmods = "mcpdm";
};
-
-In board DTS file the pdmclk needs to be added:
-
-&mcpdm {
- clocks = <&twl6040>;
- clock-names = "pdmclk";
- status = "okay";
-};
Required properties:
- #cooling-cells: Used to provide cooling device specific information
Type: unsigned while referring to it. Must be at least 2, in order
- Size: one cell to specify minimum and maximum cooling state used
+ Size: one cell to specify minimum and maximum cooling state used
in the reference. The first cell is the minimum
cooling state requested and the second cell is
the maximum cooling state requested in the reference.
Optional property:
- contribution: The cooling contribution to the thermal zone of the
Type: unsigned referred cooling device at the referred trip point.
- Size: one cell The contribution is a ratio of the sum
+ Size: one cell The contribution is a ratio of the sum
of all cooling contributions within a thermal zone.
Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle
Size: one cell
- thermal-sensors: A list of thermal sensor phandles and sensor specifier
- Type: list of used while monitoring the thermal zone.
+ Type: list of used while monitoring the thermal zone.
phandles + sensor
specifier
<&adc>; /* pcb north */
/* hotspot = 100 * bandgap - 120 * adc + 484 */
- coefficients = <100 -120 484>;
+ coefficients = <100 -120 484>;
trips {
...
thermal-sensors = <&adc>;
/* hotspot = 1 * adc + 6000 */
- coefficients = <1 6000>;
+ coefficients = <1 6000>;
(d) - Board thermal
implemented in this driver.
Specification of the chip can be found here:
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
Cross-referencing from reStructuredText
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. highlight:: none
-
To cross-reference the functions and types defined in the kernel-doc comments
from reStructuredText documents, please use the `Sphinx C Domain`_
references. For example::
Function documentation
----------------------
-.. highlight:: c
-
The general format of a function and function-like macro kernel-doc comment is::
/**
Converting DocBook to Sphinx
----------------------------
-.. highlight:: none
-
Over time, we expect all of the documents under ``Documentation/DocBook`` to be
converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,
PAGE_SIZE is used as alignment.
PCI-PCI bridge can be specified, if resource
windows need to be expanded.
+ To specify the alignment for several
+ instances of a device, the PCI vendor,
+ device, subvendor, and subdevice may be
+ specified, e.g., 4096@pci:8086:9c22:103c:198f
ecrc= Enable/disable PCIe ECRC (transaction layer
end-to-end CRC checking).
bios: Use BIOS/firmware settings. This is the
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
- data message has been used up, rxrpc_kernel_data_delivered() should be
- called on it..
+ data message has been used up, rxrpc_kernel_data_consumed() should be
+ called on it.
- Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
- of. It is possible to get extra refs on all types of message for later
- freeing, but this may pin the state of a call until the message is finally
- freed.
+ Messages should be handed to rxrpc_kernel_free_skb() to dispose of. It
+ is possible to get extra refs on all types of message for later freeing,
+ but this may pin the state of a call until the message is finally freed.
(*) Accept an incoming call.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
- (*) Record the delivery of a data message and free it.
+ (*) Record the delivery of a data message.
- void rxrpc_kernel_data_delivered(struct sk_buff *skb);
+ void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
+ struct sk_buff *skb);
- This is used to record a data message as having been delivered and to
- update the ACK state for the call. The socket buffer will be freed.
+ This is used to record a data message as having been consumed and to
+ update the ACK state for the call. The message must still be passed to
+ rxrpc_kernel_free_skb() for disposal by the caller.
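 As an illustrative sketch only (the helper name and its use of pr_debug are
 hypothetical), the calling sequence described above might look like:

	static void example_data_done(struct rxrpc_call *call, struct sk_buff *skb)
	{
		bool last = rxrpc_kernel_is_data_last(skb);

		/* ... the payload has already been extracted from skb ... */

		/* Record consumption and update the call's ACK state ... */
		rxrpc_kernel_data_consumed(call, skb);

		/* ... then the caller still disposes of the message itself. */
		rxrpc_kernel_free_skb(skb);

		if (last)
			pr_debug("last data message in this sequence\n");
	}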
(*) Free a message.
Again, if you find the offending module(s), it(they) must be unloaded every time
before hibernation, and please report the problem with it(them).
-c) Advanced debugging
+c) Using the "test_resume" hibernation option
+
+/sys/power/disk generally tells the kernel what to do after creating a
+hibernation image. One of the available options is "test_resume" which
+causes the just created image to be used for immediate restoration. Namely,
+after doing:
+
+# echo test_resume > /sys/power/disk
+# echo disk > /sys/power/state
+
+a hibernation image will be created and a resume from it will be triggered
+immediately without involving the platform firmware in any way.
+
+That test can be used to check if failures to resume from hibernation are
+related to bad interactions with the platform firmware. That is, if the above
+works every time, but resume from actual hibernation does not work or is
+unreliable, the platform firmware may be responsible for the failures.
+
+On architectures and platforms that support using different kernels to restore
+hibernation images (that is, the kernel used to read the image from storage and
+load it into memory is different from the one included in the image) or support
+kernel address space randomization, it also can be used to check if failures
+to resume may be related to the differences between the restore and image
+kernels.
+
+d) Advanced debugging
In case that hibernation does not work on your system even in the minimal
configuration and compiling more drivers as modules is not practical or some
-Power Management Interface
-
-
-The power management subsystem provides a unified sysfs interface to
-userspace, regardless of what architecture or platform one is
-running. The interface exists in /sys/power/ directory (assuming sysfs
-is mounted at /sys).
-
-/sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'freeze',
-'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
-(Suspend-to-Disk).
-
-Writing to this file one of those strings causes the system to
-transition into that state. Please see the file
-Documentation/power/states.txt for a description of each of those
-states.
-
-
-/sys/power/disk controls the operating mode of the suspend-to-disk
-mechanism. Suspend-to-disk can be handled in several ways. We have a
-few options for putting the system to sleep - using the platform driver
-(e.g. ACPI or other suspend_ops), powering off the system or rebooting the
-system (for testing).
-
-Additionally, /sys/power/disk can be used to turn on one of the two testing
-modes of the suspend-to-disk mechanism: 'testproc' or 'test'. If the
-suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to
-/sys/power/state will cause the kernel to disable nonboot CPUs and freeze
-tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs. If it is
-in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel
-to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait
-for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs. Then,
-we are able to look in the log messages and work out, for example, which code
-is being slow and which device drivers are misbehaving.
-
-Reading from this file will display all supported modes and the currently
-selected one in brackets, for example
-
- [shutdown] reboot test testproc
-
-Writing to this file will accept one of
-
- 'platform' (only if the platform supports it)
- 'shutdown'
- 'reboot'
- 'testproc'
- 'test'
-
-/sys/power/image_size controls the size of the image created by
-the suspend-to-disk mechanism. It can be written a string
-representing a non-negative integer that will be used as an upper
-limit of the image size, in bytes. The suspend-to-disk mechanism will
-do its best to ensure the image size will not exceed that number. However,
-if this turns out to be impossible, it will try to suspend anyway using the
-smallest image possible. In particular, if "0" is written to this file, the
-suspend image will be as small as possible.
-
-Reading from this file will display the current image size limit, which
-is set to 2/5 of available RAM by default.
-
-/sys/power/pm_trace controls the code which saves the last PM event point in
-the RTC across reboots, so that you can debug a machine that just hangs
-during suspend (or more commonly, during resume). Namely, the RTC is only
-used to save the last PM event point if this file contains '1'. Initially it
-contains '0' which may be changed to '1' by writing a string representing a
-nonzero integer into it.
-
-To use this debugging feature you should attempt to suspend the machine, then
-reboot it and run
-
- dmesg -s 1000000 | grep 'hash matches'
-
-CAUTION: Using it will cause your machine's real-time (CMOS) clock to be
-set to a random invalid time after a resume.
+Power Management Interface for System Sleep
+
+Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+The power management subsystem provides userspace with a unified sysfs interface
+for system sleep regardless of the underlying system architecture or platform.
+The interface is located in the /sys/power/ directory (assuming that sysfs is
+mounted at /sys).
+
+/sys/power/state is the system sleep state control file.
+
+Reading from it returns a list of supported sleep states, encoded as:
+
+'freeze' (Suspend-to-Idle)
+'standby' (Power-On Suspend)
+'mem' (Suspend-to-RAM)
+'disk' (Suspend-to-Disk)
+
+Suspend-to-Idle is always supported. Suspend-to-Disk is always supported
+too as long as the kernel has been configured to support hibernation at all
+(i.e. CONFIG_HIBERNATION is set in the kernel configuration file).  Support
+for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the
+platform.
+
+If one of the strings listed in /sys/power/state is written to it, the system
+will attempt to transition into the corresponding sleep state. Refer to
+Documentation/power/states.txt for a description of each of those states.
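For example, assuming 'mem' is listed as supported, writing it will start a
Suspend-to-RAM transition:

# echo mem > /sys/power/state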
+
+/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
+Specifically, it tells the kernel what to do after creating a hibernation image.
+
+Reading from it returns a list of supported options encoded as:
+
+'platform' (put the system into sleep using a platform-provided method)
+'shutdown' (shut the system down)
+'reboot' (reboot the system)
+'suspend' (trigger a Suspend-to-RAM transition)
+'test_resume' (resume-after-hibernation test mode)
+
+The currently selected option is printed in square brackets.
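For example, on a system without the 'platform' option and with 'shutdown'
selected, reading the file might return something like:

[shutdown] reboot suspend test_resume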
+
+The 'platform' option is only available if the platform provides a special
+mechanism to put the system to sleep after creating a hibernation image (ACPI
+does that, for example). The 'suspend' option is available if Suspend-to-RAM
+is supported. Refer to Documentation/power/basic_pm_debugging.txt for the
+description of the 'test_resume' option.
+
+To select an option, write the string representing it to /sys/power/disk.
+
+/sys/power/image_size controls the size of hibernation images.
+
+A string representing a non-negative integer can be written to it and will be
+used as a best-effort upper limit of the image size, in bytes.  The hibernation
+core will do its best to ensure that the image size will not exceed that number.
+However, if that turns out to be impossible to achieve, a hibernation image will
+still be created and its size will be as small as possible. In particular,
+writing '0' to this file will enforce hibernation images to be as small as
+possible.
+
+Reading from this file returns the current image size limit, which is set to
+around 2/5 of available RAM by default.
+
+/sys/power/pm_trace controls the PM trace mechanism saving the last suspend
+or resume event point in the RTC across reboots.
+
+It helps to debug hard lockups or reboots caused by device driver failures
+that occur during system suspend or resume (resume failures being more common).
+
+If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
+event point in turn will be stored in the RTC memory (overwriting the actual
+RTC information), so it will survive a system crash if one occurs right after
+storing it and it can be used later to identify the driver that caused the crash
+to happen (see Documentation/power/s2ram.txt for more information).
+
+Initially it contains '0' which may be changed to '1' by writing a string
+representing a nonzero integer into it.
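After rebooting from a failed resume, the saved fingerprint can be looked up in
the kernel log, for example with:

# dmesg -s 1000000 | grep 'hash matches'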
caption a.headerlink { opacity: 0; }
caption a.headerlink:hover { opacity: 1; }
- /* inline literal: drop the borderbox and red color */
+ /* inline literal: drop the borderbox, padding and red color */
code, .rst-content tt, .rst-content code {
color: inherit;
border: none;
+ padding: unset;
background: inherit;
font-size: 85%;
}
F: drivers/gpu/drm/arc/
F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
+ARM ARCHITECTED TIMER DRIVER
+M: Mark Rutland <mark.rutland@arm.com>
+M: Marc Zyngier <marc.zyngier@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/include/asm/arch_timer.h
+F: arch/arm64/include/asm/arch_timer.h
+F: drivers/clocksource/arm_arch_timer.c
+
ARM HDLCD DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
N: sun[x456789]i
+F: arch/arm/boot/dts/ntc-gr8*
ARM/Allwinner SoC Clock Support
M: Emilio López <emilio@elopez.com.ar>
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
M: Antoine Tenart <antoine.tenart@free-electrons.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-alpine/
F: arch/arm/boot/dts/alpine*
ARM/OXNAS platform support
M: Neil Armstrong <narmstrong@baylibre.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-oxnas/
F: arch/arm/boot/dts/oxnas*
F: arch/arm/boot/dts/bcm5301x*.dtsi
F: arch/arm/boot/dts/bcm470*
+BROADCOM BCM53573 ARM ARCHITECTURE
+M: Rafał Miłecki <rafal@milecki.pl>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: arch/arm/boot/dts/bcm53573*
+F: arch/arm/boot/dts/bcm47189*
+
BROADCOM BCM63XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/edac/sb_edac.c
+EDAC-SKYLAKE
+M: Tony Luck <tony.luck@intel.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/skx_edac.c
+
EDAC-XGENE
APPLIED MICRO (APM) X-GENE SOC EDAC
M: Loc Ho <lho@apm.com>
S: Supported
W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-F: drivers/infiniband/hw/rxe/
+F: drivers/infiniband/sw/rxe/
F: include/uapi/rdma/rdma_user_rxe.h
MEMBARRIER SUPPORT
VERSION = 4
PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
-PHONY += gcc-plugins
-gcc-plugins: scripts_basic
-ifdef CONFIG_GCC_PLUGINS
- $(Q)$(MAKE) $(build)=scripts/gcc-plugins
-endif
- @:
-
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
endchoice
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+ bool
+ help
+ An architecture should select this if it can walk the kernel stack
+ frames to determine if an object is part of either the arguments
+ or local variables (i.e. that it excludes saved return addresses,
+ and similar) by implementing an inline arch_within_stack_frames(),
+ which is used by CONFIG_HARDENED_USERCOPY.
+
config HAVE_CONTEXT_TRACKING
bool
help
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it with rest of callee_regs
- ld.as r12, [r12, PT_user_r25]
+ ld r12, [r12, PT_user_r25]
PUSH r12
#else
PUSH r25
; SP is back to start of pt_regs
#ifdef CONFIG_ARC_CURR_IN_REG
- st.as r12, [sp, PT_user_r25]
+ st r12, [sp, PT_user_r25]
#endif
.endm
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
- TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
/* Machine specific ELF Hdr flags */
#define EF_ARC_OSABI_MSK 0x00000f00
-#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
-#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
+
+#define EF_ARC_OSABI_V3 0x00000300 /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4 0x00000400 /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4
+#endif
typedef unsigned long elf_greg_t;
typedef unsigned long elf_fpregset_t;
extern void __divdf3(void);
extern void __floatunsidf(void);
extern void __floatunsisf(void);
+extern void __udivdi3(void);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divdf3);
EXPORT_SYMBOL(__floatunsidf);
EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
/* ARC optimised assembler routines */
EXPORT_SYMBOL(memset);
}
eflags = x->e_flags;
- if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+ if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
pr_err("ABI mismatch - you need newer toolchain\n");
force_sigsegv(SIGSEGV, current);
return 0;
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
- n += scnprintf(buf + n, len - n,
- "OS ABI [v3]\t: no-legacy-syscalls\n");
+ n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
+ EF_ARC_OSABI_CURRENT >> 8,
+ EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
+ "no-legacy-syscalls" : "64-bit data any register aligned");
return buf;
}
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ /*
+	 * Only the master CPU needs to execute the rest of this function:
+	 * - Assume SMP so all cores will have the same cache config, so
+	 *   any geometry checks will be the same for all
+	 * - IOC setup / DMA callbacks only need to be set up once
+ */
+ if (cpu)
+ return;
+
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
return kmap_high(page);
}
+EXPORT_SYMBOL(kmap);
void *kmap_atomic(struct page *page)
{
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU
config DEBUG_BRCMSTB_UART
bool "Use BRCMSTB UART for low-level debug"
depends on ARCH_BRCMSTB
- select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
- their output to the first serial port on these devices.
+ their output to the first serial port on these devices. The
+	  UART physical and virtual addresses are automatically provided
+ based on the chip identification register value.
If you have a Broadcom STB chip and would like early print
messages to appear over the UART, select this option.
via SCIF2 on Renesas R-Car H1 (R8A7779).
config DEBUG_RCAR_GEN2_SCIF0
- bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7793"
- depends on ARCH_R8A7790 || ARCH_R8A7791 || ARCH_R8A7793
+ bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7792/R8A7793"
+ depends on ARCH_R8A7790 || ARCH_R8A7791 || ARCH_R8A7792 || ARCH_R8A7793
help
Say Y here if you want kernel low-level debugging support
- via SCIF0 on Renesas R-Car H2 (R8A7790), M2-W (R8A7791), or
- M2-N (R8A7793).
+ via SCIF0 on Renesas R-Car H2 (R8A7790), M2-W (R8A7791), V2H
+ (R8A7792), or M2-N (R8A7793).
config DEBUG_RCAR_GEN2_SCIF2
bool "Kernel low-level debugging messages via SCIF2 on R8A7794"
default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ default "debug/brcmstb.S" if DEBUG_BRCMSTB_UART
default "mach/debug-macro.S"
# Compatibility options for PL01x
default 0xe6e60000 if DEBUG_RCAR_GEN2_SCIF0
default 0xe8008000 if DEBUG_R7S72100_SCIF2
default 0xf0000be0 if ARCH_EBSA110
- default 0xf040ab00 if DEBUG_BRCMSTB_UART
default 0xf1012000 if DEBUG_MVEBU_UART0_ALTERNATE
default 0xf1012100 if DEBUG_MVEBU_UART1_ALTERNATE
default 0xf7fc9000 if DEBUG_BERLIN_UART
default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
default 0xfb00c000 if DEBUG_AT91_SAMA5D4_USART3
default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
- default 0xfc40ab00 if DEBUG_BRCMSTB_UART
default 0xfc705000 if DEBUG_ZTE_ZX
default 0xfcfe8600 if DEBUG_BCM63XX_UART
default 0xfd000000 if DEBUG_SPEAR3XX || DEBUG_SPEAR13XX
DEBUG_ALPINE_UART0 || \
DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
DEBUG_DAVINCI_DA8XX_UART2 || \
- DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \
- DEBUG_BRCMSTB_UART
+ DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2
config DEBUG_UART_8250_PALMCHIP
bool "8250 UART is Palmchip BK-310x"
bool
depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
- (!DEBUG_TEGRA_UART || !ZBOOT_ROM)
+ (!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \
+ !DEBUG_BRCMSTB_UART
help
This option influences the normal decompressor output for
multiplatform kernels. Normally, multiplatform kernels disable
platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
+ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
ifeq ($(KBUILD_SRC),)
KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
else
KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
endif
endif
+endif
export TEXT_OFFSET GZFLAGS MMUEXT
* associativity as these may be erroneously set
* up by boot loader(s).
*/
- cache-size = <1048576>; // 1MB
- cache-sets = <4096>;
+ cache-size = <131072>; // 128KB
+ cache-sets = <512>;
cache-line-size = <32>;
arm,parity-disable;
- arm,tag-latency = <1>;
- arm,data-latency = <1 1>;
- arm,dirty-latency = <1>;
+ arm,tag-latency = <1 1 1>;
+ arm,data-latency = <1 1 1>;
};
scu: scu@1f000000 {
};
syscon {
- compatible = "arm,integrator-ap-syscon";
+ compatible = "arm,integrator-ap-syscon", "syscon";
reg = <0x11000000 0x100>;
interrupt-parent = <&pic>;
/* These are the logical module IRQs */
};
syscon {
- compatible = "arm,integrator-cp-syscon";
+ compatible = "arm,integrator-cp-syscon", "syscon";
reg = <0xcb000000 0x100>;
};
cpu_on = <0x84000003>;
};
- psci {
- compatible = "arm,psci";
- method = "smc";
- cpu_suspend = <0x84000001>;
- cpu_off = <0x84000002>;
- cpu_on = <0x84000003>;
- };
-
soc {
#address-cells = <1>;
#size-cells = <1>;
* Pin 41: BR_UART1_TXD
* Pin 44: BR_UART1_RXD
*/
- serial@70006000 {
+ serial@0,70006000 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};
* Pin 71: UART2_CTS_L
* Pin 74: UART2_RTS_L
*/
- serial@70006040 {
+ serial@0,70006040 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_PAGE_POISONING=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_PAGE_POISONING=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=8
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_PHYLIB=y
CONFIG_NET_ETHERNET=y
# CONFIG_MTD_CFI_I1 is not set
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_BLK_DEV_NBD=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
# CONFIG_MTD_MAP_BANK_WIDTH_2 is not set
# CONFIG_MTD_CFI_I1 is not set
CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
CONFIG_LDM_PARTITION=y
CONFIG_CMDLINE_PARTITION=y
CONFIG_ARCH_PXA=y
-CONFIG_MACH_PXA27X_DT=y
-CONFIG_MACH_PXA3XX_DT=y
CONFIG_ARCH_LUBBOCK=y
CONFIG_MACH_MAINSTONE=y
CONFIG_MACH_ZYLONITE300=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=8
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#ifdef CONFIG_CACHE_UNIPHIER
int uniphier_cache_init(void);
-int uniphier_cache_l2_is_enabled(void);
-void uniphier_cache_l2_touch_range(unsigned long start, unsigned long end);
-void uniphier_cache_l2_set_locked_ways(u32 way_mask);
#else
static inline int uniphier_cache_init(void)
{
return -ENODEV;
}
-
-static inline int uniphier_cache_l2_is_enabled(void)
-{
- return 0;
-}
-
-static inline void uniphier_cache_l2_touch_range(unsigned long start,
- unsigned long end)
-{
-}
-
-static inline void uniphier_cache_l2_set_locked_ways(u32 way_mask)
-{
-}
#endif
#endif /* __CACHE_UNIPHIER_H */
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(to, n, false);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(from, n, true);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
#else
+ check_object_size(from, n, true);
return arm_copy_to_user(to, from, n);
#endif
}
--- /dev/null
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/serial_reg.h>
+
+/* Physical register offset and virtual register offset */
+#define REG_PHYS_BASE 0xf0000000
+#define REG_VIRT_BASE 0xfc000000
+#define REG_PHYS_ADDR(x) ((x) + REG_PHYS_BASE)
+
+/* Product id can be read from here */
+#define SUN_TOP_CTRL_BASE REG_PHYS_ADDR(0x404000)
+
+#define UARTA_3390 REG_PHYS_ADDR(0x40a900)
+#define UARTA_7250 REG_PHYS_ADDR(0x40b400)
+#define UARTA_7268 REG_PHYS_ADDR(0x40c000)
+#define UARTA_7271 UARTA_7268
+#define UARTA_7364 REG_PHYS_ADDR(0x40b000)
+#define UARTA_7366 UARTA_7364
+#define UARTA_74371 REG_PHYS_ADDR(0x406b00)
+#define UARTA_7439 REG_PHYS_ADDR(0x40a900)
+#define UARTA_7445 REG_PHYS_ADDR(0x40ab00)
+
+#define UART_SHIFT 2
+
+#define checkuart(rp, rv, family_id, family) \
+ /* Load family id */ \
+ ldr rp, =family_id ; \
+ /* Compare SUN_TOP_CTRL value against it */ \
+ cmp rp, rv ; \
+ /* Passed test, load address */ \
+ ldreq rp, =UARTA_##family ; \
+ /* Jump to save UART address */ \
+ beq 91f
+
+ .macro addruart, rp, rv, tmp
+ adr \rp, 99f @ actual addr of 99f
+ ldr \rv, [\rp] @ linked addr is stored there
+ sub \rv, \rv, \rp @ offset between the two
+ ldr \rp, [\rp, #4] @ linked brcmstb_uart_config
+ sub \tmp, \rp, \rv @ actual brcmstb_uart_config
+ ldr \rp, [\tmp] @ Load brcmstb_uart_config
+ cmp \rp, #1 @ needs initialization?
+ bne 100f @ no; go load the addresses
+ mov \rv, #0 @ yes; record init is done
+ str \rv, [\tmp]
+
+ /* Check SUN_TOP_CTRL base */
+ ldr \rp, =SUN_TOP_CTRL_BASE @ load SUN_TOP_CTRL PA
+ ldr \rv, [\rp, #0] @ get register contents
+ and \rv, \rv, #0xffffff00 @ strip revision bits [7:0]
+
+ /* Chip specific detection starts here */
+20: checkuart(\rp, \rv, 0x33900000, 3390)
+21: checkuart(\rp, \rv, 0x72500000, 7250)
+22: checkuart(\rp, \rv, 0x72680000, 7268)
+23: checkuart(\rp, \rv, 0x72710000, 7271)
+24: checkuart(\rp, \rv, 0x73640000, 7364)
+25: checkuart(\rp, \rv, 0x73660000, 7366)
+26: checkuart(\rp, \rv, 0x07437100, 74371)
+27: checkuart(\rp, \rv, 0x74390000, 7439)
+28: checkuart(\rp, \rv, 0x74450000, 7445)
+
+ /* No valid UART found */
+90: mov \rp, #0
+ /* fall through */
+
+ /* Record whichever UART we chose */
+91: str \rp, [\tmp, #4] @ Store in brcmstb_uart_phys
+ cmp \rp, #0 @ Valid UART address?
+ bne 92f @ Yes, go process it
+ str \rp, [\tmp, #8] @ Store 0 in brcmstb_uart_virt
+ b 100f @ Done
+92: and \rv, \rp, #0xffffff @ offset within 16MB section
+ add \rv, \rv, #REG_VIRT_BASE
+ str \rv, [\tmp, #8] @ Store in brcmstb_uart_virt
+ b 100f
+
+ .align
+99: .word .
+ .word brcmstb_uart_config
+ .ltorg
+
+ /* Load previously selected UART address */
+100: ldr \rp, [\tmp, #4] @ Load brcmstb_uart_phys
+ ldr \rv, [\tmp, #8] @ Load brcmstb_uart_virt
+ .endm
+
+ .macro store, rd, rx:vararg
+ str \rd, \rx
+ .endm
+
+ .macro load, rd, rx:vararg
+ ldr \rd, \rx
+ .endm
+
+ .macro senduart,rd,rx
+ store \rd, [\rx, #UART_TX << UART_SHIFT]
+ .endm
+
+ .macro busyuart,rd,rx
+1002: load \rd, [\rx, #UART_LSR << UART_SHIFT]
+ and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ bne 1002b
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
+
+/*
+ * Storage for the state maintained by the macros above.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-bcm/brcmstb.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the symbol/storage right here, since common.c
+ * isn't included in the decompressor build. This symbol gets put in .text
+ * even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+#if defined(ZIMAGE)
+brcmstb_uart_config:
+ /* Debug UART initialization required */
+ .word 1
+ /* Debug UART physical address */
+ .word 0
+ /* Debug UART virtual address */
+ .word 0
+#endif
bl __und_fault
__und_svc_finish:
+ get_thread_info tsk
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
svc_exit r5 @ return from exception
UNWIND(.fnend )
mm_segment_t fs;
long ret, err, i;
- if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+ if (maxevents <= 0 ||
+ maxevents > (INT_MAX/sizeof(*kbuf)) ||
+ maxevents > (INT_MAX/sizeof(*events)))
return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+ return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
+ if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+ return -EFAULT;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
+ int ret;
if (!vgic_present)
return -ENXIO;
- return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+ mutex_lock(&kvm->lock);
+ ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+ mutex_unlock(&kvm->lock);
+ return ret;
}
case KVM_ARM_SET_DEVICE_ADDR: {
struct kvm_arm_device_addr dev_addr;
smp_rmb();
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
- if (is_error_pfn(pfn))
+ if (is_error_noslot_pfn(pfn))
return -EFAULT;
if (kvm_is_device_pfn(pfn)) {
This enables support for the Broadcom BCM2835 and BCM2836 SoCs.
This SoC is used in the Raspberry Pi and Roku 2 devices.
+config ARCH_BCM_53573
+ bool "Broadcom BCM53573 SoC series support"
+ depends on ARCH_MULTI_V7
+ select ARCH_BCM_IPROC
+ select HAVE_ARM_ARCH_TIMER
+ help
+	  The BCM53573 series is a set of SoCs using ARM Cortex-A7 CPUs with
+	  wireless embedded in the chipset.
+	  This SoC line is mostly used in home routers and is a cheaper
+	  alternative to the Northstar family.
+
+ The base chip is BCM53573 and there are some packaging modifications
+ like BCM47189 and BCM47452.
+
config ARCH_BCM_63XX
bool "Broadcom BCM63xx DSL SoC"
depends on ARCH_MULTI_V7
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+/*
+ * Storage for debug-macro.S's state.
+ *
+ * This must be in .data not .bss so that it gets initialized each time the
+ * kernel is loaded. The data is declared here rather than debug-macro.S so
+ * that multiple inclusions of debug-macro.S point at the same data.
+ */
+u32 brcmstb_uart_config[3] = {
+ /* Debug UART initialization required */
+ 1,
+ /* Debug UART physical address */
+ 0,
+ /* Debug UART virtual address */
+ 0,
+};
+
static void __init brcmstb_init_irq(void)
{
irqchip_init();
menuconfig ARCH_CLPS711X
bool "Cirrus Logic EP721x/EP731x-based"
depends on ARCH_MULTI_V4T
- select ARCH_REQUIRE_GPIOLIB
select AUTO_ZRELADDR
select CLKSRC_OF
select CLPS711X_TIMER
select COMMON_CLK
select CPU_ARM720T
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select MFD_SYSCON
select OF_IRQ
select USE_OF
CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
CLK("da830-mmc.0", NULL, &mmcsd0_clk),
CLK("da830-mmc.1", NULL, &mmcsd1_clk),
+ CLK("ti-aemif", NULL, &aemif_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, "usb11", &usb11_clk),
CLK(NULL, "usb20", &usb20_clk),
OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1",
NULL),
OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
+ OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", NULL),
{}
};
depends on ARCH_MULTI_V7
select ARCH_HAS_BANDGAP
select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
select COMMON_CLK_SAMSUNG
static struct map_desc exynos4_iodesc[] __initdata = {
{
- .virtual = (unsigned long)S5P_VA_CMU,
- .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
- .length = SZ_128K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_COREPERI_BASE,
.pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
.length = SZ_8K,
.type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_DMC0,
- .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_DMC1,
- .pfn = __phys_to_pfn(EXYNOS4_PA_DMC1),
- .length = SZ_64K,
- .type = MT_DEVICE,
},
};
#define EXYNOS_PA_CHIPID 0x10000000
-#define EXYNOS4_PA_CMU 0x10030000
-
-#define EXYNOS4_PA_DMC0 0x10400000
-#define EXYNOS4_PA_DMC1 0x10410000
-
#define EXYNOS4_PA_COREPERI 0x10500000
#endif /* __ASM_ARCH_MAP_H */
obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += cpuidle-imx6sl.o
obj-$(CONFIG_SOC_IMX6SX) += cpuidle-imx6sx.o
+obj-$(CONFIG_SOC_IMX6UL) += cpuidle-imx6sx.o
endif
ifdef CONFIG_SND_IMX_SOC
void imx_anatop_pre_suspend(void);
void imx_anatop_post_resume(void);
int imx6_set_lpm(enum mxc_cpu_pwr_mode mode);
-void imx6q_set_int_mem_clk_lpm(bool enable);
+void imx6_set_int_mem_clk_lpm(bool enable);
void imx6sl_set_wait_clk(bool enter);
int imx_mmdc_get_ddr_type(void);
int __init imx6q_cpuidle_init(void)
{
/* Set INT_MEM_CLK_LPM bit to get a reliable WAIT mode support */
- imx6q_set_int_mem_clk_lpm(true);
+ imx6_set_int_mem_clk_lpm(true);
return cpuidle_register(&imx6q_cpuidle_driver, NULL);
}
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
+#include <asm/cacheflush.h>
#include <asm/cpuidle.h>
#include <asm/suspend.h>
static int imx6sx_idle_finish(unsigned long val)
{
+ /*
+	 * For the Cortex-A7, which has an internal L2
+	 * cache, we need to flush it before powering
+	 * down the ARM platform. Since flushing the L1
+	 * cache again here has very little overhead
+	 * compared to adding conditional code for the
+	 * L2 cache type, just calling flush_cache_all()
+	 * is fine.
+ */
+ flush_cache_all();
cpu_do_idle();
return 0;
int __init imx6sx_cpuidle_init(void)
{
+ imx6_set_int_mem_clk_lpm(true);
imx6_enable_rbc(false);
/*
* set ARM power up/down timing to the fastest,
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
+ /*
+ * Clear the OF_POPULATED flag set in of_irq_init so that
+ * later the GPC power domain driver will not be skipped.
+ */
+ of_node_clear_flag(node, OF_POPULATED);
+
return 0;
}
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
- armadillo5x0_smc911x_resources[1].start =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
- armadillo5x0_smc911x_resources[1].end =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
- platform_add_devices(devices, ARRAY_SIZE(devices));
- imx_add_gpio_keys(&armadillo5x0_button_data);
imx31_add_imx_i2c1(NULL);
/* Register UART */
imx31_add_imx_uart0(&uart_pdata);
imx31_add_imx_uart1(&uart_pdata);
- /* SMSC9118 IRQ pin */
- gpio_direction_input(MX31_PIN_GPIO1_0);
-
- /* Register SDHC */
- imx31_add_mxc_mmc(0, &sdhc_pdata);
-
/* Register FB */
imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&mx3fb_pdata);
/* set NAND page size to 2k if not configured via boot mode pins */
imx_writel(imx_readl(mx3_ccm_base + MXC_CCM_RCSR) | (1 << 30),
mx3_ccm_base + MXC_CCM_RCSR);
+}
+
+static void __init armadillo5x0_late(void)
+{
+ armadillo5x0_smc911x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
+ armadillo5x0_smc911x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ imx_add_gpio_keys(&armadillo5x0_button_data);
+
+ /* SMSC9118 IRQ pin */
+ gpio_direction_input(MX31_PIN_GPIO1_0);
+
+ /* Register SDHC */
+ imx31_add_mxc_mmc(0, &sdhc_pdata);
/* RTC */
/* Get RTC IRQ and register the chip */
- if (gpio_request(ARMADILLO5X0_RTC_GPIO, "rtc") == 0) {
- if (gpio_direction_input(ARMADILLO5X0_RTC_GPIO) == 0)
- armadillo5x0_i2c_rtc.irq = gpio_to_irq(ARMADILLO5X0_RTC_GPIO);
+ if (!gpio_request(ARMADILLO5X0_RTC_GPIO, "rtc")) {
+ if (!gpio_direction_input(ARMADILLO5X0_RTC_GPIO))
+ armadillo5x0_i2c_rtc.irq =
+ gpio_to_irq(ARMADILLO5X0_RTC_GPIO);
else
gpio_free(ARMADILLO5X0_RTC_GPIO);
}
+
if (armadillo5x0_i2c_rtc.irq == 0)
pr_warn("armadillo5x0_init: failed to get RTC IRQ\n");
i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1);
/* USB */
-
usbotg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
ULPI_OTG_DRVVBUS_EXT);
if (usbotg_pdata.otg)
.init_irq = mx31_init_irq,
.init_time = armadillo5x0_timer_init,
.init_machine = armadillo5x0_init,
+ .init_late = armadillo5x0_late,
.restart = mxc_restart,
MACHINE_END
static void __init visstrim_m10_board_init(void)
{
int ret;
- int mo_version;
imx27_soc_init();
visstrim_m10_revision();
if (ret)
pr_err("Failed to setup pins (%d)\n", ret);
- ret = gpio_request_array(visstrim_m10_gpios,
- ARRAY_SIZE(visstrim_m10_gpios));
- if (ret)
- pr_err("Failed to request gpios (%d)\n", ret);
-
imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
imx27_add_imx_uart0(&uart_pdata);
imx27_add_mxc_mmc(0, &visstrim_m10_sdhc_pdata);
imx27_add_mxc_ehci_otg(&visstrim_m10_usbotg_pdata);
imx27_add_fec(NULL);
- imx_add_gpio_keys(&visstrim_gpio_keys_platform_data);
+
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+}
+
+static void __init visstrim_m10_late_init(void)
+{
+ int mo_version, ret;
+
+ ret = gpio_request_array(visstrim_m10_gpios,
+ ARRAY_SIZE(visstrim_m10_gpios));
+ if (ret)
+ pr_err("Failed to request gpios (%d)\n", ret);
+
+ imx_add_gpio_keys(&visstrim_gpio_keys_platform_data);
+
imx_add_platform_device("mx27vis", 0, NULL, 0, &snd_mx27vis_pdata,
sizeof(snd_mx27vis_pdata));
platform_device_register_resndata(NULL, "soc-camera-pdrv", 0, NULL, 0,
&iclink_tvp5150, sizeof(iclink_tvp5150));
+
gpio_led_register_device(0, &visstrim_m10_led_data);
/* Use mother board version to decide what video devices we shall use */
visstrim_deinterlace_init();
visstrim_analog_camera_init();
}
+
visstrim_coda_init();
}
.init_irq = mx27_init_irq,
.init_time = visstrim_m10_timer_init,
.init_machine = visstrim_m10_board_init,
+ .init_late = visstrim_m10_late_init,
.restart = mxc_restart,
MACHINE_END
#include <asm/mach/map.h>
#include "common.h"
+#include "cpuidle.h"
static void __init imx6ul_enet_clk_init(void)
{
static void __init imx6ul_init_late(void)
{
+ imx6sx_cpuidle_init();
+
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
}
mxc_iomux_setup_multiple_pins(kzm_pins,
ARRAY_SIZE(kzm_pins), "kzm");
- kzm_init_ext_uart();
- kzm_init_smsc9118();
kzm_init_imx_uart();
pr_info("Clock input source is 26MHz\n");
}
+static void __init kzm_late_init(void)
+{
+ kzm_init_ext_uart();
+ kzm_init_smsc9118();
+}
+
/*
* This structure defines static mappings for the kzm-arm11-01 board.
*/
.init_irq = mx31_init_irq,
.init_time = kzm_timer_init,
.init_machine = kzm_board_init,
+ .init_late = kzm_late_init,
.restart = mxc_restart,
MACHINE_END
imx21_add_imx_uart0(&uart_pdata_rts);
imx21_add_imx_uart2(&uart_pdata_norts);
imx21_add_imx_uart3(&uart_pdata_rts);
- imx21_add_mxc_mmc(0, &mx21ads_sdhc_pdata);
imx21_add_mxc_nand(&mx21ads_nand_board_info);
- platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
-
imx21_add_imx_fb(&mx21ads_fb_data);
+}
+
+static void __init mx21ads_late_init(void)
+{
+ imx21_add_mxc_mmc(0, &mx21ads_sdhc_pdata);
+
+ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
mx21ads_cs8900_resources[1].start =
gpio_to_irq(MX21ADS_CS8900A_IRQ_GPIO);
.init_early = imx21_init_early,
.init_irq = mx21_init_irq,
.init_time = mx21ads_timer_init,
- .init_machine = mx21ads_board_init,
+ .init_machine = mx21ads_board_init,
+ .init_late = mx21ads_late_init,
.restart = mxc_restart,
MACHINE_END
static void __init mx27pdk_init(void)
{
- int ret;
imx27_soc_init();
mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins),
"mx27pdk");
- mx27_3ds_sdhc1_enable_level_translator();
imx27_add_imx_uart0(&uart_pdata);
imx27_add_fec(NULL);
imx27_add_imx_keypad(&mx27_3ds_keymap_data);
- imx27_add_mxc_mmc(0, &sdhc1_pdata);
imx27_add_imx2_wdt();
+
+ imx27_add_spi_imx1(&spi2_pdata);
+ imx27_add_spi_imx0(&spi1_pdata);
+
+ imx27_add_imx_i2c(0, &mx27_3ds_i2c0_data);
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ imx27_add_imx_fb(&mx27_3ds_fb_data);
+
+ imx27_add_imx_ssi(0, &mx27_3ds_ssi_pdata);
+}
+
+static void __init mx27pdk_late_init(void)
+{
+ int ret;
+
+ mx27_3ds_sdhc1_enable_level_translator();
+ imx27_add_mxc_mmc(0, &sdhc1_pdata);
+
otg_phy_init();
if (otg_mode_host) {
if (!otg_mode_host)
imx27_add_fsl_usb2_udc(&otg_device_pdata);
- imx27_add_spi_imx1(&spi2_pdata);
- imx27_add_spi_imx0(&spi1_pdata);
mx27_3ds_spi_devs[0].irq = gpio_to_irq(PMIC_INT);
spi_register_board_info(mx27_3ds_spi_devs,
- ARRAY_SIZE(mx27_3ds_spi_devs));
+ ARRAY_SIZE(mx27_3ds_spi_devs));
if (mxc_expio_init(MX27_CS5_BASE_ADDR, IMX_GPIO_NR(3, 28)))
pr_warn("Init of the debugboard failed, all devices on the debugboard are unusable.\n");
- imx27_add_imx_i2c(0, &mx27_3ds_i2c0_data);
- platform_add_devices(devices, ARRAY_SIZE(devices));
- imx27_add_imx_fb(&mx27_3ds_fb_data);
ret = gpio_request_array(mx27_3ds_camera_gpios,
ARRAY_SIZE(mx27_3ds_camera_gpios));
}
imx27_add_mx2_camera(&mx27_3ds_cam_pdata);
- imx27_add_imx_ssi(0, &mx27_3ds_ssi_pdata);
imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
}
.init_irq = mx27_init_irq,
.init_time = mx27pdk_timer_init,
.init_machine = mx27pdk_init,
+ .init_late = mx27pdk_late_init,
.restart = mxc_restart,
MACHINE_END
i2c_register_board_info(1, mx27ads_i2c_devices,
ARRAY_SIZE(mx27ads_i2c_devices));
imx27_add_imx_i2c(1, &mx27ads_i2c1_data);
- mx27ads_regulator_init();
imx27_add_imx_fb(&mx27ads_fb_data);
+
+ imx27_add_fec(NULL);
+ imx27_add_mxc_w1();
+}
+
+static void __init mx27ads_late_init(void)
+{
+ mx27ads_regulator_init();
+
imx27_add_mxc_mmc(0, &sdhc1_pdata);
imx27_add_mxc_mmc(1, &sdhc2_pdata);
- imx27_add_fec(NULL);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
- imx27_add_mxc_w1();
}
static void __init mx27ads_timer_init(void)
.init_irq = mx27_init_irq,
.init_time = mx27ads_timer_init,
.init_machine = mx27ads_board_init,
+ .init_late = mx27ads_late_init,
.restart = mxc_restart,
MACHINE_END
static void __init mx31_3ds_init(void)
{
- int ret;
-
imx31_soc_init();
/* Configure SPI1 IOMUX */
imx31_add_mxc_nand(&mx31_3ds_nand_board_info);
imx31_add_spi_imx1(&spi1_pdata);
+
+ imx31_add_imx_keypad(&mx31_3ds_keymap_data);
+
+ imx31_add_imx2_wdt();
+ imx31_add_imx_i2c0(&mx31_3ds_i2c0_data);
+
+ imx31_add_spi_imx0(&spi0_pdata);
+ imx31_add_ipu_core();
+ imx31_add_mx3_sdc_fb(&mx3fb_pdata);
+
+ imx31_add_imx_ssi(0, &mx31_3ds_ssi_pdata);
+
+ imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
+}
+
+static void __init mx31_3ds_late(void)
+{
+ int ret;
+
mx31_3ds_spi_devs[0].irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(mx31_3ds_spi_devs,
- ARRAY_SIZE(mx31_3ds_spi_devs));
+ ARRAY_SIZE(mx31_3ds_spi_devs));
platform_add_devices(devices, ARRAY_SIZE(devices));
- imx31_add_imx_keypad(&mx31_3ds_keymap_data);
-
mx31_3ds_usbotg_init();
if (otg_mode_host) {
otg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
if (mxc_expio_init(MX31_CS5_BASE_ADDR, IOMUX_TO_GPIO(MX31_PIN_GPIO1_1)))
printk(KERN_WARNING "Init of the debug board failed, all "
- "devices on the debug board are unusable.\n");
- imx31_add_imx2_wdt();
- imx31_add_imx_i2c0(&mx31_3ds_i2c0_data);
- imx31_add_mxc_mmc(0, &sdhc1_pdata);
+ "devices on the debug board are unusable.\n");
- imx31_add_spi_imx0(&spi0_pdata);
- imx31_add_ipu_core();
- imx31_add_mx3_sdc_fb(&mx3fb_pdata);
+ imx31_add_mxc_mmc(0, &sdhc1_pdata);
/* CSI */
/* Camera power: default - off */
}
mx31_3ds_init_camera();
-
- imx31_add_imx_ssi(0, &mx31_3ds_ssi_pdata);
-
- imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
}
static void __init mx31_3ds_timer_init(void)
.init_irq = mx31_init_irq,
.init_time = mx31_3ds_timer_init,
.init_machine = mx31_3ds_init,
+ .init_late = mx31_3ds_late,
.reserve = mx31_3ds_reserve,
.restart = mxc_restart,
MACHINE_END
iotable_init(mx31ads_io_desc, ARRAY_SIZE(mx31ads_io_desc));
}
-static void __init mx31ads_init_irq(void)
-{
- mx31_init_irq();
- mx31ads_init_expio();
-}
-
static void __init mx31ads_init(void)
{
imx31_soc_init();
- mxc_init_extuart();
mxc_init_imx_uart();
- mxc_init_i2c();
mxc_init_audio();
+}
+
+static void __init mx31ads_late(void)
+{
+ mx31ads_init_expio();
+ mxc_init_extuart();
+ mxc_init_i2c();
mxc_init_ext_ethernet();
}
.atag_offset = 0x100,
.map_io = mx31ads_map_io,
.init_early = imx31_init_early,
- .init_irq = mx31ads_init_irq,
+ .init_irq = mx31_init_irq,
.init_time = mx31ads_timer_init,
.init_machine = mx31ads_init,
+ .init_late = mx31ads_late,
.restart = mxc_restart,
MACHINE_END
* appropriate baseboard support code.
*/
+static unsigned int mx31lilly_pins[] __initdata = {
+ MX31_PIN_CTS1__CTS1,
+ MX31_PIN_RTS1__RTS1,
+ MX31_PIN_TXD1__TXD1,
+ MX31_PIN_RXD1__RXD1,
+ MX31_PIN_CTS2__CTS2,
+ MX31_PIN_RTS2__RTS2,
+ MX31_PIN_TXD2__TXD2,
+ MX31_PIN_RXD2__RXD2,
+ MX31_PIN_CSPI3_MOSI__RXD3,
+ MX31_PIN_CSPI3_MISO__TXD3,
+ MX31_PIN_CSPI3_SCLK__RTS3,
+ MX31_PIN_CSPI3_SPI_RDY__CTS3,
+};
+
+/* UART */
+static const struct imxuart_platform_data uart_pdata __initconst = {
+ .flags = IMXUART_HAVE_RTSCTS,
+};
+
/* SMSC ethernet support */
static struct resource smsc91x_resources[] = {
{
imx31_soc_init();
- switch (mx31lilly_baseboard) {
- case MX31LILLY_NOBOARD:
- break;
- case MX31LILLY_DB:
- mx31lilly_db_init();
- break;
- default:
- printk(KERN_ERR "Illegal mx31lilly_baseboard type %d\n",
- mx31lilly_baseboard);
- }
+ mxc_iomux_setup_multiple_pins(mx31lilly_pins,
+ ARRAY_SIZE(mx31lilly_pins), "mx31lily");
+
+ imx31_add_imx_uart0(&uart_pdata);
+ imx31_add_imx_uart1(&uart_pdata);
+ imx31_add_imx_uart2(&uart_pdata);
mxc_iomux_alloc_pin(MX31_PIN_CS4__CS4, "Ethernet CS");
imx31_add_spi_imx0(&spi0_pdata);
imx31_add_spi_imx1(&spi1_pdata);
- mc13783_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
- spi_register_board_info(&mc13783_dev, 1);
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+}
+
+static void __init mx31lilly_late_init(void)
+{
+ if (mx31lilly_baseboard == MX31LILLY_DB)
+ mx31lilly_db_init();
+
+ mc13783_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
+ spi_register_board_info(&mc13783_dev, 1);
smsc91x_resources[1].start =
gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
.init_early = imx31_init_early,
.init_irq = mx31_init_irq,
.init_time = mx31lilly_timer_init,
- .init_machine = mx31lilly_board_init,
+ .init_machine = mx31lilly_board_init,
+ .init_late = mx31lilly_late_init,
.restart = mxc_restart,
MACHINE_END
*/
static unsigned int mx31lite_pins[] = {
+ /* UART1 */
+ MX31_PIN_CTS1__CTS1,
+ MX31_PIN_RTS1__RTS1,
+ MX31_PIN_TXD1__TXD1,
+ MX31_PIN_RXD1__RXD1,
+ /* SPI 0 */
+ MX31_PIN_CSPI1_SCLK__SCLK,
+ MX31_PIN_CSPI1_MOSI__MOSI,
+ MX31_PIN_CSPI1_MISO__MISO,
+ MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
+ MX31_PIN_CSPI1_SS0__SS0,
+ MX31_PIN_CSPI1_SS1__SS1,
+ MX31_PIN_CSPI1_SS2__SS2,
/* LAN9117 IRQ pin */
IOMUX_MODE(MX31_PIN_SFS6, IOMUX_CONFIG_GPIO),
/* SPI 1 */
MX31_PIN_CSPI2_SS2__SS2,
};
+/* UART */
+static const struct imxuart_platform_data uart_pdata __initconst = {
+ .flags = IMXUART_HAVE_RTSCTS,
+};
+
+/* SPI */
+static int spi0_internal_chipselect[] = {
+ MXC_SPI_CS(0),
+ MXC_SPI_CS(1),
+ MXC_SPI_CS(2),
+};
+
+static const struct spi_imx_master spi0_pdata __initconst = {
+ .chipselect = spi0_internal_chipselect,
+ .num_chipselect = ARRAY_SIZE(spi0_internal_chipselect),
+};
+
static const struct mxc_nand_platform_data
mx31lite_nand_board_info __initconst = {
.width = 1,
* The MC13783 is the only hard-wired SPI device on the module.
*/
-static int spi_internal_chipselect[] = {
+static int spi1_internal_chipselect[] = {
MXC_SPI_CS(0),
};
static const struct spi_imx_master spi1_pdata __initconst = {
- .chipselect = spi_internal_chipselect,
- .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
+ .chipselect = spi1_internal_chipselect,
+ .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect),
};
static struct mc13xxx_platform_data mc13783_pdata __initdata = {
.num_resources = 1,
};
-
-
/*
* This structure defines the MX31 memory map.
*/
static void __init mx31lite_init(void)
{
- int ret;
-
imx31_soc_init();
- switch (mx31lite_baseboard) {
- case MX31LITE_NOBOARD:
- break;
- case MX31LITE_DB:
- mx31lite_db_init();
- break;
- default:
- printk(KERN_ERR "Illegal mx31lite_baseboard type %d\n",
- mx31lite_baseboard);
- }
-
mxc_iomux_setup_multiple_pins(mx31lite_pins, ARRAY_SIZE(mx31lite_pins),
"mx31lite");
+ imx31_add_imx_uart0(&uart_pdata);
+ imx31_add_spi_imx0(&spi0_pdata);
+
/* NOR and NAND flash */
platform_device_register(&physmap_flash_device);
imx31_add_mxc_nand(&mx31lite_nand_board_info);
imx31_add_spi_imx1(&spi1_pdata);
+
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+}
+
+static void __init mx31lite_late(void)
+{
+ int ret;
+
+ if (mx31lite_baseboard == MX31LITE_DB)
+ mx31lite_db_init();
+
mc13783_spi_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(&mc13783_spi_dev, 1);
if (usbh2_pdata.otg)
imx31_add_mxc_ehci_hs(2, &usbh2_pdata);
- regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
/* SMSC9117 IRQ pin */
ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_SFS6), "sms9117-irq");
if (ret)
.init_irq = mx31_init_irq,
.init_time = mx31lite_timer_init,
.init_machine = mx31lite_init,
+ .init_late = mx31lite_late,
.restart = mxc_restart,
MACHINE_END
"moboard");
platform_add_devices(devices, ARRAY_SIZE(devices));
- gpio_led_register_device(-1, &mx31moboard_led_pdata);
imx31_add_imx2_wdt();
- moboard_uart0_init();
imx31_add_imx_uart0(&uart0_pdata);
imx31_add_imx_uart4(&uart4_pdata);
imx31_add_spi_imx1(&moboard_spi1_pdata);
imx31_add_spi_imx2(&moboard_spi2_pdata);
+ mx31moboard_init_cam();
+
+ imx31_add_imx_ssi(0, &moboard_ssi_pdata);
+
+ pm_power_off = mx31moboard_poweroff;
+}
+
+static void __init mx31moboard_late(void)
+{
+ gpio_led_register_device(-1, &mx31moboard_led_pdata);
+
+ moboard_uart0_init();
+
gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3), "pmic-irq");
gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
moboard_spi_board_info[0].irq =
imx31_add_mxc_mmc(0, &sdhc1_pdata);
- mx31moboard_init_cam();
-
usb_xcvr_reset();
-
moboard_usbh2_init();
- imx31_add_imx_ssi(0, &moboard_ssi_pdata);
-
imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
- pm_power_off = mx31moboard_poweroff;
-
switch (mx31moboard_baseboard) {
case MX31NOBOARD:
break;
.init_irq = mx31_init_irq,
.init_time = mx31moboard_timer_init,
.init_machine = mx31moboard_init,
+ .init_late = mx31moboard_late,
.restart = mxc_restart,
MACHINE_END
*/
static void __init mx35_3ds_init(void)
{
- struct platform_device *imx35_fb_pdev;
-
imx35_soc_init();
mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads));
imx35_add_mxc_nand(&mx35pdk_nand_board_info);
imx35_add_sdhci_esdhc_imx(0, NULL);
- if (mxc_expio_init(MX35_CS5_BASE_ADDR, IMX_GPIO_NR(1, 1)))
- pr_warn("Init of the debugboard failed, all "
- "devices on the debugboard are unusable.\n");
imx35_add_imx_i2c0(&mx35_3ds_i2c0_data);
i2c_register_board_info(
imx35_add_ipu_core();
platform_device_register(&mx35_3ds_ov2640);
imx35_3ds_init_camera();
+}
+
+static void __init mx35_3ds_late_init(void)
+{
+ struct platform_device *imx35_fb_pdev;
+
+ if (mxc_expio_init(MX35_CS5_BASE_ADDR, IMX_GPIO_NR(1, 1)))
+ pr_warn("Init of the debugboard failed, all "
+ "devices on the debugboard are unusable.\n");
imx35_fb_pdev = imx35_add_mx3_sdc_fb(&mx3fb_pdata);
mx35_3ds_lcd.dev.parent = &imx35_fb_pdev->dev;
.init_irq = mx35_init_irq,
.init_time = mx35pdk_timer_init,
.init_machine = mx35_3ds_init,
+ .init_late = mx35_3ds_late_init,
.reserve = mx35_3ds_reserve,
.restart = mxc_restart,
MACHINE_END
if (ret)
printk(KERN_ERR "pca100: Failed to setup pins (%d)\n", ret);
- imx27_add_imx_ssi(0, &pca100_ssi_pdata);
-
imx27_add_imx_uart0(&uart_pdata);
- imx27_add_mxc_mmc(1, &sdhc_pdata);
-
imx27_add_mxc_nand(&pca100_nand_board_info);
/* only the i2c master 1 is used on this CPU card */
ARRAY_SIZE(pca100_spi_board_info));
imx27_add_spi_imx0(&pca100_spi0_data);
+ imx27_add_imx_fb(&pca100_fb_data);
+
+ imx27_add_fec(NULL);
+ imx27_add_imx2_wdt();
+ imx27_add_mxc_w1();
+}
+
+static void __init pca100_late_init(void)
+{
+ imx27_add_imx_ssi(0, &pca100_ssi_pdata);
+
+ imx27_add_mxc_mmc(1, &sdhc_pdata);
+
gpio_request(OTG_PHY_CS_GPIO, "usb-otg-cs");
gpio_direction_output(OTG_PHY_CS_GPIO, 1);
gpio_request(USBH2_PHY_CS_GPIO, "usb-host2-cs");
if (usbh2_pdata.otg)
imx27_add_mxc_ehci_hs(2, &usbh2_pdata);
-
- imx27_add_imx_fb(&pca100_fb_data);
-
- imx27_add_fec(NULL);
- imx27_add_imx2_wdt();
- imx27_add_mxc_w1();
}
static void __init pca100_timer_init(void)
.map_io = mx27_map_io,
.init_early = imx27_init_early,
.init_irq = mx27_init_irq,
- .init_machine = pca100_init,
+ .init_machine = pca100_init,
+ .init_late = pca100_late_init,
.init_time = pca100_timer_init,
.restart = mxc_restart,
MACHINE_END
*/
static void __init pcm037_init(void)
{
- int ret;
-
imx31_soc_init();
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
imx31_add_mxc_w1();
- /* LAN9217 IRQ pin */
- ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1), "lan9217-irq");
- if (ret)
- pr_warn("could not get LAN irq gpio\n");
- else {
- gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
- smsc911x_resources[1].start =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
- smsc911x_resources[1].end =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
- platform_device_register(&pcm037_eth);
- }
-
-
/* I2C adapters and devices */
i2c_register_board_info(1, pcm037_i2c_devices,
ARRAY_SIZE(pcm037_i2c_devices));
imx31_add_imx_i2c2(&pcm037_i2c2_data);
imx31_add_mxc_nand(&pcm037_nand_board_info);
- imx31_add_mxc_mmc(0, &sdhc_pdata);
imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&mx3fb_pdata);
- /* CSI */
- /* Camera power: default - off */
- ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), "mt9t031-power");
- if (!ret)
- gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), 1);
- else
- iclink_mt9t031.power = NULL;
-
- pcm037_init_camera();
-
- pcm970_sja1000_resources[1].start =
- gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
- pcm970_sja1000_resources[1].end =
- gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
- platform_device_register(&pcm970_sja1000);
-
if (otg_mode_host) {
otg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
ULPI_OTG_DRVVBUS_EXT);
if (!otg_mode_host)
imx31_add_fsl_usb2_udc(&otg_device_pdata);
-
}
static void __init pcm037_timer_init(void)
static void __init pcm037_init_late(void)
{
+ int ret;
+
+ /* LAN9217 IRQ pin */
+ ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1), "lan9217-irq");
+ if (!ret) {
+ gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ smsc911x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ smsc911x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ platform_device_register(&pcm037_eth);
+ } else {
+ pr_warn("could not get LAN irq gpio\n");
+ }
+
+ imx31_add_mxc_mmc(0, &sdhc_pdata);
+
+ /* CSI */
+ /* Camera power: default - off */
+ ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), "mt9t031-power");
+ if (!ret)
+ gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), 1);
+ else
+ iclink_mt9t031.power = NULL;
+
+ pcm037_init_camera();
+
+ pcm970_sja1000_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
+ pcm970_sja1000_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
+ platform_device_register(&pcm970_sja1000);
+
pcm037_eet_init_devices();
}
imx35_add_imx_uart0(&uart_pdata);
imx35_add_mxc_nand(&pcm037_nand_board_info);
- imx35_add_imx_ssi(0, &pcm043_ssi_pdata);
imx35_add_imx_uart1(&uart_pdata);
imx35_add_fsl_usb2_udc(&otg_device_pdata);
imx35_add_flexcan1();
+}
+
+static void __init pcm043_late_init(void)
+{
+ imx35_add_imx_ssi(0, &pcm043_ssi_pdata);
+
imx35_add_sdhci_esdhc_imx(0, &sd1_pdata);
}
.init_early = imx35_init_early,
.init_irq = mx35_init_irq,
.init_time = pcm043_timer_init,
- .init_machine = pcm043_init,
+ .init_machine = pcm043_init,
+ .init_late = pcm043_late_init,
.restart = mxc_restart,
MACHINE_END
mxc_init_imx_uart();
qong_init_nor_mtd();
- qong_init_fpga();
imx31_add_imx2_wdt();
}
.init_irq = mx31_init_irq,
.init_time = qong_timer_init,
.init_machine = qong_init,
+ .init_late = qong_init_fpga,
.restart = mxc_restart,
MACHINE_END
imx35_add_fec(NULL);
imx35_add_imx2_wdt();
+
+ imx35_add_imx_uart0(NULL);
+ imx35_add_imx_uart2(NULL);
+
+ imx35_add_ipu_core();
+ imx35_add_mx3_sdc_fb(&mx3fb_pdata);
+
+ imx35_add_fsl_usb2_udc(&otg_device_pdata);
+ imx35_add_mxc_ehci_hs(&usb_host_pdata);
+
+ imx35_add_mxc_nand(&vpr200_nand_board_info);
+ imx35_add_sdhci_esdhc_imx(0, NULL);
+}
+
+static void __init vpr200_late_init(void)
+{
imx_add_gpio_keys(&vpr200_gpio_keys_data);
platform_add_devices(devices, ARRAY_SIZE(devices));
else
gpio_direction_input(GPIO_PMIC_INT);
- imx35_add_imx_uart0(NULL);
- imx35_add_imx_uart2(NULL);
-
- imx35_add_ipu_core();
- imx35_add_mx3_sdc_fb(&mx3fb_pdata);
-
- imx35_add_fsl_usb2_udc(&otg_device_pdata);
- imx35_add_mxc_ehci_hs(&usb_host_pdata);
-
- imx35_add_mxc_nand(&vpr200_nand_board_info);
- imx35_add_sdhci_esdhc_imx(0, NULL);
-
vpr200_i2c_devices[1].irq = gpio_to_irq(GPIO_PMIC_INT);
i2c_register_board_info(0, vpr200_i2c_devices,
ARRAY_SIZE(vpr200_i2c_devices));
.init_irq = mx35_init_irq,
.init_time = vpr200_timer_init,
.init_machine = vpr200_board_init,
+ .init_late = vpr200_late_init,
.restart = mxc_restart,
MACHINE_END
*/
static unsigned int lilly_db_board_pins[] __initdata = {
- MX31_PIN_CTS1__CTS1,
- MX31_PIN_RTS1__RTS1,
- MX31_PIN_TXD1__TXD1,
- MX31_PIN_RXD1__RXD1,
- MX31_PIN_CTS2__CTS2,
- MX31_PIN_RTS2__RTS2,
- MX31_PIN_TXD2__TXD2,
- MX31_PIN_RXD2__RXD2,
- MX31_PIN_CSPI3_MOSI__RXD3,
- MX31_PIN_CSPI3_MISO__TXD3,
- MX31_PIN_CSPI3_SCLK__RTS3,
- MX31_PIN_CSPI3_SPI_RDY__CTS3,
MX31_PIN_SD1_DATA3__SD1_DATA3,
MX31_PIN_SD1_DATA2__SD1_DATA2,
MX31_PIN_SD1_DATA1__SD1_DATA1,
MX31_PIN_CONTRAST__CONTRAST,
};
-/* UART */
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
/* MMC support */
static int mxc_mmc1_get_ro(struct device *dev)
mxc_iomux_setup_multiple_pins(lilly_db_board_pins,
ARRAY_SIZE(lilly_db_board_pins),
"development board pins");
- imx31_add_imx_uart0(&uart_pdata);
- imx31_add_imx_uart1(&uart_pdata);
- imx31_add_imx_uart2(&uart_pdata);
imx31_add_mxc_mmc(0, &mmc_pdata);
mx31lilly_init_fb();
}
*/
static unsigned int litekit_db_board_pins[] __initdata = {
- /* UART1 */
- MX31_PIN_CTS1__CTS1,
- MX31_PIN_RTS1__RTS1,
- MX31_PIN_TXD1__TXD1,
- MX31_PIN_RXD1__RXD1,
- /* SPI 0 */
- MX31_PIN_CSPI1_SCLK__SCLK,
- MX31_PIN_CSPI1_MOSI__MOSI,
- MX31_PIN_CSPI1_MISO__MISO,
- MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
- MX31_PIN_CSPI1_SS0__SS0,
- MX31_PIN_CSPI1_SS1__SS1,
- MX31_PIN_CSPI1_SS2__SS2,
/* SDHC1 */
MX31_PIN_SD1_DATA0__SD1_DATA0,
MX31_PIN_SD1_DATA1__SD1_DATA1,
MX31_PIN_SD1_CMD__SD1_CMD,
};
-/* UART */
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
/* MMC */
static int gpio_det, gpio_wp;
.exit = mxc_mmc1_exit,
};
-/* SPI */
-
-static int spi_internal_chipselect[] = {
- MXC_SPI_CS(0),
- MXC_SPI_CS(1),
- MXC_SPI_CS(2),
-};
-
-static const struct spi_imx_master spi0_pdata __initconst = {
- .chipselect = spi_internal_chipselect,
- .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
-};
-
/* GPIO LEDs */
static const struct gpio_led litekit_leds[] __initconst = {
mxc_iomux_setup_multiple_pins(litekit_db_board_pins,
ARRAY_SIZE(litekit_db_board_pins),
"development board pins");
- imx31_add_imx_uart0(&uart_pdata);
imx31_add_mxc_mmc(0, &mmc_pdata);
- imx31_add_spi_imx0(&spi0_pdata);
gpio_led_register_device(-1, &litekit_led_platform_data);
imx31_add_imx2_wdt();
imx31_add_mxc_rtc();
u32 mmdc_io_val[MX6_MAX_MMDC_IO_NUM][2]; /* To save offset and value */
} __aligned(8);
-void imx6q_set_int_mem_clk_lpm(bool enable)
+void imx6_set_int_mem_clk_lpm(bool enable)
{
u32 val = readl_relaxed(ccm_base + CGPR);
switch (state) {
case PM_SUSPEND_STANDBY:
imx6_set_lpm(STOP_POWER_ON);
- imx6q_set_int_mem_clk_lpm(true);
+ imx6_set_int_mem_clk_lpm(true);
imx_gpc_pre_suspend(false);
if (cpu_is_imx6sl())
imx6sl_set_wait_clk(true);
break;
case PM_SUSPEND_MEM:
imx6_set_lpm(STOP_POWER_OFF);
- imx6q_set_int_mem_clk_lpm(false);
+ imx6_set_int_mem_clk_lpm(false);
imx6q_enable_wb(true);
/*
* For suspend into ocram, asm code already take care of
imx_gpc_post_resume();
imx6_enable_rbc(false);
imx6q_enable_wb(false);
- imx6q_set_int_mem_clk_lpm(true);
+ imx6_set_int_mem_clk_lpm(true);
imx6_set_lpm(WAIT_CLOCKED);
break;
default:
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
- -I$(srctree)/arch/arm/plat-orion/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include
AFLAGS_coherency_ll.o := -Wa,-march=armv7-a
CFLAGS_pmsu.o := -march=armv7-a
config MACH_OX810SE
bool "Support OX810SE Based Products"
+ select ARCH_HAS_RESET_CONTROLLER
select COMMON_CLK_OXNAS
select CPU_ARM926T
select MFD_SYSCON
select OXNAS_RPS_TIMER
select PINCTRL_OXNAS
+ select RESET_CONTROLLER
select RESET_OXNAS
select VERSATILE_FPGA_IRQ
help
comment "Intel/Marvell Dev Platforms (sorted by hardware release time)"
+config MACH_PXA25X_DT
+ bool "Support PXA25x platforms from device tree"
+ select PINCTRL
+ select POWER_SUPPLY
+ select PXA25x
+ select USE_OF
+ help
+ Include support for Marvell PXA25x based platforms using
+	  the device tree. There is no need to select any other machine
+	  while MACH_PXA25X_DT is enabled.
+
config MACH_PXA27X_DT
bool "Support PXA27x platforms from device tree"
select PINCTRL
# NOTE: keep the order of boards in accordance to their order in Kconfig
# Device Tree support
-obj-$(CONFIG_MACH_PXA3XX_DT) += pxa-dt.o
+obj-$(CONFIG_MACH_PXA25X_DT) += pxa-dt.o
obj-$(CONFIG_MACH_PXA27X_DT) += pxa-dt.o
+obj-$(CONFIG_MACH_PXA3XX_DT) += pxa-dt.o
# Intel/Marvell Dev Platforms
obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
*/
#include <linux/kernel.h>
+#include <linux/module.h> /* symbol_get ; symbol_put */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/major.h>
return is_resume;
}
-static unsigned long corgi_charger_wakeup(void)
+static bool corgi_charger_wakeup(void)
{
- unsigned long ret;
-
- ret = (!gpio_get_value(CORGI_GPIO_AC_IN) << GPIO_bit(CORGI_GPIO_AC_IN))
- | (!gpio_get_value(CORGI_GPIO_KEY_INT)
- << GPIO_bit(CORGI_GPIO_KEY_INT))
- | (!gpio_get_value(CORGI_GPIO_WAKEUP)
- << GPIO_bit(CORGI_GPIO_WAKEUP));
- return ret;
+ return !gpio_get_value(CORGI_GPIO_AC_IN) ||
+ !gpio_get_value(CORGI_GPIO_KEY_INT) ||
+ !gpio_get_value(CORGI_GPIO_WAKEUP);
}
unsigned long corgipm_read_devdata(int type)
extern struct platform_device pxa93x_device_gpio;
void __init pxa_register_device(struct platform_device *dev, void *data);
+void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
#define pxa27x_handle_irq ichp_handle_irq
extern int __init pxa27x_clocks_init(void);
-extern void __init pxa27x_dt_init_irq(void);
extern unsigned pxa27x_get_clk_frequency_khz(int);
extern void __init pxa27x_init_irq(void);
extern void __init pxa27x_map_io(void);
#define pxa3xx_handle_irq ichp_handle_irq
extern int __init pxa3xx_clocks_init(void);
-extern void __init pxa3xx_dt_init_irq(void);
extern void __init pxa3xx_init_irq(void);
extern void __init pxa3xx_map_io(void);
/* DMA Controller Registers Definitions */
#define DMAC_REGS_VIRT io_p2v(0x40000000)
-#include <plat/dma.h>
#endif /* _ASM_ARCH_DMA_H */
GPIO107_GPIO, /* DS1WM_IRQ */
GPIO108_GPIO, /* GSM_READY */
GPIO115_GPIO, /* nPEN_IRQ */
-
- /* I2C */
- GPIO117_I2C_SCL,
- GPIO118_I2C_SDA,
};
/*
return -EINVAL;
}
- sleep_save = kmalloc(pxa_cpu_pm_fns->save_count * sizeof(unsigned long),
- GFP_KERNEL);
+ sleep_save = kmalloc_array(pxa_cpu_pm_fns->save_count,
+ sizeof(*sleep_save),
+ GFP_KERNEL);
if (!sleep_save) {
printk(KERN_ERR "failed to alloc memory for pm save\n");
return -ENOMEM;
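
The kmalloc_array() change above is the standard replacement for an open-coded 'count * size' allocation: kmalloc_array() returns NULL if the multiplication would overflow instead of quietly allocating a truncated buffer. A minimal sketch of the pattern (hypothetical helper, not part of the patch):

	#include <linux/slab.h>

	static unsigned long *alloc_save_area(size_t count)
	{
		/*
		 * Unlike kmalloc(count * sizeof(unsigned long), ...), this
		 * fails cleanly when count * sizeof(unsigned long) would wrap.
		 */
		return kmalloc_array(count, sizeof(unsigned long), GFP_KERNEL);
	}
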
#include "generic.h"
-#ifdef CONFIG_PXA3xx
-static const char *const pxa3xx_dt_board_compat[] __initconst = {
- "marvell,pxa300",
- "marvell,pxa310",
- "marvell,pxa320",
+#ifdef CONFIG_PXA25x
+static const char * const pxa25x_dt_board_compat[] __initconst = {
+ "marvell,pxa250",
NULL,
};
-DT_MACHINE_START(PXA_DT, "Marvell PXA3xx (Device Tree Support)")
- .map_io = pxa3xx_map_io,
- .init_irq = pxa3xx_dt_init_irq,
- .handle_irq = pxa3xx_handle_irq,
+DT_MACHINE_START(PXA25X_DT, "Marvell PXA25x (Device Tree Support)")
+ .map_io = pxa25x_map_io,
.restart = pxa_restart,
- .dt_compat = pxa3xx_dt_board_compat,
+ .dt_compat = pxa25x_dt_board_compat,
MACHINE_END
#endif
NULL,
};
-DT_MACHINE_START(PXA27X_DT, "Marvell PXA2xx (Device Tree Support)")
+DT_MACHINE_START(PXA27X_DT, "Marvell PXA27x (Device Tree Support)")
.map_io = pxa27x_map_io,
- .init_irq = pxa27x_dt_init_irq,
- .handle_irq = pxa27x_handle_irq,
.restart = pxa_restart,
.dt_compat = pxa27x_dt_board_compat,
MACHINE_END
#endif
+
+#ifdef CONFIG_PXA3xx
+static const char *const pxa3xx_dt_board_compat[] __initconst = {
+ "marvell,pxa300",
+ "marvell,pxa310",
+ "marvell,pxa320",
+ NULL,
+};
+
+DT_MACHINE_START(PXA_DT, "Marvell PXA3xx (Device Tree Support)")
+ .map_io = pxa3xx_map_io,
+ .restart = pxa_restart,
+ .dt_compat = pxa3xx_dt_board_compat,
+MACHINE_END
+#endif
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
}
#endif
+static int __init
+pxa25x_dt_init_irq(struct device_node *node, struct device_node *parent)
+{
+ pxa_dt_irq_init(pxa25x_set_wake);
+ set_handle_irq(ichp_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(pxa25x_intc, "marvell,pxa-intc", pxa25x_dt_init_irq);
+
static struct map_desc pxa25x_io_desc[] __initdata = {
{ /* Mem Ctl */
.virtual = (unsigned long)SMEMC_VIRT,
reset_status = RCSR;
- if ((ret = pxa_init_dma(IRQ_DMA, 16)))
- return ret;
-
pxa25x_init_pm();
register_syscore_ops(&pxa_irq_syscore_ops);
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
- pxa2xx_set_dmac_info(16, 40);
- pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
- ret = platform_add_devices(pxa25x_devices,
- ARRAY_SIZE(pxa25x_devices));
- if (ret)
- return ret;
+ if (!of_have_populated_dt()) {
+ pxa2xx_set_dmac_info(16, 40);
+ pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
+ ret = platform_add_devices(pxa25x_devices,
+ ARRAY_SIZE(pxa25x_devices));
+ }
}
return ret;
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irqchip.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
pxa_init_irq(34, pxa27x_set_wake);
}
-void __init pxa27x_dt_init_irq(void)
+static int __init
+pxa27x_dt_init_irq(struct device_node *node, struct device_node *parent)
{
- if (IS_ENABLED(CONFIG_OF))
- pxa_dt_irq_init(pxa27x_set_wake);
+ pxa_dt_irq_init(pxa27x_set_wake);
+ set_handle_irq(ichp_handle_irq);
+
+ return 0;
}
+IRQCHIP_DECLARE(pxa27x_intc, "marvell,pxa-intc", pxa27x_dt_init_irq);
static struct map_desc pxa27x_io_desc[] __initdata = {
{ /* Mem Ctl */
reset_status = RCSR;
- if ((ret = pxa_init_dma(IRQ_DMA, 32)))
- return ret;
-
pxa27x_init_pm();
register_syscore_ops(&pxa_irq_syscore_ops);
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
}
#ifdef CONFIG_OF
-void __init pxa3xx_dt_init_irq(void)
+static int __init
+pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
__pxa3xx_init_irq();
pxa_dt_irq_init(pxa3xx_set_wake);
+ set_handle_irq(ichp_handle_irq);
+
+ return 0;
}
+IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif /* CONFIG_OF */
static struct map_desc pxa3xx_io_desc[] __initdata = {
*/
NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;
- if ((ret = pxa_init_dma(IRQ_DMA, 32)))
- return ret;
-
pxa3xx_init_pm();
register_syscore_ops(&pxa_irq_syscore_ops);
unsigned long pending;
unsigned int bit;
- pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
- for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
- generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
+ do {
+ pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
+ for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) {
+ generic_handle_irq(irq_find_mapping(fpga->irqdomain,
+ bit));
+ }
+ } while (pending);
return IRQ_HANDLED;
}
-static void cplds_irq_mask_ack(struct irq_data *d)
+static void cplds_irq_mask(struct irq_data *d)
{
struct cplds *fpga = irq_data_get_irq_chip_data(d);
unsigned int cplds_irq = irqd_to_hwirq(d);
- unsigned int set, bit = BIT(cplds_irq);
+ unsigned int bit = BIT(cplds_irq);
fpga->irq_mask &= ~bit;
writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
- set = readl(fpga->base + FPGA_IRQ_SET_CLR);
- writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
}
static void cplds_irq_unmask(struct irq_data *d)
{
struct cplds *fpga = irq_data_get_irq_chip_data(d);
unsigned int cplds_irq = irqd_to_hwirq(d);
- unsigned int bit = BIT(cplds_irq);
+ unsigned int set, bit = BIT(cplds_irq);
+
+ set = readl(fpga->base + FPGA_IRQ_SET_CLR);
+ writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
fpga->irq_mask |= bit;
writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
static struct irq_chip cplds_irq_chip = {
.name = "pxa_cplds",
- .irq_mask_ack = cplds_irq_mask_ack,
+ .irq_ack = cplds_irq_mask,
+ .irq_mask = cplds_irq_mask,
.irq_unmask = cplds_irq_unmask,
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};
time = RCNR;
while (1) {
/* Check if any wakeup event had occurred */
- if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+ if (sharpsl_pm.machinfo->charger_wakeup())
return 0;
/* Check for timeout */
if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
#define SHARPSL_STATUS_LOCK 5
#define SHARPSL_STATUS_CHRGFULL 6
#define SHARPSL_STATUS_FATAL 7
- unsigned long (*charger_wakeup)(void);
+ bool (*charger_wakeup)(void);
int (*should_wakeup)(unsigned int resume_on_alarm);
void (*backlight_limit)(int);
int (*backlight_get_status) (void);
*/
#include <linux/kernel.h>
+#include <linux/module.h> /* symbol_get ; symbol_put */
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio_keys.h>
return is_resume;
}
-static unsigned long spitz_charger_wakeup(void)
+static bool spitz_charger_wakeup(void)
{
- unsigned long ret;
- ret = ((!gpio_get_value(SPITZ_GPIO_KEY_INT)
- << GPIO_bit(SPITZ_GPIO_KEY_INT))
- | gpio_get_value(SPITZ_GPIO_SYNC));
- return ret;
+ return !gpio_get_value(SPITZ_GPIO_KEY_INT) ||
+ gpio_get_value(SPITZ_GPIO_SYNC);
}
unsigned long spitzpm_read_devdata(int type)
-obj-y := board.o
obj-$(CONFIG_SMP) += platsmp.o
+++ /dev/null
-/* Copyright (c) 2010-2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach/arch.h>
-
-static const char * const qcom_dt_match[] __initconst = {
- "qcom,apq8064",
- "qcom,apq8074-dragonboard",
- "qcom,apq8084",
- "qcom,ipq8062",
- "qcom,ipq8064",
- "qcom,msm8660-surf",
- "qcom,msm8960-cdp",
- "qcom,mdm9615",
- NULL
-};
-
-DT_MACHINE_START(QCOM_DT, "Qualcomm (Flattened Device Tree)")
- .dt_compat = qcom_dt_match,
-MACHINE_END
#
# Makefile for the linux kernel.
#
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
- -I$(srctree)/arch/arm/plat-versatile/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include
obj-y := core.o
obj-$(CONFIG_REALVIEW_DT) += realview-dt.o
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
},
};
+#define s3c24xx_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
+
#if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2412) || \
defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2442)
static struct resource s3c2410_dma_resource[] = {
.num_resources = ARRAY_SIZE(s3c2410_dma_resource),
.resource = s3c2410_dma_resource,
.dev = {
- .platform_data = &s3c2410_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2410_dma_platdata,
},
};
#endif
.num_resources = ARRAY_SIZE(s3c2410_dma_resource),
.resource = s3c2410_dma_resource,
.dev = {
- .platform_data = &s3c2412_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2412_dma_platdata,
},
};
#endif
.num_resources = ARRAY_SIZE(s3c2410_dma_resource),
.resource = s3c2410_dma_resource,
.dev = {
- .platform_data = &s3c2440_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2440_dma_platdata,
},
};
#endif
.num_resources = ARRAY_SIZE(s3c2443_dma_resource),
.resource = s3c2443_dma_resource,
.dev = {
- .platform_data = &s3c2443_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2443_dma_platdata,
},
};
#endif
&mini2440_button_device,
&s3c_device_nand,
&s3c_device_sdi,
+ &s3c2440_device_dma,
&s3c_device_iis,
&uda1340_codec,
&mini2440_audio,
static struct i2c_driver wlf_gf_module_driver = {
.driver = {
- .name = "wlf-gf-module",
- .owner = THIS_MODULE,
+ .name = "wlf-gf-module"
},
.probe = wlf_gf_module_probe,
.id_table = wlf_gf_module_id,
#
# Licensed under GPLv2
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include
# Core
bool __init shmobile_smp_init_fallback_ops(void)
{
/* fallback on PSCI/smp_ops if no other DT based method is detected */
+ if (!IS_ENABLED(CONFIG_SMP))
+ return false;
+
return platform_can_secondary_boot() ? true : false;
}
};
DT_MACHINE_START(R8A7790_DT, "Generic R8A7790 (Flattened Device Tree)")
- .smp_init = shmobile_smp_init_fallback_ops,
+ .smp_init = smp_init_ops(shmobile_smp_init_fallback_ops),
.smp = smp_ops(r8a7790_smp_ops),
.init_early = shmobile_init_delay,
.init_time = rcar_gen2_timer_init,
};
DT_MACHINE_START(R8A7791_DT, "Generic R8A7791 (Flattened Device Tree)")
- .smp_init = shmobile_smp_init_fallback_ops,
+ .smp_init = smp_init_ops(shmobile_smp_init_fallback_ops),
.smp = smp_ops(r8a7791_smp_ops),
.init_early = shmobile_init_delay,
.init_time = rcar_gen2_timer_init,
default ARCH_SUNXI
select ARM_GIC
select ARM_PSCI
+ select ARCH_SUPPORTS_BIG_ENDIAN
select HAVE_ARM_ARCH_TIMER
select SUN5I_HSTIMER
"allwinner,sun5i-a10s",
"allwinner,sun5i-a13",
"allwinner,sun5i-r8",
+ "nextthing,gr8",
NULL,
};
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj- += dummy.o
+++ /dev/null
-/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cp15.h>
-
-ENTRY(uniphier_smp_trampoline)
-ARM_BE8(setend be) @ ensure we are in BE8 mode
- mrc p15, 0, r0, c0, c0, 5 @ MPIDR (Multiprocessor Affinity Reg)
- and r2, r0, #0x3 @ CPU ID
- ldr r1, uniphier_smp_trampoline_jump
- ldr r3, uniphier_smp_trampoline_poll_addr
- mrc p15, 0, r0, c1, c0, 0 @ SCTLR (System Control Register)
- orr r0, r0, #CR_I @ Enable ICache
- bic r0, r0, #(CR_C | CR_M) @ Disable MMU and Dcache
- mcr p15, 0, r0, c1, c0, 0
- b 1f @ cache the following 5 instructions
-0: wfe
-1: ldr r0, [r3]
- cmp r0, r2
- bxeq r1 @ branch to secondary_startup
- b 0b
- .globl uniphier_smp_trampoline_jump
-uniphier_smp_trampoline_jump:
- .word 0 @ set virt_to_phys(secondary_startup)
- .globl uniphier_smp_trampoline_poll_addr
-uniphier_smp_trampoline_poll_addr:
- .word 0 @ set CPU ID to be kicked to this reg
- .globl uniphier_smp_trampoline_end
-uniphier_smp_trampoline_end:
-ENDPROC(uniphier_smp_trampoline)
+++ /dev/null
-/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "uniphier: " fmt
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/sizes.h>
-#include <asm/cacheflush.h>
-#include <asm/hardware/cache-uniphier.h>
-#include <asm/pgtable.h>
-#include <asm/smp.h>
-#include <asm/smp_scu.h>
-
-/*
- * The secondary CPUs check this register from the boot ROM for the jump
- * destination. After that, it can be reused as a scratch register.
- */
-#define UNIPHIER_SMPCTRL_ROM_RSV2 0x208
-
-static void __iomem *uniphier_smp_rom_boot_rsv2;
-static unsigned int uniphier_smp_max_cpus;
-
-extern char uniphier_smp_trampoline;
-extern char uniphier_smp_trampoline_jump;
-extern char uniphier_smp_trampoline_poll_addr;
-extern char uniphier_smp_trampoline_end;
-
-/*
- * Copy trampoline code to the tail of the 1st section of the page table used
- * in the boot ROM. This area is directly accessible by the secondary CPUs
- * for all the UniPhier SoCs.
- */
-static const phys_addr_t uniphier_smp_trampoline_dest_end = SECTION_SIZE;
-static phys_addr_t uniphier_smp_trampoline_dest;
-
-static int __init uniphier_smp_copy_trampoline(phys_addr_t poll_addr)
-{
- size_t trmp_size;
- static void __iomem *trmp_base;
-
- if (!uniphier_cache_l2_is_enabled()) {
- pr_warn("outer cache is needed for SMP, but not enabled\n");
- return -ENODEV;
- }
-
- uniphier_cache_l2_set_locked_ways(1);
-
- outer_flush_all();
-
- trmp_size = &uniphier_smp_trampoline_end - &uniphier_smp_trampoline;
- uniphier_smp_trampoline_dest = uniphier_smp_trampoline_dest_end -
- trmp_size;
-
- uniphier_cache_l2_touch_range(uniphier_smp_trampoline_dest,
- uniphier_smp_trampoline_dest_end);
-
- trmp_base = ioremap_cache(uniphier_smp_trampoline_dest, trmp_size);
- if (!trmp_base) {
- pr_err("failed to map trampoline destination area\n");
- return -ENOMEM;
- }
-
- memcpy(trmp_base, &uniphier_smp_trampoline, trmp_size);
-
- writel(virt_to_phys(secondary_startup),
- trmp_base + (&uniphier_smp_trampoline_jump -
- &uniphier_smp_trampoline));
-
- writel(poll_addr, trmp_base + (&uniphier_smp_trampoline_poll_addr -
- &uniphier_smp_trampoline));
-
- flush_cache_all(); /* flush out trampoline code to outer cache */
-
- iounmap(trmp_base);
-
- return 0;
-}
-
-static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
-{
- struct device_node *np;
- struct resource res;
- phys_addr_t rom_rsv2_phys;
- int ret;
-
- np = of_find_compatible_node(NULL, NULL, "socionext,uniphier-smpctrl");
- ret = of_address_to_resource(np, 0, &res);
- of_node_put(np);
- if (ret) {
- pr_err("failed to get resource of SMP control\n");
- return ret;
- }
-
- rom_rsv2_phys = res.start + UNIPHIER_SMPCTRL_ROM_RSV2;
-
- ret = uniphier_smp_copy_trampoline(rom_rsv2_phys);
- if (ret)
- return ret;
-
- uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
- if (!uniphier_smp_rom_boot_rsv2) {
- pr_err("failed to map ROM_BOOT_RSV2 register\n");
- return -ENOMEM;
- }
-
- writel(uniphier_smp_trampoline_dest, uniphier_smp_rom_boot_rsv2);
- asm("sev"); /* Bring up all secondary CPUs to the trampoline code */
-
- uniphier_smp_max_cpus = max_cpus; /* save for later use */
-
- return 0;
-}
-
-static void __init uniphier_smp_unprepare_trampoline(void)
-{
- iounmap(uniphier_smp_rom_boot_rsv2);
-
- if (uniphier_smp_trampoline_dest)
- outer_inv_range(uniphier_smp_trampoline_dest,
- uniphier_smp_trampoline_dest_end);
-
- uniphier_cache_l2_set_locked_ways(0);
-}
-
-static int __init uniphier_smp_enable_scu(void)
-{
- unsigned long scu_base_phys = 0;
- void __iomem *scu_base;
-
- if (scu_a9_has_base())
- scu_base_phys = scu_a9_get_base();
-
- if (!scu_base_phys) {
- pr_err("failed to get scu base\n");
- return -ENODEV;
- }
-
- scu_base = ioremap(scu_base_phys, SZ_128);
- if (!scu_base) {
- pr_err("failed to map scu base\n");
- return -ENOMEM;
- }
-
- scu_enable(scu_base);
- iounmap(scu_base);
-
- return 0;
-}
-
-static void __init uniphier_smp_prepare_cpus(unsigned int max_cpus)
-{
- static cpumask_t only_cpu_0 = { CPU_BITS_CPU0 };
- int ret;
-
- ret = uniphier_smp_prepare_trampoline(max_cpus);
- if (ret)
- goto err;
-
- ret = uniphier_smp_enable_scu();
- if (ret)
- goto err;
-
- return;
-err:
- pr_warn("disabling SMP\n");
- init_cpu_present(&only_cpu_0);
- uniphier_smp_unprepare_trampoline();
-}
-
-static int __init uniphier_smp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
-{
- if (WARN_ON_ONCE(!uniphier_smp_rom_boot_rsv2))
- return -EFAULT;
-
- writel(cpu, uniphier_smp_rom_boot_rsv2);
- readl(uniphier_smp_rom_boot_rsv2); /* relax */
-
- asm("sev"); /* wake up secondary CPUs sleeping in the trampoline */
-
- if (cpu == uniphier_smp_max_cpus - 1) {
- /* clean up resources if this is the last CPU */
- uniphier_smp_unprepare_trampoline();
- }
-
- return 0;
-}
-
-static const struct smp_operations uniphier_smp_ops __initconst = {
- .smp_prepare_cpus = uniphier_smp_prepare_cpus,
- .smp_boot_secondary = uniphier_smp_boot_secondary,
-};
-CPU_METHOD_OF_DECLARE(uniphier_smp, "socionext,uniphier-smp",
- &uniphier_smp_ops);
/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define UNIPHIER_SSCOPE_CM_SYNC 0x8 /* sync (drain bufs) */
#define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
#define UNIPHIER_SSCOQM 0x248 /* Cache Operation Queue Mode */
-#define UNIPHIER_SSCOQM_TID_MASK (0x3 << 21)
-#define UNIPHIER_SSCOQM_TID_LRU_DATA (0x0 << 21)
-#define UNIPHIER_SSCOQM_TID_LRU_INST (0x1 << 21)
-#define UNIPHIER_SSCOQM_TID_WAY (0x2 << 21)
#define UNIPHIER_SSCOQM_S_MASK (0x3 << 17)
#define UNIPHIER_SSCOQM_S_RANGE (0x0 << 17)
#define UNIPHIER_SSCOQM_S_ALL (0x1 << 17)
-#define UNIPHIER_SSCOQM_S_WAY (0x2 << 17)
#define UNIPHIER_SSCOQM_CE BIT(15) /* notify completion */
#define UNIPHIER_SSCOQM_CM_INV 0x0 /* invalidate */
#define UNIPHIER_SSCOQM_CM_CLEAN 0x1 /* clean */
#define UNIPHIER_SSCOQM_CM_FLUSH 0x2 /* flush */
-#define UNIPHIER_SSCOQM_CM_PREFETCH 0x3 /* prefetch to cache */
-#define UNIPHIER_SSCOQM_CM_PREFETCH_BUF 0x4 /* prefetch to pf-buf */
-#define UNIPHIER_SSCOQM_CM_TOUCH 0x5 /* touch */
-#define UNIPHIER_SSCOQM_CM_TOUCH_ZERO 0x6 /* touch to zero */
-#define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY 0x7 /* touch with dirty */
#define UNIPHIER_SSCOQAD 0x24c /* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ 0x250 /* Cache Operation Queue Size */
-#define UNIPHIER_SSCOQMASK 0x254 /* Cache Operation Queue Address Mask */
-#define UNIPHIER_SSCOQWN 0x258 /* Cache Operation Queue Way Number */
#define UNIPHIER_SSCOPPQSEF 0x25c /* Cache Operation Queue Set Complete*/
#define UNIPHIER_SSCOPPQSEF_FE BIT(1)
#define UNIPHIER_SSCOPPQSEF_OE BIT(0)
#define UNIPHIER_SSCOLPQS_EST BIT(1)
#define UNIPHIER_SSCOLPQS_QST BIT(0)
-/* Is the touch/pre-fetch destination specified by ways? */
-#define UNIPHIER_SSCOQM_TID_IS_WAY(op) \
- ((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)
/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
}
-
- /* set target ways if needed */
- if (unlikely(UNIPHIER_SSCOQM_TID_IS_WAY(operation)))
- writel_relaxed(data->way_locked_mask,
- data->op_base + UNIPHIER_SSCOQWN);
} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
(UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
__uniphier_cache_sync(data);
}
-int __init uniphier_cache_l2_is_enabled(void)
-{
- struct uniphier_cache_data *data;
-
- data = list_first_entry_or_null(&uniphier_cache_list,
- struct uniphier_cache_data, list);
- if (!data)
- return 0;
-
- return !!(readl_relaxed(data->ctrl_base + UNIPHIER_SSCC) &
- UNIPHIER_SSCC_ON);
-}
-
-void __init uniphier_cache_l2_touch_range(unsigned long start,
- unsigned long end)
-{
- struct uniphier_cache_data *data;
-
- data = list_first_entry_or_null(&uniphier_cache_list,
- struct uniphier_cache_data, list);
- if (data)
- __uniphier_cache_maint_range(data, start, end,
- UNIPHIER_SSCOQM_TID_WAY |
- UNIPHIER_SSCOQM_CM_TOUCH);
-}
-
-void __init uniphier_cache_l2_set_locked_ways(u32 way_mask)
-{
- struct uniphier_cache_data *data;
-
- data = list_first_entry_or_null(&uniphier_cache_list,
- struct uniphier_cache_data, list);
- if (data)
- __uniphier_cache_set_locked_ways(data, way_mask);
-}
-
static const struct of_device_id uniphier_cache_match[] __initconst = {
- {
- .compatible = "socionext,uniphier-system-cache",
- },
+ { .compatible = "socionext,uniphier-system-cache" },
{ /* sentinel */ }
};
{
void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
- BUG_ON(!ptr);
+ if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+ BUG();
return ptr;
}
{
phys_addr_t memblock_limit = 0;
int highmem = 0;
- phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+ u64 vmalloc_limit;
struct memblock_region *reg;
bool should_use_highmem = false;
+ /*
+ * Let's use our own (unoptimized) equivalent of __pa() that is
+ * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
+ * The result is used as the upper bound on physical memory address
+ * and may itself be outside the valid range for which phys_addr_t
+ * and therefore __pa() is defined.
+ */
+ vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
if (reg->size > size_limit) {
phys_addr_t overlap_size = reg->size - size_limit;
- pr_notice("Truncating RAM at %pa-%pa to -%pa",
- &block_start, &block_end, &vmalloc_limit);
- memblock_remove(vmalloc_limit, overlap_size);
+ pr_notice("Truncating RAM at %pa-%pa",
+ &block_start, &block_end);
block_end = vmalloc_limit;
+ pr_cont(" to -%pa", &block_end);
+ memblock_remove(vmalloc_limit, overlap_size);
should_use_highmem = true;
}
}
#
ccflags-$(CONFIG_ARCH_MMP) := -I$(srctree)/$(src)/include
-obj-$(CONFIG_ARCH_PXA) := dma.o
-
obj-$(CONFIG_PXA3xx) += mfp.o
obj-$(CONFIG_ARCH_MMP) += mfp.o
+++ /dev/null
-/*
- * linux/arch/arm/plat-pxa/dma.c
- *
- * PXA DMA registration and IRQ dispatching
- *
- * Author: Nicolas Pitre
- * Created: Nov 15, 2001
- * Copyright: MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/irq.h>
-#include <asm/memory.h>
-#include <mach/hardware.h>
-#include <mach/dma.h>
-
-#define DMA_DEBUG_NAME "pxa_dma"
-#define DMA_MAX_REQUESTERS 64
-
-struct dma_channel {
- char *name;
- pxa_dma_prio prio;
- void (*irq_handler)(int, void *);
- void *data;
- spinlock_t lock;
-};
-
-static struct dma_channel *dma_channels;
-static int num_dma_channels;
-
-/*
- * Debug fs
- */
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
-
-static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
-
-static int dbg_show_requester_chan(struct seq_file *s, void *p)
-{
- int chan = (int)s->private;
- int i;
- u32 drcmr;
-
- seq_printf(s, "DMA channel %d requesters list :\n", chan);
- for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
- drcmr = DRCMR(i);
- if ((drcmr & DRCMR_CHLNUM) == chan)
- seq_printf(s, "\tRequester %d (MAPVLD=%d)\n",
- i, !!(drcmr & DRCMR_MAPVLD));
- }
-
- return 0;
-}
-
-static inline int dbg_burst_from_dcmd(u32 dcmd)
-{
- int burst = (dcmd >> 16) & 0x3;
-
- return burst ? 4 << burst : 0;
-}
-
-static int is_phys_valid(unsigned long addr)
-{
- return pfn_valid(__phys_to_pfn(addr));
-}
-
-#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "")
-#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "")
-
-static int dbg_show_descriptors(struct seq_file *s, void *p)
-{
- int chan = (int)s->private;
- int i, max_show = 20, burst, width;
- u32 dcmd;
- unsigned long phys_desc;
- struct pxa_dma_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&dma_channels[chan].lock, flags);
- phys_desc = DDADR(chan);
-
- seq_printf(s, "DMA channel %d descriptors :\n", chan);
- seq_printf(s, "[%03d] First descriptor unknown\n", 0);
- for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
- desc = phys_to_virt(phys_desc);
- dcmd = desc->dcmd;
- burst = dbg_burst_from_dcmd(dcmd);
- width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
-
- seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
- i, phys_desc, desc);
- seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
- seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
- seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
- seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
- dcmd,
- DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
- DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
- DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
- DCMD_STR(ENDIAN), burst, width,
- dcmd & DCMD_LENGTH);
- phys_desc = desc->ddadr;
- }
- if (i == max_show)
- seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
- i, phys_desc);
- else
- seq_printf(s, "[%03d] Desc at %08lx is %s\n",
- i, phys_desc, phys_desc == DDADR_STOP ?
- "DDADR_STOP" : "invalid");
-
- spin_unlock_irqrestore(&dma_channels[chan].lock, flags);
-
- return 0;
-}
-
-static int dbg_show_chan_state(struct seq_file *s, void *p)
-{
- int chan = (int)s->private;
- u32 dcsr, dcmd;
- int burst, width;
- static char *str_prio[] = { "high", "normal", "low" };
-
- dcsr = DCSR(chan);
- dcmd = DCMD(chan);
- burst = dbg_burst_from_dcmd(dcmd);
- width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
-
- seq_printf(s, "DMA channel %d\n", chan);
- seq_printf(s, "\tPriority : %s\n", str_prio[dma_channels[chan].prio]);
- seq_printf(s, "\tUnaligned transfer bit: %s\n",
- DALGN & (1 << chan) ? "yes" : "no");
- seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
- dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
- DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
- DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
- DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
- DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
- DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
- DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
-
- seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
- dcmd,
- DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
- DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
- DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
- DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
- seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
- seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
- seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
-
- return 0;
-}
-
-static int dbg_show_state(struct seq_file *s, void *p)
-{
- /* basic device status */
- seq_puts(s, "DMA engine status\n");
- seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
-
- return 0;
-}
-
-#define DBGFS_FUNC_DECL(name) \
-static int dbg_open_##name(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, dbg_show_##name, inode->i_private); \
-} \
-static const struct file_operations dbg_fops_##name = { \
- .owner = THIS_MODULE, \
- .open = dbg_open_##name, \
- .llseek = seq_lseek, \
- .read = seq_read, \
- .release = single_release, \
-}
-
-DBGFS_FUNC_DECL(state);
-DBGFS_FUNC_DECL(chan_state);
-DBGFS_FUNC_DECL(descriptors);
-DBGFS_FUNC_DECL(requester_chan);
-
-static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir)
-{
- char chan_name[11];
- struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
- struct dentry *chan_reqs = NULL;
- void *dt;
-
- scnprintf(chan_name, sizeof(chan_name), "%d", ch);
- chan = debugfs_create_dir(chan_name, chandir);
- dt = (void *)ch;
-
- if (chan)
- chan_state = debugfs_create_file("state", 0400, chan, dt,
- &dbg_fops_chan_state);
- if (chan_state)
- chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
- &dbg_fops_descriptors);
- if (chan_descr)
- chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
- &dbg_fops_requester_chan);
- if (!chan_reqs)
- goto err_state;
-
- return chan;
-
-err_state:
- debugfs_remove_recursive(chan);
- return NULL;
-}
-
-static void pxa_dma_init_debugfs(void)
-{
- int i;
- struct dentry *chandir;
-
- dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL);
- if (IS_ERR(dbgfs_root) || !dbgfs_root)
- goto err_root;
-
- dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL,
- &dbg_fops_state);
- if (!dbgfs_state)
- goto err_state;
-
- dbgfs_chan = kmalloc(sizeof(*dbgfs_state) * num_dma_channels,
- GFP_KERNEL);
- if (!dbgfs_chan)
- goto err_alloc;
-
- chandir = debugfs_create_dir("channels", dbgfs_root);
- if (!chandir)
- goto err_chandir;
-
- for (i = 0; i < num_dma_channels; i++) {
- dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir);
- if (!dbgfs_chan[i])
- goto err_chans;
- }
-
- return;
-err_chans:
-err_chandir:
- kfree(dbgfs_chan);
-err_alloc:
-err_state:
- debugfs_remove_recursive(dbgfs_root);
-err_root:
- pr_err("pxa_dma: debugfs is not available\n");
-}
-
-static void __exit pxa_dma_cleanup_debugfs(void)
-{
- debugfs_remove_recursive(dbgfs_root);
-}
-#else
-static inline void pxa_dma_init_debugfs(void) {}
-static inline void pxa_dma_cleanup_debugfs(void) {}
-#endif
-
-int pxa_request_dma (char *name, pxa_dma_prio prio,
- void (*irq_handler)(int, void *),
- void *data)
-{
- unsigned long flags;
- int i, found = 0;
-
- /* basic sanity checks */
- if (!name || !irq_handler)
- return -EINVAL;
-
- local_irq_save(flags);
-
- do {
- /* try grabbing a DMA channel with the requested priority */
- for (i = 0; i < num_dma_channels; i++) {
- if ((dma_channels[i].prio == prio) &&
- !dma_channels[i].name &&
- !pxad_toggle_reserved_channel(i)) {
- found = 1;
- break;
- }
- }
- /* if requested prio group is full, try a hier priority */
- } while (!found && prio--);
-
- if (found) {
- DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
- dma_channels[i].name = name;
- dma_channels[i].irq_handler = irq_handler;
- dma_channels[i].data = data;
- } else {
- printk (KERN_WARNING "No more available DMA channels for %s\n", name);
- i = -ENODEV;
- }
-
- local_irq_restore(flags);
- return i;
-}
-EXPORT_SYMBOL(pxa_request_dma);
-
-void pxa_free_dma (int dma_ch)
-{
- unsigned long flags;
-
- if (!dma_channels[dma_ch].name) {
- printk (KERN_CRIT
- "%s: trying to free channel %d which is already freed\n",
- __func__, dma_ch);
- return;
- }
-
- local_irq_save(flags);
- DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
- dma_channels[dma_ch].name = NULL;
- pxad_toggle_reserved_channel(dma_ch);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(pxa_free_dma);
-
-static irqreturn_t dma_irq_handler(int irq, void *dev_id)
-{
- int i, dint = DINT, done = 0;
- struct dma_channel *channel;
-
- while (dint) {
- i = __ffs(dint);
- dint &= (dint - 1);
- channel = &dma_channels[i];
- if (channel->name && channel->irq_handler) {
- channel->irq_handler(i, channel->data);
- done++;
- }
- }
- if (done)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-int __init pxa_init_dma(int irq, int num_ch)
-{
- int i, ret;
-
- dma_channels = kzalloc(sizeof(struct dma_channel) * num_ch, GFP_KERNEL);
- if (dma_channels == NULL)
- return -ENOMEM;
-
- /* dma channel priorities on pxa2xx processors:
- * ch 0 - 3, 16 - 19 <--> (0) DMA_PRIO_HIGH
- * ch 4 - 7, 20 - 23 <--> (1) DMA_PRIO_MEDIUM
- * ch 8 - 15, 24 - 31 <--> (2) DMA_PRIO_LOW
- */
- for (i = 0; i < num_ch; i++) {
- DCSR(i) = 0;
- dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
- spin_lock_init(&dma_channels[i].lock);
- }
-
- ret = request_irq(irq, dma_irq_handler, IRQF_SHARED, "DMA",
- dma_channels);
- if (ret) {
- printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n");
- kfree(dma_channels);
- return ret;
- }
- num_dma_channels = num_ch;
-
- pxa_dma_init_debugfs();
-
- return 0;
-}
+++ /dev/null
-#ifndef __PLAT_DMA_H
-#define __PLAT_DMA_H
-
-#define DMAC_REG(x) (*((volatile u32 *)(DMAC_REGS_VIRT + (x))))
-
-#define DCSR(n) DMAC_REG((n) << 2)
-#define DALGN DMAC_REG(0x00a0) /* DMA Alignment Register */
-#define DINT DMAC_REG(0x00f0) /* DMA Interrupt Register */
-#define DDADR(n) DMAC_REG(0x0200 + ((n) << 4))
-#define DSADR(n) DMAC_REG(0x0204 + ((n) << 4))
-#define DTADR(n) DMAC_REG(0x0208 + ((n) << 4))
-#define DCMD(n) DMAC_REG(0x020c + ((n) << 4))
-#define DRCMR(n) DMAC_REG((((n) < 64) ? 0x0100 : 0x1100) + \
- (((n) & 0x3f) << 2))
-
-#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
-#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
-#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
-#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
-#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
-#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
-#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
-#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
-#define DCSR_EORINTR (1 << 9) /* The end of Receive */
-
-#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
-#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
-
-#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
-#define DDADR_STOP (1 << 0) /* Stop (read / write) */
-
-#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
-#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
-#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
-#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
-#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
-#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
-#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
-#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
-#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
-#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
-#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
-#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
-#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
-
-/*
- * Descriptor structure for PXA's DMA engine
- * Note: this structure must always be aligned to a 16-byte boundary.
- */
-
-typedef struct pxa_dma_desc {
- volatile u32 ddadr; /* Points to the next descriptor + flags */
- volatile u32 dsadr; /* DSADR value for the current transfer */
- volatile u32 dtadr; /* DTADR value for the current transfer */
- volatile u32 dcmd; /* DCMD value for the current transfer */
-} pxa_dma_desc;
-
-typedef enum {
- DMA_PRIO_HIGH = 0,
- DMA_PRIO_MEDIUM = 1,
- DMA_PRIO_LOW = 2
-} pxa_dma_prio;
-
-/*
- * DMA registration
- */
-
-int __init pxa_init_dma(int irq, int num_ch);
-
-int pxa_request_dma (char *name,
- pxa_dma_prio prio,
- void (*irq_handler)(int, void *),
- void *data);
-
-void pxa_free_dma (int dma_ch);
-
-/*
- * Cooperation with pxa_dma + dmaengine while there remains at least one pxa
- * driver not converted to dmaengine.
- */
-#if defined(CONFIG_PXA_DMA)
-extern int pxad_toggle_reserved_channel(int legacy_channel);
-#else
-static inline int pxad_toggle_reserved_channel(int legacy_channel)
-{
- return 0;
-}
-#endif
-
-extern void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
-
-#endif /* __PLAT_DMA_H */
#define __ASM_PLAT_MAP_S5P_H __FILE__
#define S5P_VA_CHIPID S3C_ADDR(0x02000000)
-#define S5P_VA_CMU S3C_ADDR(0x02100000)
-
-#define S5P_VA_DMC0 S3C_ADDR(0x02440000)
-#define S5P_VA_DMC1 S3C_ADDR(0x02480000)
#define S5P_VA_COREPERI_BASE S3C_ADDR(0x02800000)
#define S5P_VA_COREPERI(x) (S5P_VA_COREPERI_BASE + (x))
static struct vcpu_info __percpu *xen_vcpu_info;
/* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
/* These are unused until we support booting "pre-ballooned" */
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
config ARCH_ALPINE
bool "Annapurna Labs Alpine platform"
- select ALPINE_MSI
+ select ALPINE_MSI if PCI
help
This enables support for the Annapurna Labs Alpine
	  SoC family.
config ARCH_HISI
bool "Hisilicon SoC Family"
select ARM_TIMER_SP804
- select HISILICON_IRQ_MBIGEN
+ select HISILICON_IRQ_MBIGEN if PCI
help
This enables support for Hisilicon ARMv8 SoC family
/dts-v1/;
#include "exynos7.dtsi"
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/samsung,s2mps11.h>
/ {
model = "Samsung Exynos7 Espresso board based on EXYNOS7";
&rtc {
status = "okay";
+ clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>;
+ clock-names = "rtc", "rtc_src";
};
&watchdog {
-# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA=y
+CONFIG_SECCOMP=y
CONFIG_XEN=y
CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
CONFIG_BPF_JIT=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
CONFIG_VIRTIO_BLK=y
CONFIG_SRAM=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_PATA_PLATFORM=y
CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
CONFIG_TUN=y
+CONFIG_VETH=m
CONFIG_VIRTIO_NET=y
CONFIG_AMD_XGBE=y
CONFIG_NET_XGENE=y
CONFIG_PWM_SAMSUNG=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 1
-#define MAX_STACK_SIZE 128
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
struct prev_kprobe prev_kprobe;
struct kprobe_step_ctx ss_ctx;
struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);
static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
- return __arch_copy_from_user(to, from, n);
+ check_object_size(to, n, false);
+ return __arch_copy_from_user(to, from, n);
}
static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
- return __arch_copy_to_user(to, from, n);
+ check_object_size(from, n, true);
+ return __arch_copy_to_user(to, from, n);
}
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(VERIFY_READ, from, n)) {
+ check_object_size(to, n, false);
n = __arch_copy_from_user(to, from, n);
- else /* security hole - plug it */
+ } else /* security hole - plug it */
memset(to, 0, n);
return n;
}
{
kasan_check_read(from, n);
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ check_object_size(from, n, true);
n = __arch_copy_to_user(to, from, n);
+ }
return n;
}
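
The uaccess hunks above all add the same hardened-usercopy step: check_object_size() validates the kernel buffer's bounds before the architecture copy routine runs. As a rough userspace analogue of that check-then-copy pattern (not the kernel implementation; struct pkt and bounded_copy() are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical "object" whose bounds are known at the copy site. */
struct pkt {
	char hdr[16];
	char payload[48];
};

/*
 * Minimal stand-in for the check_object_size() idea: refuse the copy
 * if [dst, dst + len) is not fully contained in the known object.
 */
static void bounded_copy(struct pkt *obj, char *dst, const char *src, size_t len)
{
	const char *start = (const char *)obj;
	const char *end   = start + sizeof(*obj);

	if (dst < start || len > (size_t)(end - dst)) {
		fprintf(stderr, "copy of %zu bytes would overflow object\n", len);
		abort();
	}
	memcpy(dst, src, len);
}

int main(void)
{
	struct pkt p;
	const char msg[] = "hello";

	bounded_copy(&p, p.payload, msg, sizeof(msg));	/* fits: ok */
	bounded_copy(&p, p.payload, msg, 4096);		/* overflows: aborts */
	return 0;
}
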
lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
+ cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
+ b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
+
+el1_ia:
+ /*
+ * Fall through to the Data abort case
+ */
el1_da:
/*
* Data abort handling
isb
bl __create_page_tables // recreate kernel mapping
+ tlbi vmalle1 // Remove any stale TLB entries
+ dsb nsh
+
msr sctlr_el1, x19 // re-enable the MMU
isb
ic iallu // flush instructions fetched
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/suspend.h>
+#include <asm/sysreg.h>
#include <asm/virt.h>
/*
set_pte(pte, __pte(virt_to_phys((void *)dst) |
pgprot_val(PAGE_KERNEL_EXEC)));
- /* Load our new page tables */
- asm volatile("msr ttbr0_el1, %0;"
- "isb;"
- "tlbi vmalle1is;"
- "dsb ish;"
- "isb" : : "r"(virt_to_phys(pgd)));
+ /*
+ * Load our new page tables. A strict BBM approach requires that we
+ * ensure that TLBs are free of any entries that may overlap with the
+ * global mappings we are about to install.
+ *
+ * For a real hibernate/resume cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+ * runtime services), while for a userspace-driven test_resume cycle it
+ * points to userspace page tables (and we must point it at a zero page
+ * ourselves). Elsewhere we only (un)install the idmap with preemption
+ * disabled, so T0SZ should be as required regardless.
+ */
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ isb();
*phys_dst_addr = virt_to_phys((void *)dst);
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
void *, phys_addr_t, phys_addr_t);
+ /*
+ * Restoring the memory image will overwrite the ttbr1 page tables.
+ * Create a second copy of just the linear map, and use this when
+ * restoring.
+ */
+ tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!tmp_pg_dir) {
+ pr_err("Failed to allocate memory for temporary page tables.");
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+ if (rc)
+ goto out;
+
+ /*
+ * Since we only copied the linear map, we need to find restore_pblist's
+ * linear map address.
+ */
+ lm_restore_pblist = LMADDR(restore_pblist);
+
+ /*
+ * We need a zero page that is zero before & after resume in order
+ * to break before make on the ttbr1 page tables.
+ */
+ zero_page = (void *)get_safe_page(GFP_ATOMIC);
+ if (!zero_page) {
+ pr_err("Failed to allocate zero page.");
+ rc = -ENOMEM;
+ goto out;
+ }
+
/*
* Locate the exit code in the bottom-but-one page, so that *NULL
* still has disastrous effects.
*/
__flush_dcache_area(hibernate_exit, exit_size);
- /*
- * Restoring the memory image will overwrite the ttbr1 page tables.
- * Create a second copy of just the linear map, and use this when
- * restoring.
- */
- tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
- if (!tmp_pg_dir) {
- pr_err("Failed to allocate memory for temporary page tables.");
- rc = -ENOMEM;
- goto out;
- }
- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
- if (rc)
- goto out;
-
- /*
- * Since we only copied the linear map, we need to find restore_pblist's
- * linear map address.
- */
- lm_restore_pblist = LMADDR(restore_pblist);
-
/*
* KASLR will cause the el2 vectors to be in a different location in
* the resumed kernel. Load hibernate's temporary copy into el2.
__hyp_set_vectors(el2_vectors);
}
- /*
- * We need a zero page that is zero before & after resume in order to
- * to break before make on the ttbr1 page tables.
- */
- zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
resume_hdr.reenter_kernel, lm_restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
-static inline unsigned long min_stack_size(unsigned long addr)
-{
- unsigned long size;
-
- if (on_irq_stack(addr, raw_smp_processor_id()))
- size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
- else
- size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
- return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
/* prepare insn slot */
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_ptr = kernel_stack_pointer(regs);
kcb->jprobe_saved_regs = *regs;
/*
- * As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
+ * Since we can't be sure where in the stack frame "stacked"
+ * pass-by-value arguments are stored, we just don't try to
+ * duplicate any of the stack. Do not use jprobes on functions that
+ * use more than 64 bytes (after padding each to an 8 byte boundary)
+ * of arguments, or pass individual arguments larger than 16 bytes.
*/
- kasan_disable_current();
- memcpy(kcb->jprobes_stack, (void *)stack_ptr,
- min_stack_size(stack_ptr));
- kasan_enable_current();
instruction_pointer_set(regs, (unsigned long) jp->entry);
preempt_disable();
}
unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- kasan_disable_current();
- memcpy((void *)stack_addr, kcb->jprobes_stack,
- min_stack_size(stack_addr));
- kasan_enable_current();
preempt_enable_no_resched();
return 1;
}
bl el2_setup // if in EL2 drop to EL1 cleanly
/* enable the MMU early - so we can access sleep_save_stash by va */
adr_l lr, __enable_mmu /* __cpu_setup will return here */
- ldr x27, =_cpu_resume /* __enable_mmu will branch here */
+ adr_l x27, _resume_switched /* __enable_mmu will branch here */
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
b __cpu_setup
ENDPROC(cpu_resume)
+ .pushsection ".idmap.text", "ax"
+_resume_switched:
+ ldr x8, =_cpu_resume
+ br x8
+ENDPROC(_resume_switched)
+ .ltorg
+ .popsection
+
ENTRY(_cpu_resume)
mrs x1, mpidr_el1
adrp x8, mpidr_hash
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_gic_cpu_interface, 0);
- if (cpu_count > NR_CPUS)
- pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
- cpu_count, NR_CPUS);
+ if (cpu_count > nr_cpu_ids)
+ pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+ cpu_count, nr_cpu_ids);
if (!bootcpu_valid) {
pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
* with entries in cpu_logical_map while initializing the cpus.
* If the cpu set-up fails, invalidate the cpu_logical_map entry.
*/
- for (i = 1; i < NR_CPUS; i++) {
+ for (i = 1; i < nr_cpu_ids; i++) {
if (cpu_logical_map(i) != INVALID_HWID) {
if (smp_cpu_setup(i))
cpu_logical_map(i) = INVALID_HWID;
/*
* We must restore the 32-bit state before the sysregs, thanks
- * to Cortex-A57 erratum #852523.
+ * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*/
__sysreg32_restore_state(vcpu);
__sysreg_restore_guest_state(guest_ctxt);
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
*
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
* Debug handling: We do trap most, if not all debug related system
* registers. The implementation is good enough to ensure that a guest
* can use these with minimal performance degradation. The drawback is
{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
/* ICC_SRE */
- { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+ { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
- pte_t *pte = pte_offset_kernel(pmd, 0);
+ pte_t *pte = pte_offset_kernel(pmd, 0UL);
unsigned long addr;
unsigned i;
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
- pmd_t *pmd = pmd_offset(pud, 0);
+ pmd_t *pmd = pmd_offset(pud, 0UL);
unsigned long addr;
unsigned i;
static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
- pud_t *pud = pud_offset(pgd, 0);
+ pud_t *pud = pud_offset(pgd, 0UL);
unsigned long addr;
unsigned i;
}
#endif
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
/*
* The kernel tried to access some page that wasn't present.
*/
{
/*
* Are we prepared to handle this kernel fault?
+ * We are almost certainly not prepared to handle instruction faults.
*/
- if (fixup_exception(regs))
+ if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
/*
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
- return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+ return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+ (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
}
static bool is_el0_instruction_abort(unsigned int esr)
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
+ if (is_el1_instruction_abort(esr))
+ die("Attempting to execute userspace memory", regs, esr);
+
if (!search_exception_tables(regs->pc))
die("Accessing user space memory outside uaccess.h routines", regs, esr);
}
#include <linux/module.h>
#include <linux/of.h>
+#include <asm/acpi.h>
+
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
nodemask_t numa_nodes_parsed __initdata;
#ifdef __KERNEL__
+#include <linux/types.h>
+
/* H8/300 internal I/O functions */
#define __raw_readb __raw_readb
select MODULES_USE_ELF_RELA
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
+ if (!__builtin_constant_p(count))
+ check_object_size(from, count, true);
+
return __copy_user(to, (__force void __user *) from, count);
}
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
+ if (!__builtin_constant_p(count))
+ check_object_size(to, count, false);
+
return __copy_user((__force void __user *) to, from, count);
}
const void *__cu_from = (from); \
long __cu_len = (n); \
\
- if (__access_ok(__cu_to, __cu_len, get_fs())) \
- __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ if (__access_ok(__cu_to, __cu_len, get_fs())) { \
+ if (!__builtin_constant_p(n)) \
+ check_object_size(__cu_from, __cu_len, true); \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ } \
__cu_len; \
})
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
- if (__access_ok(__cu_from, __cu_len, get_fs())) \
+ if (__access_ok(__cu_from, __cu_len, get_fs())) { \
+ if (!__builtin_constant_p(n)) \
+ check_object_size(__cu_to, __cu_len, false); \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ } \
__cu_len; \
})
static inline void adjustformat(struct pt_regs *regs)
{
- ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
/*
* set format byte to make stack appear modulo 4, which it will
* be when doing the rte
free_all_bootmem();
mem_init_print_info(NULL);
- show_mem(0);
}
void free_initmem(void)
*/
static inline unsigned long ___pa(unsigned long x)
{
- if (config_enabled(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT)) {
/*
* For MIPS64 the virtual address may either be in one of
* the compatibility segements ckseg0 or ckseg1, or it may
return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
}
- if (!config_enabled(CONFIG_EVA)) {
+ if (!IS_ENABLED(CONFIG_EVA)) {
/*
* We're using the standard MIPS32 legacy memory map, ie.
* the address x is going to be in kseg0 or kseg1. We can
preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
- kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+ __func__, va, vcpu, read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
+ }
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
int index;
run, vcpu);
preempt_enable();
goto dont_update_pc;
- } else {
- /*
- * We fault an entry from the guest tlb to the
- * shadow host TLB
- */
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+ }
+ /*
+ * We fault an entry from the guest tlb to the
+ * shadow host TLB
+ */
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
}
}
} else {
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ }
}
}
srcu_idx = srcu_read_lock(&kvm->srcu);
pfn = gfn_to_pfn(kvm, gfn);
- if (is_error_pfn(pfn)) {
+ if (is_error_noslot_pfn(pfn)) {
kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
err = -EFAULT;
goto out;
}
gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if (gfn >= kvm->arch.guest_pmap_npages) {
+ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
gfn, badvaddr);
kvm_mips_dump_host_tlbs();
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
kvm_pfn_t pfn0, pfn1;
+ gfn_t gfn0, gfn1;
+ long tlb_lo[2];
int ret;
- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
- pfn0 = 0;
- pfn1 = 0;
- } else {
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
- >> PAGE_SHIFT) < 0)
- return -1;
-
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
- >> PAGE_SHIFT) < 0)
- return -1;
-
- pfn0 = kvm->arch.guest_pmap[
- mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
- pfn1 = kvm->arch.guest_pmap[
- mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
+ tlb_lo[0] = tlb->tlb_lo[0];
+ tlb_lo[1] = tlb->tlb_lo[1];
+
+ /*
+ * The commpage address must not be mapped to anything else if the guest
+ * TLB contains entries nearby, or commpage accesses will break.
+ */
+ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+ VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+ if (gfn0 >= kvm->arch.guest_pmap_npages ||
+ gfn1 >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+ __func__, gfn0, gfn1, tlb->tlb_hi);
+ kvm_mips_dump_guest_tlbs(vcpu);
+ return -1;
}
+ if (kvm_mips_map_page(kvm, gfn0) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(kvm, gfn1) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[gfn0];
+ pfn1 = kvm->arch.guest_pmap[gfn1];
+
/* Get attributes from the Guest TLB */
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
- (tlb->tlb_lo[0] & ENTRYLO_D) |
- (tlb->tlb_lo[0] & ENTRYLO_V);
+ (tlb_lo[0] & ENTRYLO_D) |
+ (tlb_lo[0] & ENTRYLO_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
- (tlb->tlb_lo[1] & ENTRYLO_D) |
- (tlb->tlb_lo[1] & ENTRYLO_V);
+ (tlb_lo[1] & ENTRYLO_D) |
+ (tlb_lo[1] & ENTRYLO_V);
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo[0], tlb->tlb_lo[1]);
local_irq_restore(flags);
return KVM_INVALID_INST;
}
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.
- guest_tlb[index]);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.guest_tlb[index])) {
+ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, index, vcpu,
+ read_c0_entryhi());
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
inst = *(opc);
}
local_irq_restore(flags);
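
In the MIPS KVM hunks above, a guest TLB entry always maps an even/odd pair of pages, which is why the kseg0 bounds check can test (gfn | 1) against guest_pmap_npages to cover both halves at once, and why the tlb_lo word whose page would overlap the commpage is cleared rather than mapped. The sketch below only illustrates the even/odd-pair arithmetic in plain C, with a made-up page count:

#include <stdio.h>
#include <stdint.h>

/*
 * A TLB entry covers an even/odd pair of pages: gfn and (gfn | 1).
 * Checking (gfn | 1) against the number of guest pages therefore
 * validates both halves of the pair in one comparison.
 */
static int pair_in_range(uint64_t gfn, uint64_t npages)
{
	return (gfn | 1) < npages;
}

int main(void)
{
	uint64_t npages = 10;		/* illustrative guest_pmap size */
	uint64_t gfns[] = { 4, 8, 9, 10 };

	for (unsigned i = 0; i < sizeof(gfns) / sizeof(gfns[0]); i++)
		printf("gfn %llu: pair [%llu,%llu] %s\n",
		       (unsigned long long)gfns[i],
		       (unsigned long long)(gfns[i] & ~1ULL),
		       (unsigned long long)(gfns[i] | 1),
		       pair_in_range(gfns[i], npages) ? "ok" : "out of range");
	return 0;
}
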
#define ENOTCONN 235 /* Transport endpoint is not connected */
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
-#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
-#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
-extern int update_cr16_clocksource(void); /* from time.c */
-
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
}
#endif
- /* If we've registered more than one cpu,
- * we'll use the jiffies clocksource since cr16
- * is not synchronized between CPUs.
- */
- update_cr16_clocksource();
-
return 0;
}
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-int update_cr16_clocksource(void)
-{
- /* since the cr16 cycle counters are not synchronized across CPUs,
- we'll check if we should switch to a safe clocksource: */
- if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
- clocksource_change_rating(&clocksource_cr16, 0);
- return 1;
- }
-
- return 0;
-}
-
void __init start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select GENERIC_CPU_AUTOPROBE
select HAVE_VIRT_CPU_ACCOUNTING
+ select HAVE_ARCH_HARDENED_USERCOPY
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
UTS_MACHINE := $(OLDARCH)
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-override CC += -mlittle-endian
-ifneq ($(cc-name),clang)
-override CC += -mno-strict-align
-endif
-override AS += -mlittle-endian
override LD += -EL
-override CROSS32CC += -mlittle-endian
override CROSS32AS += -mlittle-endian
LDEMULATION := lppc
GNUTARGET := powerpcle
MULTIPLEWORD := -mno-multiple
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
else
-ifeq ($(call cc-option-yn,-mbig-endian),y)
-override CC += -mbig-endian
-override AS += -mbig-endian
-endif
override LD += -EB
LDEMULATION := ppc
GNUTARGET := powerpc
MULTIPLEWORD := -mmultiple
endif
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
+ifneq ($(cc-name),clang)
+ cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
+endif
+
+aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
+
ifeq ($(HAS_BIARCH),y)
override AS += -a$(CONFIG_WORD_SIZE)
override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
KBUILD_AFLAGS += $(cpu-as-y)
KBUILD_CFLAGS += $(cpu-as-y)
+KBUILD_AFLAGS += $(aflags-y)
+KBUILD_CFLAGS += $(cflags-y)
+
head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o
head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/cpufeature.h>
#include <asm/switch_to.h>
#define CHKSUM_BLOCK_SIZE 1
crypto_unregister_shash(&alg);
}
-module_init(crc32c_vpmsum_mod_init);
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
module_exit(crc32c_vpmsum_mod_fini);
MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
#endif
+/* Idle state entry routines */
+#ifdef CONFIG_PPC_P7_NAP
+#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
+ /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
+ std r0,0(r1); \
+ ptesync; \
+ ld r0,0(r1); \
+1: cmp cr0,r0,r0; \
+ bne 1b; \
+ IDLE_INST; \
+ b .
+#endif /* CONFIG_PPC_P7_NAP */
+
#endif
#ifndef __ASSEMBLY__
void apply_feature_fixups(void);
+void setup_feature_keys(void);
#endif
#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
static inline void __giveup_spe(struct task_struct *t) { }
#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-extern void flush_tmregs_to_thread(struct task_struct *);
-#else
-static inline void flush_tmregs_to_thread(struct task_struct *t)
-{
-}
-#endif
-
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
{
unsigned long over;
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(VERIFY_READ, from, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
+ if (!__builtin_constant_p(n - over))
+ check_object_size(to, n - over, false);
return __copy_tofrom_user((__force void __user *)to, from,
n - over) + over;
}
{
unsigned long over;
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
+ }
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n - over, true);
return __copy_tofrom_user(to, (__force void __user *)from,
n - over) + over;
}
if (ret == 0)
return 0;
}
+
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
+
return __copy_tofrom_user((__force void __user *)to, from, n);
}
if (ret == 0)
return 0;
}
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
+
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
extern void xics_kexec_teardown_cpu(int secondary);
extern void xics_migrate_irqs_away(void);
extern void icp_native_eoi(struct irq_data *d);
+extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
+extern int xics_retrigger(struct irq_data *data);
#ifdef CONFIG_SMP
extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check);
int n = 0, l = 0;
char buffer[128];
- n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
+ n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
edev->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
- pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
+ pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
edev->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
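
The EEH hunk above corrects the printed address to the conventional domain:bus:slot.func (BDF) notation, with the function separated by a dot. A trivial sketch of producing that format from a devfn-style encoding, using made-up values:

#include <stdio.h>

/* Conventional PCI devfn split: upper 5 bits = slot, lower 3 bits = function. */
#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int domain = 0x0001, bus = 0x02;	/* illustrative values */
	unsigned int devfn = (0x03 << 3) | 0x1;		/* slot 3, function 1 */

	/* domain:bus:slot.func, matching the corrected format string above */
	printf("%04x:%02x:%02x.%01x\n", domain, bus, SLOT(devfn), FUNC(devfn));
	return 0;
}
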
* vector
*/
SET_SCRATCH0(r13) /* save r13 */
-#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
- /* Running native on arch 2.06 or later, check if we are
- * waking up from nap. We only handle no state loss and
- * supervisor state loss. We do -not- handle hypervisor
- * state loss at this time.
+ /*
+ * Running native on arch 2.06 or later, we may wake up from winkle
+ * inside a machine check. If so, the last bit of HSPRG0 will be set
+ * to 1. Hence clear it unconditionally.
*/
- mfspr r13,SPRN_SRR1
- rlwinm. r13,r13,47-31,30,31
- OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
- beq 9f
-
- mfspr r13,SPRN_SRR1
- rlwinm. r13,r13,47-31,30,31
- /* waking up from powersave (nap) state */
- cmpwi cr1,r13,2
- /* Total loss of HV state is fatal. let's just stay stuck here */
- OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
- bgt cr1,.
-9:
- OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
+ GET_PACA(r13)
+ clrrdi r13,r13,1
+ SET_PACA(r13)
EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
b machine_check_powernv_early
* Check if thread was in power saving mode. We come here when any
* of the following is true:
* a. thread wasn't in power saving mode
- * b. thread was in power saving mode with no state loss or
- * supervisor state loss
+ * b. thread was in power saving mode with no state loss,
+ * supervisor state loss or hypervisor state loss.
*
- * Go back to nap again if (b) is true.
+ * Go back to nap/sleep/winkle mode again if (b) is true.
*/
rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
beq 4f /* No, it wasn't */
/* Thread was in power saving mode. Go back to nap again. */
cmpwi r11,2
- bne 3f
- /* Supervisor state loss */
+ blt 3f
+ /* Supervisor/Hypervisor state loss */
li r0,1
stb r0,PACA_NAPSTATELOST(r13)
3: bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
GET_PACA(r13)
ld r1,PACAR1(r13)
- li r3,PNV_THREAD_NAP
- b pnv_enter_arch207_idle_mode
+ /*
+ * Check what idle state this CPU was in and go back to same mode
+ * again.
+ */
+ lbz r3,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi r3,PNV_THREAD_NAP
+ bgt 10f
+ IDLE_STATE_ENTER_SEQ(PPC_NAP)
+ /* No return */
+10:
+ cmpwi r3,PNV_THREAD_SLEEP
+ bgt 2f
+ IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+ /* No return */
+
+2:
+ /*
+ * Go back to winkle. Please note that this thread was woken up in
+ * a machine check from winkle and has not restored the per-subcore
+ * state. Hence, before going back to winkle, set the last bit of
+ * HSPRG0 to 1. This will make sure that if this thread gets woken
+ * up again at reset vector 0x100, it will get a chance to restore
+ * the subcore state.
+ */
+ ori r13,r13,1
+ SET_PACA(r13)
+ IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
+ /* No return */
4:
#endif
/*
PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
PSSCR_MTL_MASK
-/* Idle state entry routines */
-
-#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
- /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
- std r0,0(r1); \
- ptesync; \
- ld r0,0(r1); \
-1: cmp cr0,r0,r0; \
- bne 1b; \
- IDLE_INST; \
- b .
-
.text
/*
* cr3 - set to gt if waking up with partial/complete hypervisor state loss
*/
_GLOBAL(pnv_restore_hyp_resource)
- ld r2,PACATOC(r13);
BEGIN_FTR_SECTION
+ ld r2,PACATOC(r13);
/*
* POWER ISA 3. Use PSSCR to determine if we
* are waking up from deep idle state
*/
clrldi r5,r13,63
clrrdi r13,r13,1
+
+ /* Now that we are sure r13 is corrected, load TOC */
+ ld r2,PACATOC(r13);
cmpwi cr4,r5,1
mtspr SPRN_HSPRG0,r13
mce->in_use = 1;
mce->initiator = MCE_INITIATOR_CPU;
- if (handled)
+ /* Mark it recovered if we have handled it and MSR(RI=1). */
+ if (handled && (regs->msr & MSR_RI))
mce->disposition = MCE_DISPOSITION_RECOVERED;
else
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
+ u32 prop_32;
u64 prop;
/*
* reading "ibm,opal-phbid", only present in OPAL environment.
*/
ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
- if (ret)
- ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
+ if (ret) {
+ ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
+ prop = prop_32;
+ }
if (!ret)
phb_id = (int)(prop & (MAX_PHBS - 1));
#endif
}
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void flush_tmregs_to_thread(struct task_struct *tsk)
-{
- /*
- * Process self tracing is not yet supported through
- * ptrace interface. Ptrace generic code should have
- * prevented this from happening in the first place.
- * Warn once here with the message, if some how it
- * is attempted.
- */
- WARN_ONCE(tsk == current,
- "Not expecting ptrace on self: TM regs may be incorrect\n");
-
- /*
- * If task is not current, it should have been flushed
- * already to it's thread_struct during __switch_to().
- */
-}
-#endif
-
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
/* Don't print anything after quiesce under OPAL, it crashes OFW */
if (of_platform != PLATFORM_OPAL) {
- prom_printf("Booting Linux via __start() ...\n");
+ prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
prom_debug("->dt_header_start=0x%x\n", hdr);
}
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
+#include <asm/tm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
REG_OFFSET_END,
};
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+ /*
+ * If task is not current, it will have been flushed already to
+ * its thread_struct during __switch_to().
+ *
+ * A reclaim flushes ALL the state.
+ */
+
+ if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
+
+}
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+#endif
+
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
* and we are running with enough of the MMU enabled to have our
* proper kernel virtual addresses
*
- * Find out what kind of machine we're on and save any data we need
- * from the early boot process (devtree is copied on pmac by prom_init()).
- * This is called very early on the boot process, after a minimal
- * MMU environment has been set up but before MMU_init is called.
+ * We do the initial parsing of the flat device-tree and prepare
+ * for the MMU to be fully initialized.
*/
extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
notrace void __init machine_init(u64 dt_ptr)
{
+ /* Configure static keys first, now that we're relocated. */
+ setup_feature_keys();
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
/* Apply all the dynamic patching */
apply_feature_fixups();
+ setup_feature_keys();
/* Initialize the hash table or TLB handling */
early_init_mmu();
#include <linux/security.h>
#include <linux/memblock.h>
+#include <asm/cpu_has_feature.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,vdso32ld)
# strip rule for the .so file
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
$(call if_changed,vdso64ld)
# strip rule for the .so file
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
xics->kvm = kvm;
/* Already there ? */
- mutex_lock(&kvm->lock);
if (kvm->arch.xics)
ret = -EEXIST;
else
kvm->arch.xics = xics;
- mutex_unlock(&kvm->lock);
if (ret) {
kfree(xics);
return ret;
}
- xics_debugfs_init(xics);
-
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
/* Enable real mode support */
return 0;
}
+static void kvmppc_xics_init(struct kvm_device *dev)
+{
+ struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
+
+ xics_debugfs_init(xics);
+}
+
struct kvm_device_ops kvm_xics_ops = {
.name = "kvm-xics",
.create = kvmppc_xics_create,
+ .init = kvmppc_xics_init,
.destroy = kvmppc_xics_free,
.set_attr = xics_set_attr,
.get_attr = xics_get_attr,
stw r7,12(r1)
stw r8,8(r1)
- andi. r0,r4,1 /* is destination address even ? */
- cmplwi cr7,r0,0
+ rlwinm r0,r4,3,0x8
+ rlwnm r6,r6,r0,0,31 /* odd destination address: rotate one byte */
+ cmplwi cr7,r0,0 /* is destination address even ? */
addic r12,r6,0
addi r6,r4,-4
neg r0,r4
66: addze r3,r12
addi r1,r1,16
beqlr+ cr7
- rlwinm r3,r3,8,0,31 /* swap bytes for odd destination */
+ rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
blr
/* read fault */
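
The checksum fix above handles an odd destination address by rotating the running 16-bit ones' complement sum one byte on entry and once more on exit. That works because of the byte-order independence of the ones' complement sum (RFC 1071): summing with every 16-bit word byte-swapped produces the byte-swap of the original sum. Below is a minimal, self-contained check of that property in C; the test buffer is arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit ones' complement sum, reading words big- or little-endian. */
static uint16_t ones_sum(const uint8_t *p, size_t n, int swapped)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < n; i += 2) {
		uint16_t w = swapped ? (uint16_t)(p[i] | (p[i + 1] << 8))
				     : (uint16_t)((p[i] << 8) | p[i + 1]);
		sum += w;
	}
	while (sum >> 16)			/* fold the end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t rot8(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	const uint8_t buf[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0xa0, 0x0b };
	uint16_t a = ones_sum(buf, sizeof(buf), 0);
	uint16_t b = ones_sum(buf, sizeof(buf), 1);

	/* b should simply be the byte-rotated form of a */
	printf("sum=%04x swapped=%04x rot8(sum)=%04x -> %s\n",
	       a, b, rot8(a), b == rot8(a) ? "match" : "MISMATCH");
	return 0;
}
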
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
do_final_fixups();
+}
+void __init setup_feature_keys(void)
+{
/*
* Initialise jump label. This causes all the cpu/mmu_has_feature()
* checks to take on their correct polarity based on the current set of
gang = alloc_spu_gang();
SPUFS_I(inode)->i_ctx = NULL;
SPUFS_I(inode)->i_gang = gang;
- if (!gang)
+ if (!gang) {
+ ret = -ENOMEM;
goto out_iput;
+ }
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
dev->dev.archdata.dma_ops = &dma_direct_ops;
+ /*
+ * Set the coherent DMA mask to prevent the iommu
+ * being used unnecessarily
+ */
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
return;
}
#endif
}
/* Install interrupt handler */
- rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
+ rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
+ "opal", NULL);
if (rc) {
irq_dispose_mapping(virq);
pr_warn("Error %d requesting irq %d (0x%x)\n",
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
+ pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
recovered = 0;
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
/* Platform corrected itself */
}
early_param("iommu", iommu_setup);
-static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
+static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
{
- return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
- (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
+ /*
+ * WARNING: We cannot rely on the resource flags. The Linux PCI
+ * allocation code sometimes decides to put a 64-bit prefetchable
+ * BAR in the 32-bit window, so we have to compare the addresses.
+ *
+ * For simplicity we only test resource start.
+ */
+ return (r->start >= phb->ioda.m64_base &&
+ r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}
static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
sgsz = phb->ioda.m64_segsize;
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
r = &pdev->resource[i];
- if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+ if (!r->parent || !pnv_pci_is_m64(phb, r))
continue;
start = _ALIGN_DOWN(r->start - base, sgsz);
unsigned shift, unsigned long index,
unsigned long npages)
{
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
unsigned long start, end, inc;
/* We'll invalidate DMA address in PE scope */
res = &pdev->resource[i + PCI_IOV_RESOURCES];
if (!res->flags || res->parent)
continue;
- if (!pnv_pci_is_mem_pref_64(res->flags)) {
+ if (!pnv_pci_is_m64(phb, res)) {
dev_warn(&pdev->dev, "Don't support SR-IOV with"
" non M64 VF BAR%d: %pR. \n",
i, res);
index++;
}
} else if ((res->flags & IORESOURCE_MEM) &&
- !pnv_pci_is_mem_pref_64(res->flags)) {
+ !pnv_pci_is_m64(phb, res)) {
region.start = res->start -
phb->hose->mem_offset[0] -
phb->ioda.m32_pci_base;
bridge = bridge->bus->self;
}
- /* We fail back to M32 if M64 isn't supported */
- if (phb->ioda.m64_segsize &&
- pnv_pci_is_mem_pref_64(type))
+ /*
+ * We fall back to M32 if M64 isn't supported. We enforce the M64
+ * alignment for any 64-bit resource, as PCIe doesn't care and
+ * bridges only do 64-bit prefetchable anyway.
+ */
+ if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
return phb->ioda.m64_segsize;
if (type & IORESOURCE_MEM)
return phb->ioda.m32_segsize;
w = NULL;
if (r->flags & type & IORESOURCE_IO)
w = &hose->io_resource;
- else if (pnv_pci_is_mem_pref_64(r->flags) &&
+ else if (pnv_pci_is_m64(phb, r) &&
(type & IORESOURCE_PREFETCH) &&
phb->ioda.m64_segsize)
w = &hose->mem_resources[1];
return dlpar_update_device_tree_lmb(lmb);
}
-static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
-{
- unsigned long section_nr;
- struct mem_section *mem_sect;
- struct memory_block *mem_block;
-
- section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
- mem_sect = __nr_to_section(section_nr);
-
- mem_block = find_memory_block(mem_sect);
- return mem_block;
-}
-
#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
static int dlpar_add_lmb(struct of_drconf_cell *);
+static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
+{
+ unsigned long section_nr;
+ struct mem_section *mem_sect;
+ struct memory_block *mem_block;
+
+ section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
+ mem_sect = __nr_to_section(section_nr);
+
+ mem_block = find_memory_block(mem_sect);
+ return mem_block;
+}
+
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
struct memory_block *mem_block;
config PPC_XICS
def_bool n
select PPC_SMP_MUXED_IPI
+ select HARDIRQS_SW_RESEND
config PPC_ICP_NATIVE
def_bool n
.irq_mask = ics_opal_mask_irq,
.irq_unmask = ics_opal_unmask_irq,
.irq_eoi = NULL, /* Patched at init time */
- .irq_set_affinity = ics_opal_set_affinity
+ .irq_set_affinity = ics_opal_set_affinity,
+ .irq_set_type = xics_set_irq_type,
+ .irq_retrigger = xics_retrigger,
};
static int ics_opal_map(struct ics *ics, unsigned int virq);
.irq_mask = ics_rtas_mask_irq,
.irq_unmask = ics_rtas_unmask_irq,
.irq_eoi = NULL, /* Patched at init time */
- .irq_set_affinity = ics_rtas_set_affinity
+ .irq_set_affinity = ics_rtas_set_affinity,
+ .irq_set_type = xics_set_irq_type,
+ .irq_retrigger = xics_retrigger,
};
static int ics_rtas_map(struct ics *ics, unsigned int virq)
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
- /* They aren't all level sensitive but we just don't really know */
- irq_set_status_flags(virq, IRQ_LEVEL);
+ /*
+ * Mark interrupts as edge sensitive by default so that resend
+ * actually works. The device-tree parsing will turn the LSIs
+ * back to level.
+ */
+ irq_clear_status_flags(virq, IRQ_LEVEL);
/* Don't call into ICS for IPIs */
if (hw == XICS_IPI) {
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
- /* Current xics implementation translates everything
- * to level. It is not technically right for MSIs but this
- * is irrelevant at this point. We might get smarter in the future
- */
*out_hwirq = intspec[0];
- *out_flags = IRQ_TYPE_LEVEL_LOW;
+ /*
+ * If intsize is at least 2, we look for the type in the second cell;
+ * we assume the LSB indicates a level interrupt.
+ */
+ if (intsize > 1) {
+ if (intspec[1] & 1)
+ *out_flags = IRQ_TYPE_LEVEL_LOW;
+ else
+ *out_flags = IRQ_TYPE_EDGE_RISING;
+ } else
+ *out_flags = IRQ_TYPE_LEVEL_LOW;
+
+ return 0;
+}
+
+int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+ /*
+ * We only support these. This has really no effect other than setting
+ * We only support these. This really has no effect other than setting
+ * the corresponding descriptor bits, mind you, but those will in turn
+ *
+ * Set set the default to edge as explained in map().
+ * Set the default to edge as explained in map().
+ if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
+ flow_type = IRQ_TYPE_EDGE_RISING;
+
+ if (flow_type != IRQ_TYPE_EDGE_RISING &&
+ flow_type != IRQ_TYPE_LEVEL_LOW)
+ return -EINVAL;
+
+ irqd_set_trigger_type(d, flow_type);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+int xics_retrigger(struct irq_data *data)
+{
+ /*
+ * We need to push a dummy CPPR when retriggering, since the subsequent
+ * EOI will try to pop it. Passing 0 works, as the function hard codes
+ * the priority value anyway.
+ */
+ xics_push_cppr(0);
+
+ /* Tell the core to do a soft retrigger */
return 0;
}
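
The XICS changes above derive the trigger type from the second interrupt-specifier cell: if its least significant bit is set the source is level-low, otherwise edge-rising, and a single-cell specifier defaults to level-low. A compact userspace sketch of that decoding rule, with hypothetical specifier values:

#include <stdio.h>

enum trigger { EDGE_RISING, LEVEL_LOW };

/*
 * Mirror of the decoding rule above: with a one-cell specifier assume
 * level-low; with two cells the LSB of the second cell selects level.
 */
static enum trigger decode_trigger(const unsigned int *spec, int cells)
{
	if (cells > 1)
		return (spec[1] & 1) ? LEVEL_LOW : EDGE_RISING;
	return LEVEL_LOW;
}

int main(void)
{
	unsigned int lsi[]  = { 17, 1 };	/* hypothetical level-sensitive source */
	unsigned int msi[]  = { 42, 0 };	/* hypothetical edge source */
	unsigned int bare[] = { 9 };		/* single-cell specifier */

	printf("lsi  -> %s\n", decode_trigger(lsi, 2)  == LEVEL_LOW ? "level-low" : "edge-rising");
	printf("msi  -> %s\n", decode_trigger(msi, 2)  == LEVEL_LOW ? "level-low" : "edge-rising");
	printf("bare -> %s\n", decode_trigger(bare, 1) == LEVEL_LOW ? "level-low" : "edge-rising");
	return 0;
}
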
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_EARLY_PFN_TO_NID
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
Select this option if you want to run the kernel as a guest under
the KVM hypervisor.
+config S390_GUEST_OLD_TRANSPORT
+ def_bool y
+ prompt "Guest support for old s390 virtio transport (DEPRECATED)"
+ depends on S390_GUEST
+ help
+ Enable this option to add support for the old s390-virtio
+ transport (i.e. virtio devices NOT based on virtio-ccw). This
+ type of virtio device is only available on the experimental
+ kuli userspace or with old (< 2.6) qemu. If you are running
+ with a modern version of qemu (which supports virtio-ccw since
+ 1.4 and uses it by default since version 2.4), you probably won't
+ need this.
+
endmenu
lg %r15,.Lstack-.LPG1(%r13)
aghi %r15,-160
brasl %r14,decompress_kernel
- # setup registers for memory mover & branch to target
+ # Set up registers for memory mover. We move the decompressed image to
+ # 0x11000, starting at offset 0x11000 in the decompressed image so
+ # that code living at 0x11000 in the image will end up at 0x11000 in
+ # memory.
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
la %r4,0(%r2,%r4)
lg %r3,.Lmvsize-.LPG1(%r13)
lgr %r5,%r3
- # move the memory mover someplace safe
+ # Move the memory mover someplace safe so it doesn't overwrite itself.
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
- # decompress image is started at 0x11000
+ # When the memory mover is done we pass control to
+ # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+ # the decompressed image.
lgr %r6,%r2
br %r1
mover:
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
struct kernel_fpu vxstate; \
unsigned long prealign, aligned, remaining; \
\
+ if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \
+ return ___crc32_sw(crc, data, datalen); \
+ \
if ((unsigned long)data & VX_ALIGN_MASK) { \
prealign = VX_ALIGNMENT - \
((unsigned long)data & VX_ALIGN_MASK); \
data = (void *)((unsigned long)data + prealign); \
} \
\
- if (datalen < VX_MIN_LEN) \
- return ___crc32_sw(crc, data, datalen); \
- \
aligned = datalen & ~VX_ALIGN_MASK; \
remaining = datalen & VX_ALIGN_MASK; \
\
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
- /* Continue with startup code in head64.S */
+# For uncompressed images, continue in
+# arch/s390/kernel/head64.S. For compressed images, continue in
+# arch/s390/boot/compressed/head.S.
jg startup_continue
.Lstack:
#endif
}
} else if (MACHINE_IS_KVM) {
- if (sclp.has_vt220 &&
- config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+ if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
SET_CONSOLE_VT220;
- else if (sclp.has_linemode &&
- config_enabled(CONFIG_SCLP_CONSOLE))
+ else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
SET_CONSOLE_SCLP;
else
SET_CONSOLE_HVC;
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
+ kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
rc = gmap_mprotect_notify(vcpu->arch.gmap,
kvm_s390_get_prefix(vcpu),
PAGE_SIZE * 2, PROT_WRITE);
- if (rc)
+ if (rc) {
+ kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
return rc;
+ }
goto retry;
}
EXPORT_SYMBOL(strrchr);
static inline int clcle(const char *s1, unsigned long l1,
- const char *s2, unsigned long l2,
- int *diff)
+ const char *s2, unsigned long l2)
{
register unsigned long r2 asm("2") = (unsigned long) s1;
- register unsigned long r3 asm("3") = (unsigned long) l2;
+ register unsigned long r3 asm("3") = (unsigned long) l1;
register unsigned long r4 asm("4") = (unsigned long) s2;
register unsigned long r5 asm("5") = (unsigned long) l2;
int cc;
" srl %0,28"
: "=&d" (cc), "+a" (r2), "+a" (r3),
"+a" (r4), "+a" (r5) : : "cc");
- *diff = *(char *)r2 - *(char *)r4;
return cc;
}
return (char *) s1;
l1 = __strend(s1) - s1;
while (l1-- >= l2) {
- int cc, dummy;
+ int cc;
- cc = clcle(s1, l1, s2, l2, &dummy);
+ cc = clcle(s1, l2, s2, l2);
if (!cc)
return (char *) s1;
s1++;
*/
int memcmp(const void *cs, const void *ct, size_t n)
{
- int ret, diff;
+ int ret;
- ret = clcle(cs, n, ct, n, &diff);
+ ret = clcle(cs, n, ct, n);
if (ret)
- ret = diff;
+ ret = ret == 1 ? -1 : 1;
return ret;
}
EXPORT_SYMBOL(memcmp);
unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ check_object_size(to, n, false);
if (static_branch_likely(&have_mvcos))
return copy_from_user_mvcos(to, from, n);
return copy_from_user_mvcp(to, from, n);
unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ check_object_size(from, n, true);
if (static_branch_likely(&have_mvcos))
return copy_to_user_mvcos(to, from, n);
return copy_to_user_mvcs(to, from, n);
int rc = -EINVAL;
pgd_t *pgdp;
+ if (addr == end)
+ return 0;
if (end >= MODULES_END)
return -EINVAL;
mutex_lock(&cpa_mutex);
select OLD_SIGSUSPEND
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
+ select HAVE_ARCH_HARDENED_USERCOPY
config SPARC32
def_bool !64BIT
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (n && __access_ok((unsigned long) to, n))
+ if (n && __access_ok((unsigned long) to, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
- else
+ } else
return n;
}
static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
}
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (n && __access_ok((unsigned long) from, n))
+ if (n && __access_ok((unsigned long) from, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n);
- else
+ } else
return n;
}
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_from_user(to, from, size);
+ unsigned long ret;
+ if (!__builtin_constant_p(size))
+ check_object_size(to, size, false);
+
+ ret = ___copy_from_user(to, from, size);
if (unlikely(ret))
ret = copy_from_user_fixup(to, from, size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
- unsigned long ret = ___copy_to_user(to, from, size);
+ unsigned long ret;
+ if (!__builtin_constant_p(size))
+ check_object_size(from, size, true);
+ ret = ___copy_to_user(to, from, size);
if (unlikely(ret))
ret = copy_to_user_fixup(to, from, size);
return ret;
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
+ .exit.text : { EXIT_TEXT }
.exit.data : { *(.exit.data) }
.preinit_array : {
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
- bool write, bool foreign)
+ bool write, bool execute, bool foreign)
{
/* by default, allow everything */
return true;
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_AOUT if X86_32
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
select HAVE_ARCH_SOFT_DIRTY if X86_64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_EBPF_JIT if X86_64
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
+ vmovd _args_digest(state , idx, 4) , %xmm0
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
- vmovdqu %xmm0, _result_digest(job_rax)
- movl tmp2_w, _result_digest+1*16(job_rax)
+ vmovdqu %xmm0, _result_digest(job_rax)
+ offset = (_result_digest + 1*16)
+ vmovdqu %xmm1, offset(job_rax)
pop %rbx
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
+CFLAGS_syscall_64.o += -Wno-override-init
+CFLAGS_syscall_32.o += -Wno-override-init
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
jne opportunistic_sysret_failed
/*
- * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
- * restoring TF results in a trap from userspace immediately after
- * SYSRET. This would cause an infinite loop whenever #DB happens
- * with register state that satisfies the opportunistic SYSRET
- * conditions. For example, single-stepping this user code:
+ * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
+ * restore RF properly. If the slowpath sets it for whatever reason, we
+ * need to restore it correctly.
+ *
+ * SYSRET can restore TF, but unlike IRET, restoring TF results in a
+ * trap from userspace immediately after SYSRET. This would cause an
+ * infinite loop whenever #DB happens with register state that satisfies
+ * the opportunistic SYSRET conditions. For example, single-stepping
+ * this user code:
*
* movq $stuck_here, %rcx
* pushfq
.endm
#endif
+/* Make sure APIC interrupt handlers end up in the irqentry section: */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+# define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
+# define POP_SECTION_IRQENTRY .popsection
+#else
+# define PUSH_SECTION_IRQENTRY
+# define POP_SECTION_IRQENTRY
+#endif
+
.macro apicinterrupt num sym do_sym
+PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
+POP_SECTION_IRQENTRY
.endm
#ifdef CONFIG_SMP
}
}
+static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+ SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+}
+
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
static struct intel_uncore_ops snb_uncore_msr_ops = {
.init_box = snb_uncore_msr_init_box,
+ .enable_box = snb_uncore_msr_enable_box,
.exit_box = snb_uncore_msr_exit_box,
.disable_event = snb_uncore_msr_disable_event,
.enable_event = snb_uncore_msr_enable_event,
}
}
+static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+ SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
+}
+
static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0)
static struct intel_uncore_ops skl_uncore_msr_ops = {
.init_box = skl_uncore_msr_init_box,
+ .enable_box = skl_uncore_msr_enable_box,
.exit_box = skl_uncore_msr_exit_box,
.disable_event = snb_uncore_msr_disable_event,
.enable_event = snb_uncore_msr_enable_event,
static struct intel_uncore_type hswep_uncore_ha = {
.name = "ha",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 2,
.perf_ctr_bits = 48,
SNBEP_UNCORE_PCI_COMMON_INIT(),
static struct intel_uncore_type hswep_uncore_imc = {
.name = "imc",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 8,
.perf_ctr_bits = 48,
.fixed_ctr_bits = 48,
static struct intel_uncore_type hswep_uncore_qpi = {
.name = "qpi",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 3,
.perf_ctr_bits = 48,
.perf_ctr = SNBEP_PCI_PMON_CTR0,
static struct intel_uncore_type hswep_uncore_r3qpi = {
.name = "r3qpi",
- .num_counters = 4,
+ .num_counters = 3,
.num_boxes = 3,
.perf_ctr_bits = 44,
.constraints = hswep_uncore_r3qpi_constraints,
static struct intel_uncore_type bdx_uncore_imc = {
.name = "imc",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 8,
.perf_ctr_bits = 48,
.fixed_ctr_bits = 48,
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
+extern void lapic_update_tsc_freq(void);
extern int APIC_init_uniprocessor(void);
#ifdef CONFIG_X86_64
static inline void disable_local_APIC(void) { }
# define setup_boot_APIC_clock x86_init_noop
# define setup_secondary_APIC_clock x86_init_noop
+static inline void lapic_update_tsc_freq(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_X2APIC
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
- /*
- * irq_tlb_count is double-counted in irq_call_count, so it must be
- * subtracted from irq_call_count when displaying irq_call_count
- */
unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
void *context; /* context for alloc_pgt_page */
unsigned long pmd_flag; /* page flag for PMD entry */
- bool kernel_mapping; /* kernel mapping or ident mapping */
+ unsigned long offset; /* ident mapping offset */
};
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
- unsigned long addr, unsigned long end);
+ unsigned long pstart, unsigned long pend);
#endif /* _ASM_X86_INIT_H */
*
* | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
* | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
- * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry
+ * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry
*
* G (8) is aliased and used as a PROT_NONE indicator for
* !present ptes. We need to start storing swap entries above
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1)
+#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
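
With SWP_TYPE_FIRST_BIT at 9, SWP_TYPE_BITS at 5 and SWP_OFFSET_FIRST_BIT therefore at 14, the layout comment above describes how a swap entry packs its type and offset. The sketch below just replays that packing with the same bit positions as assumed local constants; it is illustrative, not the kernel's swp_entry code:

#include <stdio.h>
#include <stdint.h>

/* Bit positions taken from the layout comment above (assumed, not included). */
#define SWP_TYPE_FIRST_BIT	9
#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	(SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)	/* 14 */

static uint64_t mk_swp_entry(unsigned type, uint64_t offset)
{
	return ((uint64_t)type << SWP_TYPE_FIRST_BIT) |
	       (offset << SWP_OFFSET_FIRST_BIT);
}

int main(void)
{
	unsigned type = 3;		/* illustrative swap file index */
	uint64_t offset = 0x12345;	/* illustrative page offset     */
	uint64_t e = mk_swp_entry(type, offset);

	printf("entry  = %#llx\n", (unsigned long long)e);
	printf("type   = %llu\n",
	       (unsigned long long)((e >> SWP_TYPE_FIRST_BIT) & ((1u << SWP_TYPE_BITS) - 1)));
	printf("offset = %#llx\n", (unsigned long long)(e >> SWP_OFFSET_FIRST_BIT));
	return 0;
}
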
extern unsigned char secondary_startup_64[];
#endif
+static inline size_t real_mode_size_needed(void)
+{
+ if (real_mode_header)
+ return 0; /* already allocated. */
+
+ return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
+}
+
+void set_real_mode_mem(phys_addr_t mem, size_t size);
void reserve_real_mode(void);
-void setup_real_mode(void);
#endif /* _ARCH_X86_REALMODE_H */
return sp;
}
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ * 1 if within a frame
+ * -1 if placed across a frame boundary (or outside stack)
+ * 0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+ const void *frame = NULL;
+ const void *oldframe;
+
+ oldframe = __builtin_frame_address(1);
+ if (oldframe)
+ frame = __builtin_frame_address(2);
+ /*
+ * low ----------------------------------------------> high
+ * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+ * ^----------------^
+ * allow copies only within here
+ */
+ while (stack <= frame && frame < stackend) {
+ /*
+ * If obj + len extends past the last frame, this
+ * check won't pass and the next frame will be 0,
+ * causing us to bail out and correctly report
+ * the copy as invalid.
+ */
+ if (obj + len <= frame)
+ return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+ oldframe = frame;
+ frame = *(const void * const *)frame;
+ }
+ return -1;
+#else
+ return 0;
+#endif
+}
+
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_64
static inline void __native_flush_tlb(void)
{
+ /*
+ * If current->mm == NULL then we borrow a mm which may change during a
+ * task switch and therefore we must not be preempted while we write CR3
+ * back:
+ */
+ preempt_disable();
native_write_cr3(native_read_cr3());
+ preempt_enable();
}
static inline void __native_flush_tlb_global_irq_disabled(void)
* case, and do only runtime checking for non-constant sizes.
*/
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_from_user_overflow();
else
__copy_from_user_overflow(sz, n);
might_fault();
/* See the comment in copy_from_user() above. */
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_to_user_overflow();
else
__copy_to_user_overflow(sz, n);
#define user_access_begin() __uaccess_begin()
#define user_access_end() __uaccess_end()
-#define unsafe_put_user(x, ptr) \
-({ \
+#define unsafe_put_user(x, ptr, err_label) \
+do { \
int __pu_err; \
__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
- __builtin_expect(__pu_err, 0); \
-})
+ if (unlikely(__pu_err)) goto err_label; \
+} while (0)
-#define unsafe_get_user(x, ptr) \
-({ \
+#define unsafe_get_user(x, ptr, err_label) \
+do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- __builtin_expect(__gu_err, 0); \
-})
+ if (unlikely(__gu_err)) goto err_label; \
+} while (0)
#endif /* _ASM_X86_UACCESS_H */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
+ check_object_size(from, n, true);
return __copy_to_user_ll(to, from, n);
}
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
+ check_object_size(to, n, false);
if (__builtin_constant_p(n)) {
unsigned long ret;
{
int ret = 0;
+ check_object_size(dst, size, false);
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
{
int ret = 0;
+ check_object_size(src, size, true);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
u16 nasid; /* HNasid */
u16 sockid; /* Socket ID, high bits of APIC ID */
u16 pnode; /* Index to MMR and GRU spaces */
- u32 pxm; /* ACPI proximity domain number */
+ u32 unused2;
u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
};
#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */
#define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */
#define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */
-#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_2
+#define UV_SYSTAB_VERSION_UV4_3 0x403 /* - GAM Range PXM Value */
+#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_3
#define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */
#define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */
/* Clock divisor */
#define APIC_DIVISOR 16
-#define TSC_DIVISOR 32
+#define TSC_DIVISOR 8
/*
* This function sets up the local APIC timer, with a timeout of
CLOCK_EVT_FEAT_DUMMY);
levt->set_next_event = lapic_next_deadline;
clockevents_config_and_register(levt,
- (tsc_khz / TSC_DIVISOR) * 1000,
+ tsc_khz * (1000 / TSC_DIVISOR),
0xF, ~0UL);
} else
clockevents_register_device(levt);
}
+/*
+ * Install the updated TSC frequency from recalibration into the TSC
+ * deadline clockevent devices.
+ */
+static void __lapic_update_tsc_freq(void *info)
+{
+ struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
+
+ if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ return;
+
+ clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
+}
+
+void lapic_update_tsc_freq(void)
+{
+ /*
+ * The clockevent device's ->mult and ->shift can both be
+ * changed. In order to avoid races, schedule the frequency
+ * update code on each CPU.
+ */
+ on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
+}
+
/*
* In this functions we calibrate APIC bus clocks to the external timer.
*
unsigned long flags;
int ret, ir_stat;
+ if (skip_ioapic_setup)
+ return;
+
ir_stat = irq_remapping_prepare();
if (ir_stat < 0 && !x2apic_supported())
return;
/*
* At CPU state changes, update the x2apic cluster sibling info.
*/
-int x2apic_prepare_cpu(unsigned int cpu)
+static int x2apic_prepare_cpu(unsigned int cpu)
{
if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
return -ENOMEM;
return 0;
}
-int x2apic_dead_cpu(unsigned int this_cpu)
+static int x2apic_dead_cpu(unsigned int this_cpu)
{
int cpu;
static int x2apic_cluster_probe(void)
{
int cpu = smp_processor_id();
+ int ret;
if (!x2apic_mode)
return 0;
+ ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+ x2apic_prepare_cpu, x2apic_dead_cpu);
+ if (ret < 0) {
+ pr_err("Failed to register X2APIC_PREPARE\n");
+ return 0;
+ }
cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
- cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
- x2apic_prepare_cpu, x2apic_dead_cpu);
return 1;
}
if (strncmp(oem_id, "SGI", 3) != 0)
return 0;
+ if (numa_off) {
+ pr_err("UV: NUMA is off, disabling UV support\n");
+ return 0;
+ }
+
/* Setup early hub type field in uv_hub_info for Node 0 */
uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
struct uv_gam_range_entry *gre = uv_gre_table;
struct uv_gam_range_s *grt;
unsigned long last_limit = 0, ram_limit = 0;
- int bytes, i, sid, lsid = -1;
+ int bytes, i, sid, lsid = -1, indx = 0, lindx = -1;
if (!gre)
return;
}
sid = gre->sockid - _min_socket;
if (lsid < sid) { /* new range */
- grt = &_gr_table[sid];
- grt->base = lsid;
+ grt = &_gr_table[indx];
+ grt->base = lindx;
grt->nasid = gre->nasid;
grt->limit = last_limit = gre->limit;
lsid = sid;
+ lindx = indx++;
continue;
}
if (lsid == sid && !ram_limit) { /* update range */
}
if (!ram_limit) { /* non-contiguous ram range */
grt++;
- grt->base = sid - 1;
+ grt->base = lindx;
grt->nasid = gre->nasid;
grt->limit = last_limit = gre->limit;
continue;
for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
if (!index) {
pr_info("UV: GAM Range Table...\n");
- pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s %3s\n",
+ pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n",
"Range", "", "Size", "Type", "NASID",
- "SID", "PN", "PXM");
+ "SID", "PN");
}
pr_info(
- "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x %3d\n",
+ "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
index++,
(unsigned long)lgre << UV_GAM_RANGE_SHFT,
(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
((unsigned long)(gre->limit - lgre)) >>
(30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
- gre->type, gre->nasid, gre->sockid,
- gre->pnode, gre->pxm);
+ gre->type, gre->nasid, gre->sockid, gre->pnode);
lgre = gre->limit;
if (sock_min > gre->sockid)
_pnode_to_socket[i] = SOCK_EMPTY;
/* fill in pnode/node/addr conversion list values */
- pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n");
+ pr_info("UV: GAM Building socket/pnode conversion tables\n");
for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
continue;
if (_socket_to_pnode[i] != SOCK_EMPTY)
continue; /* duplicate */
_socket_to_pnode[i] = gre->pnode;
- _socket_to_node[i] = gre->pxm;
i = gre->pnode - minpnode;
_pnode_to_socket[i] = gre->sockid;
pr_info(
- "UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n",
+ "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
gre->sockid, gre->type, gre->nasid,
_socket_to_pnode[gre->sockid - minsock],
- _socket_to_node[gre->sockid - minsock],
_pnode_to_socket[gre->pnode - minpnode]);
}
- /* check socket -> node values */
+ /* Set socket -> node values */
lnid = -1;
for_each_present_cpu(cpu) {
int nid = cpu_to_node(cpu);
lnid = nid;
apicid = per_cpu(x86_cpu_to_apicid, cpu);
sockid = apicid >> uv_cpuid.socketid_shift;
- i = sockid - minsock;
-
- if (nid != _socket_to_node[i]) {
- pr_warn(
- "UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n",
- i, sockid, gre->type, _socket_to_node[i], nid);
- _socket_to_node[i] = nid;
- }
+ _socket_to_node[sockid - minsock] = nid;
+ pr_info("UV: sid:%02x: apicid:%04x node:%2d\n",
+ sockid, apicid, nid);
}
/* Setup physical blade to pnode translation from GAM Range Table */
unsigned int cpu = smp_processor_id();
struct equiv_cpu_entry *eq;
struct microcode_amd *mc;
+ u8 *cont = container;
u32 rev, eax;
u16 eq_id;
if (check_current_patch_level(&rev, false))
return;
+ /* Add CONFIG_RANDOMIZE_MEMORY offset. */
+ cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
eax = cpuid_eax(0x00000001);
- eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+ eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
eq_id = find_equiv_id(eq, eax);
if (!eq_id)
else
container = cont_va;
+ /* Add CONFIG_RANDOMIZE_MEMORY offset. */
+ container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
return get_xsave_addr(&fpu->state.xsave, xsave_state);
}
-
-/*
- * Set xfeatures (aka XSTATE_BV) bit for a feature that we want
- * to take out of its "init state". This will ensure that an
- * XRSTOR actually restores the state.
- */
-static void fpu__xfeature_set_non_init(struct xregs_state *xsave,
- int xstate_feature_mask)
-{
- xsave->header.xfeatures |= xstate_feature_mask;
-}
-
-/*
- * This function is safe to call whether the FPU is in use or not.
- *
- * Note that this only works on the current task.
- *
- * Inputs:
- * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
- * XFEATURE_MASK_SSE, etc...)
- * @xsave_state_ptr: a pointer to a copy of the state that you would
- * like written in to the current task's FPU xsave state. This pointer
- * must not be located in the current tasks's xsave area.
- * Output:
- * address of the state in the xsave area or NULL if the state
- * is not present or is in its 'init state'.
- */
-static void fpu__xfeature_set_state(int xstate_feature_mask,
- void *xstate_feature_src, size_t len)
-{
-	struct xregs_state *xsave = &current->thread.fpu.state.xsave;
-	struct fpu *fpu = &current->thread.fpu;
- void *dst;
-
- if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
- WARN_ONCE(1, "%s() attempted with no xsave support", __func__);
- return;
- }
-
- /*
- * Tell the FPU code that we need the FPU state to be in
- * 'fpu' (not in the registers), and that we need it to
- * be stable while we write to it.
- */
- fpu__current_fpstate_write_begin();
-
- /*
- * This method *WILL* *NOT* work for compact-format
- * buffers. If the 'xstate_feature_mask' is unset in
- * xcomp_bv then we may need to move other feature state
- * "up" in the buffer.
- */
- if (xsave->header.xcomp_bv & xstate_feature_mask) {
- WARN_ON_ONCE(1);
- goto out;
- }
-
- /* find the location in the xsave buffer of the desired state */
- dst = __raw_xsave_addr(&fpu->state.xsave, xstate_feature_mask);
-
- /*
- * Make sure that the pointer being passed in did not
- * come from the xsave buffer itself.
- */
- WARN_ONCE(xstate_feature_src == dst, "set from xsave buffer itself");
-
- /* put the caller-provided data in the location */
- memcpy(dst, xstate_feature_src, len);
-
- /*
- * Mark the xfeature so that the CPU knows there is state
- * in the buffer now.
- */
- fpu__xfeature_set_non_init(xsave, xstate_feature_mask);
-out:
- /*
- * We are done writing to the 'fpu'. Reenable preeption
- * and (possibly) move the fpstate back in to the fpregs.
- */
- fpu__current_fpstate_write_end();
-}
-
#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
/*
- * This will go out and modify the XSAVE buffer so that PKRU is
- * set to a particular state for access to 'pkey'.
- *
- * PKRU state does affect kernel access to user memory. We do
- * not modfiy PKRU *itself* here, only the XSAVE state that will
- * be restored in to PKRU when we return back to userspace.
+ * This will go out and modify the PKRU register to set the access
+ * rights for @pkey to @init_val.
*/
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
unsigned long init_val)
{
- struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
- struct pkru_state *old_pkru_state;
- struct pkru_state new_pkru_state;
+ u32 old_pkru;
int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
u32 new_pkru_bits = 0;
*/
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return -EINVAL;
+ /*
+ * For most XSAVE components, this would be an arduous task:
+	 * bringing fpstate up to date with fpregs, updating fpstate,
+ * then re-populating fpregs. But, for components that are
+ * never lazily managed, we can just access the fpregs
+ * directly. PKRU is never managed lazily, so we can just
+ * manipulate it directly. Make sure it stays that way.
+ */
+ WARN_ON_ONCE(!use_eager_fpu());
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
/* Shift the bits in to the correct place in PKRU for pkey: */
new_pkru_bits <<= pkey_shift;
- /* Locate old copy of the state in the xsave buffer: */
- old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU);
-
- /*
- * When state is not in the buffer, it is in the init
- * state, set it manually. Otherwise, copy out the old
- * state.
- */
- if (!old_pkru_state)
- new_pkru_state.pkru = 0;
- else
- new_pkru_state.pkru = old_pkru_state->pkru;
-
- /* Mask off any old bits in place: */
- new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
-
- /* Set the newly-requested bits: */
- new_pkru_state.pkru |= new_pkru_bits;
-
- /*
- * We could theoretically live without zeroing pkru.pad.
- * The current XSAVE feature state definition says that
- * only bytes 0->3 are used. But we do not want to
- * chance leaking kernel stack out to userspace in case a
- * memcpy() of the whole xsave buffer was done.
- *
- * They're in the same cacheline anyway.
- */
- new_pkru_state.pad = 0;
+ /* Get old PKRU and mask off any old bits in place: */
+ old_pkru = read_pkru();
+ old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
- fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state));
+ /* Write old part along with new part: */
+ write_pkru(old_pkru | new_pkru_bits);
return 0;
}
/* Initialize 32bit specific setup functions */
x86_init.resources.reserve_resources = i386_reserve_resources;
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
-
- reserve_bios_regions();
}
asmlinkage __visible void __init i386_start_kernel(void)
copy_bootdata(__va(real_mode_data));
x86_early_init_platform_quirks();
- reserve_bios_regions();
switch (boot_params.hdr.hardware_subarch) {
case X86_SUBARCH_INTEL_MID:
memset(&curr_time, 0, sizeof(struct rtc_time));
if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
- mc146818_set_time(&curr_time);
+ mc146818_get_time(&curr_time);
if (hpet_rtc_flags & RTC_UIE &&
curr_time.tm_sec != hpet_prev_update_sec) {
seq_puts(p, " Rescheduling interrupts\n");
seq_printf(p, "%*s: ", prec, "CAL");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
- irq_stats(j)->irq_tlb_count);
+ seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
seq_puts(p, " Function call interrupts\n");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
x86_init.oem.arch_setup();
- kernel_randomize_memory();
-
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();
max_possible_pfn = max_pfn;
+ /*
+ * Define random base addresses for memory sections after max_pfn is
+ * defined and before each memory section base is used.
+ */
+ kernel_randomize_memory();
+
#ifdef CONFIG_X86_32
/* max_low_pfn get updated here */
find_low_pfn_range();
efi_find_mirror();
}
+ reserve_bios_regions();
+
/*
* The EFI specification says that boot service code won't be called
* after ExitBootServices(). This is, in fact, a lie.
early_trap_pf_init();
- setup_real_mode();
+ /*
+ * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
+ * with the current CR4 value. This may not be necessary, but
+ * auditing all the early-boot CR4 manipulation would be needed to
+ * rule it out.
+ */
+ if (boot_cpu_data.cpuid_level >= 0)
+ /* A CPU has %cr4 if and only if it has CPUID. */
+ mmu_cr4_features = __read_cr4();
memblock_set_current_limit(get_max_mapped());
kasan_init();
- if (boot_cpu_data.cpuid_level >= 0) {
- /* A CPU has %cr4 if and only if it has CPUID */
- mmu_cr4_features = __read_cr4();
- if (trampoline_cr4_features)
- *trampoline_cr4_features = mmu_cr4_features;
- }
-
#ifdef CONFIG_X86_32
/* sync back kernel address range */
clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
/* Logical package management. We might want to allocate that dynamically */
static int *physical_to_logical_pkg __read_mostly;
static unsigned long *physical_package_map __read_mostly;;
-static unsigned long *logical_package_map __read_mostly;
static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
+static unsigned int logical_packages __read_mostly;
+static bool logical_packages_frozen __read_mostly;
/* Maximum number of SMT threads on any online core */
int __max_smt_threads __read_mostly;
if (test_and_set_bit(pkg, physical_package_map))
goto found;
- new = find_first_zero_bit(logical_package_map, __max_logical_packages);
- if (new >= __max_logical_packages) {
+ if (logical_packages_frozen) {
physical_to_logical_pkg[pkg] = -1;
- pr_warn("APIC(%x) Package %u exceeds logical package map\n",
+ pr_warn("APIC(%x) Package %u exceeds logical package max\n",
apicid, pkg);
return -ENOSPC;
}
- set_bit(new, logical_package_map);
+
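+	/* Logical package ids are assigned sequentially, in order of discovery. */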
+ new = logical_packages++;
pr_info("APIC(%x) Converting physical %u to logical package %u\n",
apicid, pkg, new);
physical_to_logical_pkg[pkg] = new;
}
__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
+ logical_packages = 0;
/*
* Possibly larger than what we need as the number of apic ids per
memset(physical_to_logical_pkg, 0xff, size);
size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
physical_package_map = kzalloc(size, GFP_KERNEL);
- size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
- logical_package_map = kzalloc(size, GFP_KERNEL);
-
- pr_info("Max logical packages: %u\n", __max_logical_packages);
for_each_present_cpu(cpu) {
unsigned int apicid = apic->cpu_present_to_apicid(cpu);
set_cpu_possible(cpu, false);
set_cpu_present(cpu, false);
}
+
+ if (logical_packages > __max_logical_packages) {
+		pr_warn("Detected more packages (%u) than computed by BIOS data (%u).\n",
+ logical_packages, __max_logical_packages);
+ logical_packages_frozen = true;
+ __max_logical_packages = logical_packages;
+ }
+
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
}
void __init smp_store_boot_cpu_info(void)
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
+#include <asm/apic.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
(unsigned long)tsc_khz / 1000,
(unsigned long)tsc_khz % 1000);
+ /* Inform the TSC deadline clockevent devices about the recalibration */
+ lapic_update_tsc_freq();
+
out:
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
*cursor &= 0xfe;
}
/*
- * Similar treatment for VEX3 prefix.
- * TODO: add XOP/EVEX treatment when insn decoder supports them
+ * Similar treatment for VEX3/EVEX prefix.
+ * TODO: add XOP treatment when insn decoder supports them
*/
- if (insn->vex_prefix.nbytes == 3) {
+ if (insn->vex_prefix.nbytes >= 3) {
/*
* vex2: c5 rvvvvLpp (has no b bit)
* vex3/xop: c4/8f rxbmmmmm wvvvvLpp
* evex: 62 rxbR00mm wvvvv1pp zllBVaaa
- * (evex will need setting of both b and x since
- * in non-sib encoding evex.x is 4th bit of MODRM.rm)
- * Setting VEX3.b (setting because it has inverted meaning):
+ * Setting VEX3.b (setting because it has inverted meaning).
+ * Setting EVEX.x since (in non-SIB encoding) EVEX.x
+ * is the 4th bit of MODRM.rm, and needs the same treatment.
+ * For VEX3-encoded insns, VEX3.x value has no effect in
+ * non-SIB encoding, the change is superfluous but harmless.
*/
cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
- *cursor |= 0x20;
+ *cursor |= 0x60;
}
/*
reg = MODRM_REG(insn); /* Fetch modrm.reg */
reg2 = 0xff; /* Fetch vex.vvvv */
- if (insn->vex_prefix.nbytes == 2)
- reg2 = insn->vex_prefix.bytes[1];
- else if (insn->vex_prefix.nbytes == 3)
+ if (insn->vex_prefix.nbytes)
reg2 = insn->vex_prefix.bytes[2];
/*
- * TODO: add XOP, EXEV vvvv reading.
+ * TODO: add XOP vvvv reading.
*
* vex.vvvv field is in bits 6-3, bits are inverted.
* But in 32-bit mode, high-order bit may be ignored.
struct list_head vmcs02_pool;
int vmcs02_num;
u64 vmcs01_tsc_offset;
+ bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
/*
bool pi_pending;
u16 posted_intr_nv;
+ unsigned long *msr_bitmap;
+
struct hrtimer preemption_timer;
bool preemption_timer_expired;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
new.control) != old.control);
}
+static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
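+	/* Refresh the cached TSC scaling ratio and program it into the current VMCS. */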
+ vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+ vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
/* Setup TSC multiplier */
if (kvm_has_tsc_control &&
- vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
- vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
- vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
- }
+ vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+ decache_tsc_multiplier(vmx);
vmx_vcpu_pi_load(vcpu, cpu);
vmx->host_pkru = read_pkru();
unsigned long *msr_bitmap;
if (is_guest_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_nested;
+ msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
else if (cpu_has_secondary_exec_ctrls() &&
(vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
if (!vmx_msr_bitmap_longmode_x2apic)
goto out4;
- if (nested) {
- vmx_msr_bitmap_nested =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_nested)
- goto out5;
- }
-
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
goto out6;
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
- if (nested)
- memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
out7:
free_page((unsigned long)vmx_vmread_bitmap);
out6:
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
out4:
free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
free_page((unsigned long)vmx_vmread_bitmap);
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
free_kvm_area();
}
return 1;
}
+ if (cpu_has_vmx_msr_bitmap()) {
+ vmx->nested.msr_bitmap =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx->nested.msr_bitmap)
+ goto out_msr_bitmap;
+ }
+
vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_vmcs12)
- return -ENOMEM;
+ goto out_cached_vmcs12;
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
- if (!shadow_vmcs) {
- kfree(vmx->nested.cached_vmcs12);
- return -ENOMEM;
- }
+ if (!shadow_vmcs)
+ goto out_shadow_vmcs;
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
+
+out_shadow_vmcs:
+ kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+ free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+ return -ENOMEM;
}
/*
vmx->nested.vmxon = false;
free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
+ if (vmx->nested.msr_bitmap) {
+ free_page((unsigned long)vmx->nested.msr_bitmap);
+ vmx->nested.msr_bitmap = NULL;
+ }
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
kfree(vmx->nested.cached_vmcs12);
{
u32 sec_exec_control;
+ /* Postpone execution until vmcs01 is the current VMCS. */
+ if (is_guest_mode(vcpu)) {
+ to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+ return;
+ }
+
/*
	 * There is no point in enabling virtualized x2apic without enabling
* apicv
{
int msr;
struct page *page;
- unsigned long *msr_bitmap;
+ unsigned long *msr_bitmap_l1;
+ unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
+ /* This shortcut is ok because we support only x2APIC MSRs so far. */
if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
return false;
WARN_ON(1);
return false;
}
- msr_bitmap = (unsigned long *)kmap(page);
- if (!msr_bitmap) {
+ msr_bitmap_l1 = (unsigned long *)kmap(page);
+ if (!msr_bitmap_l1) {
nested_release_page_clean(page);
WARN_ON(1);
return false;
}
+ memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+
if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12))
for (msr = 0x800; msr <= 0x8ff; msr++)
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
msr, MSR_TYPE_R);
- /* TPR is allowed */
- nested_vmx_disable_intercept_for_msr(msr_bitmap,
- vmx_msr_bitmap_nested,
+
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_R | MSR_TYPE_W);
+
if (nested_cpu_has_vid(vmcs12)) {
- /* EOI and self-IPI are allowed */
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
- } else {
- /*
- * Enable reading intercept of all the x2apic
- * MSRs. We should not rely on vmcs12 to do any
- * optimizations here, it may have been modified
- * by L1.
- */
- for (msr = 0x800; msr <= 0x8ff; msr++)
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- msr,
- MSR_TYPE_R);
-
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_TASKPRI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_EOI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
- MSR_TYPE_W);
}
kunmap(page);
nested_release_page_clean(page);
}
if (cpu_has_vmx_msr_bitmap() &&
- exec_control & CPU_BASED_USE_MSR_BITMAPS) {
- nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
- /* MSR_BITMAP will be set by following vmx_set_efer. */
- } else
+ exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+ nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+ ; /* MSR_BITMAP will be set by following vmx_set_efer. */
+ else
exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
/*
vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
else
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
if (enable_vpid) {
/*
else
vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
+
+ if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+ vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+ vmx_set_virtual_x2apic_mode(vcpu,
+ vcpu->arch.apic_base & X2APIC_ENABLE);
+ }
/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;
ENTRY(__sw_hweight64)
#ifdef CONFIG_X86_64
+ pushq %rdi
pushq %rdx
movq %rdi, %rdx # w -> t
shrq $56, %rax # w = w_tmp >> 56
popq %rdx
+ popq %rdi
ret
#else /* CONFIG_X86_32 */
/* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
#include <asm/cpufeature.h>
#include <asm/setup.h>
-#define debug_putstr(v) early_printk(v)
+#define debug_putstr(v) early_printk("%s", v)
#define has_cpuflag(f) boot_cpu_has(f)
#define get_boot_seed() kaslr_offset()
#endif
* included by both the compressed kernel and the regular kernel.
*/
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
unsigned long addr, unsigned long end)
{
addr &= PMD_MASK;
for (; addr < end; addr += PMD_SIZE) {
pmd_t *pmd = pmd_page + pmd_index(addr);
- if (!pmd_present(*pmd))
- set_pmd(pmd, __pmd(addr | pmd_flag));
+ if (pmd_present(*pmd))
+ continue;
+
+ set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
}
}
if (pud_present(*pud)) {
pmd = pmd_offset(pud, 0);
- ident_pmd_init(info->pmd_flag, pmd, addr, next);
+ ident_pmd_init(info, pmd, addr, next);
continue;
}
pmd = (pmd_t *)info->alloc_pgt_page(info->context);
if (!pmd)
return -ENOMEM;
- ident_pmd_init(info->pmd_flag, pmd, addr, next);
+ ident_pmd_init(info, pmd, addr, next);
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
}
}
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
- unsigned long addr, unsigned long end)
+ unsigned long pstart, unsigned long pend)
{
+ unsigned long addr = pstart + info->offset;
+ unsigned long end = pend + info->offset;
unsigned long next;
int result;
- int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
for (; addr < end; addr = next) {
- pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+ pgd_t *pgd = pgd_page + pgd_index(addr);
pud_t *pud;
next = (addr & PGDIR_MASK) + PGDIR_SIZE;
return __va(pfn << PAGE_SHIFT);
}
-/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE)
+/*
+ * By default we need 3 4k pages for the initial PMD_SIZE and 3 4k pages
+ * for 0-ISA_END_ADDRESS. With KASLR memory randomization enabled we may
+ * need up to twice as many pages, depending on the machine's e820 memory
+ * map and the PUD alignment.
+ */
+#ifndef CONFIG_RANDOMIZE_MEMORY
+#define INIT_PGD_PAGE_COUNT 6
+#else
+#define INIT_PGD_PAGE_COUNT 12
+#endif
+#define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
*/
static inline bool kaslr_memory_enabled(void)
{
- return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+ return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}
/* Initialize base and padding for each memory region randomized with KASLR */
* add padding if needed (especially for memory hotplug support).
*/
BUG_ON(kaslr_regions[0].base != &page_offset_base);
- memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+ memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
	/* Adapt physical memory region size based on available memory */
* @node: list item for parent traversal.
* @rcu: RCU callback item for freeing.
* @irq: back pointer to parent.
+ * @enabled: true if driver enabled IRQ
* @virq: the virtual IRQ value provided to the requesting driver.
*
* Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
struct list_head node;
struct rcu_head rcu;
struct vmd_irq_list *irq;
+ bool enabled;
unsigned int virq;
};
unsigned long flags;
raw_spin_lock_irqsave(&list_lock, flags);
+ WARN_ON(vmdirq->enabled);
list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
raw_spin_unlock_irqrestore(&list_lock, flags);
data->chip->irq_unmask(data);
data->chip->irq_mask(data);
raw_spin_lock_irqsave(&list_lock, flags);
- list_del_rcu(&vmdirq->node);
- INIT_LIST_HEAD_RCU(&vmdirq->node);
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
raw_spin_unlock_irqrestore(&list_lock, flags);
}
for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+ size_t rm_size;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
if (md->attribute & EFI_MEMORY_RUNTIME)
continue;
+ /*
+ * Nasty quirk: if all sub-1MB memory is used for boot
+ * services, we can get here without having allocated the
+ * real mode trampoline. It's too late to hand boot services
+ * memory back to the memblock allocator, so instead
+ * try to manually allocate the trampoline if needed.
+ *
+ * I've seen this on a Dell XPS 13 9350 with firmware
+ * 1.4.4 with SGX enabled booting Linux via Fedora 24's
+ * grub2-efi on a hard disk. (And no, I don't know why
+ * this happened, but Linux should still try to boot rather
+	 * than panicking early.)
+ */
+ rm_size = real_mode_size_needed();
+ if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
+ set_real_mode_mem(start, rm_size);
+ start += rm_size;
+ size -= rm_size;
+ }
+
free_bootmem_late(start, size);
}
void uv_bios_init(void)
{
uv_systab = NULL;
- if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) {
+ if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+ !efi.uv_systab || efi_runtime_disabled()) {
pr_crit("UV: UVsystab: missing\n");
return;
}
return;
}
+ /* Starting with UV4 the UV systab size is variable */
if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
+ int size = uv_systab->size;
+
iounmap(uv_systab);
- uv_systab = ioremap(efi.uv_systab, uv_systab->size);
+ uv_systab = ioremap(efi.uv_systab, size);
if (!uv_systab) {
- pr_err("UV: UVsystab: ioremap(%d) failed!\n",
- uv_systab->size);
+ pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
return;
}
}
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
.pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
- .kernel_mapping = true,
+ .offset = __PAGE_OFFSET,
};
unsigned long mstart, mend;
pgd_t *pgd;
return result;
}
- temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
+ temp_level4_pgt = __pa(pgd);
return 0;
}
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>
+#include <asm/tlbflush.h>
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;
+void __init set_real_mode_mem(phys_addr_t mem, size_t size)
+{
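+	/* Remember the reservation; setup_real_mode() copies the blob here later. */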
+ void *base = __va(mem);
+
+ real_mode_header = (struct real_mode_header *) base;
+ printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+ base, (unsigned long long)mem, size);
+}
+
void __init reserve_real_mode(void)
{
phys_addr_t mem;
- unsigned char *base;
- size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+ size_t size = real_mode_size_needed();
+
+ if (!size)
+ return;
+
+ WARN_ON(slab_is_available());
/* Has to be under 1M so we can execute real-mode AP code. */
mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (!mem)
- panic("Cannot allocate trampoline\n");
+ if (!mem) {
+ pr_info("No sub-1M memory is available for the trampoline\n");
+ return;
+ }
- base = __va(mem);
memblock_reserve(mem, size);
- real_mode_header = (struct real_mode_header *) base;
- printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
- base, (unsigned long long)mem, size);
+ set_real_mode_mem(mem, size);
}
-void __init setup_real_mode(void)
+static void __init setup_real_mode(void)
{
u16 real_mode_seg;
const u32 *rel;
trampoline_header->start = (u64) secondary_startup_64;
trampoline_cr4_features = &trampoline_header->cr4;
- *trampoline_cr4_features = __read_cr4();
+ *trampoline_cr4_features = mmu_cr4_features;
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
* need to mark it executable at do_pre_smp_initcalls() at least,
* thus run it as a early_initcall().
*/
-static int __init set_real_mode_permissions(void)
+static void __init set_real_mode_permissions(void)
{
unsigned char *base = (unsigned char *) real_mode_header;
size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+}
+
+static int __init init_real_mode(void)
+{
+ if (!real_mode_header)
+ panic("Real mode trampoline was not allocated");
+
+ setup_real_mode();
+ set_real_mode_permissions();
return 0;
}
-early_initcall(set_real_mode_permissions);
+early_initcall(init_real_mode);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
enum xen_domain_type xen_domain_type = XEN_NATIVE;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
- if (bio_op(bio) == REQ_OP_DISCARD)
- goto integrity_clone;
-
- if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ break;
+ case REQ_OP_WRITE_SAME:
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- goto integrity_clone;
+ break;
+ default:
+ bio_for_each_segment(bv, bio_src, iter)
+ bio->bi_io_vec[bio->bi_vcnt++] = bv;
+ break;
}
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
if (bio_integrity(bio_src)) {
int ret;
* Discards need a mutable bio_vec to accommodate the payload
* required by the DSM TRIM and UNMAP commands.
*/
- if (bio_op(bio) == REQ_OP_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
split = bio_clone_bioset(bio, gfp, bs);
else
split = bio_clone_fast(bio, gfp, bs);
void blk_set_queue_dying(struct request_queue *q)
{
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
if (q->mq_ops)
blk_mq_wake_waiters(q);
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ unsigned bvecs = 0;
bio_for_each_segment(bv, bio, iter) {
+ /*
+ * With arbitrary bio size, the incoming bio may be very
+ * big. We have to split the bio into small bios so that
+ * each holds at most BIO_MAX_PAGES bvecs because
+ * bio_clone() can fail to allocate big bvecs.
+ *
+		 * It would have been better to apply the limit per
+ * request queue in which bio_clone() is involved,
+ * instead of globally. The biggest blocker is the
+ * bio_clone() in bio bounce.
+ *
+		 * If the bio is split for this reason, bio merging should
+		 * still be allowed, but we don't do that now, to keep the
+		 * change simple.
+ *
+ * TODO: deal with bio bounce's bio_clone() gracefully
+ * and convert the global limit into per-queue limit.
+ */
+ if (bvecs++ >= BIO_MAX_PAGES)
+ goto split;
+
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
struct bio *split, *res;
unsigned nsegs;
- if (bio_op(*bio) == REQ_OP_DISCARD)
+ switch (bio_op(*bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
split = blk_bio_discard_split(q, *bio, bs, &nsegs);
- else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
+ break;
+ case REQ_OP_WRITE_SAME:
split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
- else
+ break;
+ default:
split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+ break;
+ }
/* physical segments can be figured out during splitting */
res = split ? split : *bio;
* This should probably be returning 0, but blk_add_request_payload()
* (Christoph!!!!)
*/
- if (bio_op(bio) == REQ_OP_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
return 1;
if (bio_op(bio) == REQ_OP_WRITE_SAME)
nsegs = 0;
cluster = blk_queue_cluster(q);
- if (bio_op(bio) == REQ_OP_DISCARD) {
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
/*
* This is a hack - drivers should be neither modifying the
* biovec, nor relying on bi_vcnt - but because of
* a payload we need to set up here (thank you Christoph) and
* bi_vcnt is really the only way of telling if we need to.
*/
-
- if (bio->bi_vcnt)
- goto single_segment;
-
- return 0;
- }
-
- if (bio_op(bio) == REQ_OP_WRITE_SAME) {
-single_segment:
+ if (!bio->bi_vcnt)
+ return 0;
+ /* Fall through */
+ case REQ_OP_WRITE_SAME:
*sg = sglist;
bvec = bio_iovec(bio);
sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
return 1;
+ default:
+ break;
}
for_each_bio(bio)
struct list_head *dptr;
int queued;
- WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
-
if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
+ WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+ cpu_online(hctx->next_cpu));
+
hctx->run++;
/*
EXPORT_SYMBOL(blk_mq_delay_queue);
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx,
struct request *rq,
bool at_head)
{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
- __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+ __blk_mq_insert_req_list(hctx, rq, at_head);
blk_mq_hctx_mark_pending(hctx, ctx);
}
void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
- bool async)
+ bool async)
{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
- current_ctx = blk_mq_get_ctx(q);
- if (!cpu_online(ctx->cpu))
- rq->mq_ctx = ctx = current_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
if (run_queue)
blk_mq_run_hw_queue(hctx, async);
-
- blk_mq_put_ctx(current_ctx);
}
static void blk_mq_insert_requests(struct request_queue *q,
{
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *current_ctx;
trace_block_unplug(q, depth, !from_schedule);
- current_ctx = blk_mq_get_ctx(q);
-
- if (!cpu_online(ctx->cpu))
- ctx = current_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
/*
struct request *rq;
rq = list_first_entry(list, struct request, queuelist);
+ BUG_ON(rq->mq_ctx != ctx);
list_del_init(&rq->queuelist);
- rq->mq_ctx = ctx;
- __blk_mq_insert_req_list(hctx, ctx, rq, false);
+ __blk_mq_insert_req_list(hctx, rq, false);
}
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
blk_mq_run_hw_queue(hctx, from_schedule);
- blk_mq_put_ctx(current_ctx);
}
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
return 0;
}
+/*
+ * 'cpu' is going away. Splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that it
+ * gets run.
+ */
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
- struct request_queue *q = hctx->queue;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
- /*
- * Move ctx entries to new CPU, if this one is going away.
- */
- ctx = __blk_mq_get_ctx(q, cpu);
+ ctx = __blk_mq_get_ctx(hctx->queue, cpu);
spin_lock(&ctx->lock);
if (!list_empty(&ctx->rq_list)) {
if (list_empty(&tmp))
return NOTIFY_OK;
- ctx = blk_mq_get_ctx(q);
- spin_lock(&ctx->lock);
-
- while (!list_empty(&tmp)) {
- struct request *rq;
-
- rq = list_first_entry(&tmp, struct request, queuelist);
- rq->mq_ctx = ctx;
- list_move_tail(&rq->queuelist, &ctx->rq_list);
- }
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- blk_mq_hctx_mark_pending(hctx, ctx);
-
- spin_unlock(&ctx->lock);
+ spin_lock(&hctx->lock);
+ list_splice_tail_init(&tmp, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
blk_mq_run_hw_queue(hctx, true);
- blk_mq_put_ctx(ctx);
return NOTIFY_OK;
}
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
- if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
+ if (req_op(rq) != req_op(pos))
break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
config CRYPT_CRC32C_VPMSUM
tristate "CRC32c CRC algorithm (powerpc64)"
- depends on PPC64
+ depends on PPC64 && ALTIVEC
select CRYPTO_HASH
select CRC32
help
#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
static const u64 keccakf_rndc[24] = {
- 0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
- 0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
- 0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
- 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
- 0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
- 0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
- 0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
- 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
+ 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
+ 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
+ 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
+ 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+ 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
+ 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
+ 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
+ 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
static const int keccakf_rotc[24] = {
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+ const u32 STATUS_MASK = 0x80000037;
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
- return readl(mmio->addr.base + offset);
+ return readl(mmio->addr.base + offset) & STATUS_MASK;
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
- check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ if (!(mode & FMODE_NDELAY)) {
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
+ }
+ res = -EROFS;
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
goto out;
}
-
- res = -EROFS;
-
- if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
- goto out;
-
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
bool need_put = !!rbd_dev->opts;
ceph_oid_destroy(&rbd_dev->header_oid);
+ ceph_oloc_destroy(&rbd_dev->header_oloc);
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
}
spec->pool_id = (u64)rc;
- /* The ceph file layout needs to fit pool id in 32 bits */
-
- if (spec->pool_id > (u64)U32_MAX) {
- rbd_warn(NULL, "pool id too large (%llu > %u)",
- (unsigned long long)spec->pool_id, U32_MAX);
- rc = -EIO;
- goto err_out_client;
- }
-
rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
if (!rbd_dev) {
rc = -ENOMEM;
num_vqs = 1;
vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
- if (!vblk->vqs) {
- err = -ENOMEM;
- goto out;
- }
+ if (!vblk->vqs)
+ return -ENOMEM;
names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- if (!names)
- goto err_names;
-
callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- if (!callbacks)
- goto err_callbacks;
-
vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
- if (!vqs)
- goto err_vqs;
+ if (!names || !callbacks || !vqs) {
+ err = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < num_vqs; i++) {
callbacks[i] = virtblk_done;
/* Discover virtqueues and write information to configuration. */
err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
if (err)
- goto err_find_vqs;
+ goto out;
for (i = 0; i < num_vqs; i++) {
spin_lock_init(&vblk->vqs[i].lock);
}
vblk->num_vqs = num_vqs;
- err_find_vqs:
+out:
kfree(vqs);
- err_vqs:
kfree(callbacks);
- err_callbacks:
kfree(names);
- err_names:
if (err)
kfree(vblk->vqs);
- out:
return err;
}
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
+ u16 sector_size;
+ unsigned int physical_sector_size;
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
.map_queue = blk_mq_map_queue,
};
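+/*
+ * Apply request-queue limits derived from the parameters negotiated
+ * with the blkback backend.
+ */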
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+ struct request_queue *rq = info->rq;
+ struct gendisk *gd = info->gd;
+ unsigned int segments = info->max_indirect_segments ? :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+ if (info->feature_discard) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+ blk_queue_max_discard_sectors(rq, get_capacity(gd));
+ rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_alignment = info->discard_alignment;
+ if (info->feature_secdiscard)
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+ }
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_logical_block_size(rq, info->sector_size);
+ blk_queue_physical_block_size(rq, info->physical_sector_size);
+ blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
+ /* Make sure we don't use bounce buffers. */
+ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
- unsigned int physical_sector_size,
- unsigned int segments)
+ unsigned int physical_sector_size)
{
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
}
rq->queuedata = info;
- queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
- if (info->feature_discard) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
- blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
- rq->limits.discard_alignment = info->discard_alignment;
- if (info->feature_secdiscard)
- queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
- }
-
- /* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_logical_block_size(rq, sector_size);
- blk_queue_physical_block_size(rq, physical_sector_size);
- blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
- /* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
- blk_queue_max_segment_size(rq, PAGE_SIZE);
-
- /* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
- /* Make sure buffer addresses are sector-aligned. */
- blk_queue_dma_alignment(rq, 511);
-
- /* Make sure we don't use bounce buffers. */
- blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
- gd->queue = rq;
+ info->rq = gd->queue = rq;
+ info->gd = gd;
+ info->sector_size = sector_size;
+ info->physical_sector_size = physical_sector_size;
+ blkif_set_queue_limits(info);
return 0;
}
gd->private_data = info;
set_capacity(gd, capacity);
- if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
- info->max_indirect_segments ? :
- BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+ if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
del_gendisk(gd);
goto release;
}
- info->rq = gd->queue;
- info->gd = gd;
-
xlvbd_flush(info);
if (vdisk_info & VDISK_READONLY)
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
- free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+ free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
rinfo->ring.sring = NULL;
if (rinfo->irq)
struct split_bio *split_bio;
blkfront_gather_backend_features(info);
+ /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+ blkif_set_queue_limits(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_max_segments(info->rq, segs);
+ blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
- return;
+ goto fail;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
device_add_disk(&info->xbdev->dev, info->gd);
info->is_ready = 1;
+ return;
+
+fail:
+ blkif_free(info, 0);
+ return;
}
/**
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#define pr_fmt(fmt) "arm_arch_timer: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
arch_timer_ppi[PHYS_NONSECURE_PPI]);
}
+static u32 check_ppi_trigger(int irq)
+{
+ u32 flags = irq_get_trigger_type(irq);
+
+ if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
+ pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
+ pr_warn("WARNING: Please fix your firmware\n");
+ flags = IRQF_TRIGGER_LOW;
+ }
+
+ return flags;
+}
+
static int arch_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+ u32 flags;
__arch_timer_setup(ARCH_CP15_TIMER, clk);
- enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+ flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
+ enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
- if (arch_timer_has_nonsecure_ppi())
- enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+ if (arch_timer_has_nonsecure_ppi()) {
+ flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
+ }
arch_counter_set_user_access();
if (evtstrm_enable)
}
-static void
+static int
kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
- int loop_limit = 4;
+ int loop_limit = 3;
/*
* Read 64-bit free running counter
* if new hi-word is equal to previously read hi-word then stop.
*/
- while (--loop_limit) {
+ do {
*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
- }
+ } while (--loop_limit);
if (!loop_limit) {
pr_err("bcm_kona_timer: getting counter failed.\n");
pr_err(" Timer will be impacted\n");
+ return -ETIMEDOUT;
}
- return;
+ return 0;
}
static int kona_timer_set_next_event(unsigned long clc,
uint32_t lsw, msw;
uint32_t reg;
+ int ret;
- kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+ ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+ if (ret)
+ return ret;
/* Load the "next" event tick value */
writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
gic_start_count();
}
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
{
struct clk *clk;
int ret;
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#include <clocksource/pxa.h>
+
#include <asm/div64.h>
#define OSMR0 0x00 /* OS Timer 0 Match Register */
.set_next_event = sun4i_clkevt_next_event,
};
+static void sun4i_timer_clear_interrupt(void)
+{
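+	/* Writing the timer 0 pending bit back to the status register acknowledges it. */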
+ writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+}
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+ sun4i_timer_clear_interrupt();
evt->event_handler(evt);
return IRQ_HANDLED;
/* Make sure timer is stopped before playing with interrupts */
sun4i_clkevt_time_stop(0);
+ /* clear timer0 interrupt */
+ sun4i_timer_clear_interrupt();
+
sun4i_clockevent.cpumask = cpu_possible_mask;
sun4i_clockevent.irq = irq;
struct clk *clk = of_clk_get_by_name(np, "fixed");
int ret;
- clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clock");
return PTR_ERR(clk);
rate = clk_get_rate(fast_clk);
/* Disable irq's for clocksource usage */
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
/* Enable timer block */
writel(TIMER_ME_GLOBAL, pcs_gpt.base);
return PTR_ERR(data->mck);
}
+ ret = clk_prepare_enable(data->mck);
+ if (ret) {
+ pr_err("Unable to enable mck\n");
+ return ret;
+ }
+
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq) {
/* Use following macros for conversions between pstate_id and index */
static inline int idx_to_pstate(unsigned int i)
{
+ if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+ pr_warn_once("index %u is out of bound\n", i);
+ return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+ }
+
return powernv_freqs[i].driver_data;
}
static inline unsigned int pstate_to_idx(int pstate)
{
+ int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+ int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+ if (min > 0) {
+ if (unlikely((pstate < max) || (pstate > min))) {
+ pr_warn_once("pstate %d is out of bound\n", pstate);
+ return powernv_pstate_info.nominal;
+ }
+ } else {
+ if (unlikely((pstate > max) || (pstate < min))) {
+ pr_warn_once("pstate %d is out of bound\n", pstate);
+ return powernv_pstate_info.nominal;
+ }
+ }
/*
* abs() is deliberately used so that it works with
* both monotonically increasing and decreasing
} else {
gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
gpstates->highest_lpstate_idx,
- freq_data.pstate_id);
+ gpstates->last_lpstate_idx);
}
/*
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ if (!ctx->authsize)
+ return 0;
+
/* NULL encryption / decryption */
if (!ctx->enckeylen)
return aead_null_set_sh_desc(aead);
keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
+ desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
if (rc)
return rc;
+ /* adjust the dax_region resource to the start of data */
+ res.start += le64_to_cpu(pfn_sb->dataoff);
+
nd_region = to_nd_region(dev->parent);
dax_region = alloc_dax_region(dev, nd_region->id, &res,
le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
Support for error detection and correction on the Intel
Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
+config EDAC_SKX
+ tristate "Intel Skylake server Integrated MC"
+ depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+ depends on PCI_MMCONFIG
+ help
+ Support for error detection and correction on the Intel
+ Skylake server Integrated Memory Controllers.
+
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
depends on EDAC_MM_EDAC && FSL_SOC
obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
+obj-$(CONFIG_EDAC_SKX) += skx_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
/* Knight's Landing Support */
/*
* KNL's memory channels are swizzled between memory controllers.
- * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2
+ * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
*/
-#define knl_channel_remap(channel) ((channel + 3) % 6)
+#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
- return knl_channel_remap(mc*3 + chan);
+ return knl_channel_remap(mc, chan);
}
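
As a quick check of the corrected swizzle, the two-argument macro now agrees
with the fixed comment (MC0 maps to CH3,4,5 and MC1 to CH0,1,2). A small
standalone test, with the macro copied from the hunk above:

    #include <assert.h>

    #define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)

    int main(void)
    {
            /* MC0 channels 0..2 land on global channels 3..5 */
            assert(knl_channel_remap(0, 0) == 3);
            assert(knl_channel_remap(0, 2) == 5);
            /* MC1 channels 0..2 keep their numbers */
            assert(knl_channel_remap(1, 0) == 0);
            assert(knl_channel_remap(1, 2) == 2);
            return 0;
    }
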
/*
} else {
char A = *("A");
- channel = knl_channel_remap(channel);
+ /*
+ * Reported channel is in range 0-2, so we can't map it
+ * back to mc. To figure out mc we check machine check
+ * bank register that reported this error.
+ * bank15 means mc0 and bank16 means mc1.
+ */
+ channel = knl_channel_remap(m->bank == 16, channel);
channel_mask = 1 << channel;
+
snprintf(msg, sizeof(msg),
"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
overflow ? " OVERFLOW" : "",
--- /dev/null
+/*
+ * EDAC driver for Intel(R) Xeon(R) Skylake processors
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+#define SKX_REVISION " Ver: 1.0 "
+
+/*
+ * Debug macros
+ */
+#define skx_printk(level, fmt, arg...) \
+ edac_printk(level, "skx", fmt, ##arg)
+
+#define skx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+ (((v) & GENMASK_ULL((hi), (lo))) >> (lo))
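
GET_BITFIELD(v, lo, hi) returns the inclusive bit range [lo, hi] of v shifted
down to bit 0, and is used throughout the decode code that follows. A
standalone check, with a userspace stand-in for the kernel's GENMASK_ULL():

    #include <assert.h>

    /* Userspace stand-in for the kernel's GENMASK_ULL(hi, lo). */
    #define GENMASK_ULL(hi, lo) \
            (((~0ULL) >> (63 - (hi))) & (~0ULL << (lo)))
    #define GET_BITFIELD(v, lo, hi) \
            (((v) & GENMASK_ULL((hi), (lo))) >> (lo))

    int main(void)
    {
            /* bits 8..15 of 0x12345678 are 0x56 */
            assert(GET_BITFIELD(0x12345678ULL, 8, 15) == 0x56);
            return 0;
    }
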
+
+static LIST_HEAD(skx_edac_list);
+
+static u64 skx_tolm, skx_tohm;
+
+#define NUM_IMC 2 /* memory controllers per socket */
+#define NUM_CHANNELS 3 /* channels per memory controller */
+#define NUM_DIMMS 2 /* Max DIMMS per channel */
+
+#define MASK26 0x3FFFFFF /* Mask for 2^26 */
+#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
+
+/*
+ * Each cpu socket contains some pci devices that provide global
+ * information, and also some that are local to each of the two
+ * memory controllers on the die.
+ */
+struct skx_dev {
+ struct list_head list;
+ u8 bus[4];
+ struct pci_dev *sad_all;
+ struct pci_dev *util_all;
+ u32 mcroute;
+ struct skx_imc {
+ struct mem_ctl_info *mci;
+ u8 mc; /* system wide mc# */
+ u8 lmc; /* socket relative mc# */
+ u8 src_id, node_id;
+ struct skx_channel {
+ struct pci_dev *cdev;
+ struct skx_dimm {
+ u8 close_pg;
+ u8 bank_xor_enable;
+ u8 fine_grain_bank;
+ u8 rowbits;
+ u8 colbits;
+ } dimms[NUM_DIMMS];
+ } chan[NUM_CHANNELS];
+ } imc[NUM_IMC];
+};
+static int skx_num_sockets;
+
+struct skx_pvt {
+ struct skx_imc *imc;
+};
+
+struct decoded_addr {
+ struct skx_dev *dev;
+ u64 addr;
+ int socket;
+ int imc;
+ int channel;
+ u64 chan_addr;
+ int sktways;
+ int chanways;
+ int dimm;
+ int rank;
+ int channel_rank;
+ u64 rank_address;
+ int row;
+ int column;
+ int bank_address;
+ int bank_group;
+};
+
+static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+{
+ struct skx_dev *d;
+
+ list_for_each_entry(d, &skx_edac_list, list) {
+ if (d->bus[idx] == bus)
+ return d;
+ }
+
+ return NULL;
+}
+
+enum munittype {
+ CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+};
+
+struct munit {
+ u16 did;
+ u16 devfn[NUM_IMC];
+ u8 busidx;
+ u8 per_socket;
+ enum munittype mtype;
+};
+
+/*
+ * List of PCI device ids that we need together with some device
+ * number and function numbers to tell which memory controller the
+ * device belongs to.
+ */
+static const struct munit skx_all_munits[] = {
+ { 0x2054, { }, 1, 1, SAD_ALL },
+ { 0x2055, { }, 1, 1, UTIL_ALL },
+ { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
+ { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
+ { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+ { 0x208e, { }, 1, 0, SAD },
+ { }
+};
+
+/*
+ * We use the per-socket device 0x2016 to count how many sockets are present,
+ * and to determine which PCI buses are associated with each socket. Allocate
+ * and build the full list of all the skx_dev structures that we need here.
+ */
+static int get_all_bus_mappings(void)
+{
+ struct pci_dev *pdev, *prev;
+ struct skx_dev *d;
+ u32 reg;
+ int ndev = 0;
+
+ prev = NULL;
+ for (;;) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
+ if (!pdev)
+ break;
+ ndev++;
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+ pci_read_config_dword(pdev, 0xCC, &reg);
+ d->bus[0] = GET_BITFIELD(reg, 0, 7);
+ d->bus[1] = GET_BITFIELD(reg, 8, 15);
+ d->bus[2] = GET_BITFIELD(reg, 16, 23);
+ d->bus[3] = GET_BITFIELD(reg, 24, 31);
+ edac_dbg(2, "busses: %x, %x, %x, %x\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ list_add_tail(&d->list, &skx_edac_list);
+ skx_num_sockets++;
+ prev = pdev;
+ }
+
+ return ndev;
+}
+
+static int get_all_munits(const struct munit *m)
+{
+ struct pci_dev *pdev, *prev;
+ struct skx_dev *d;
+ u32 reg;
+ int i = 0, ndev = 0;
+
+ prev = NULL;
+ for (;;) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
+ if (!pdev)
+ break;
+ ndev++;
+ if (m->per_socket == NUM_IMC) {
+ for (i = 0; i < NUM_IMC; i++)
+ if (m->devfn[i] == pdev->devfn)
+ break;
+ if (i == NUM_IMC)
+ goto fail;
+ }
+ d = get_skx_dev(pdev->bus->number, m->busidx);
+ if (!d)
+ goto fail;
+
+ /* Be sure that the device is enabled */
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ skx_printk(KERN_ERR,
+ "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+ goto fail;
+ }
+
+ switch (m->mtype) {
+ case CHAN0: case CHAN1: case CHAN2:
+ pci_dev_get(pdev);
+ d->imc[i].chan[m->mtype].cdev = pdev;
+ break;
+ case SAD_ALL:
+ pci_dev_get(pdev);
+ d->sad_all = pdev;
+ break;
+ case UTIL_ALL:
+ pci_dev_get(pdev);
+ d->util_all = pdev;
+ break;
+ case SAD:
+ /*
+ * There is one of these devices per core, including
+ * cores that don't exist on this SKU. Ignore any that
+ * read a route table of zero, and make sure all the
+ * non-zero values match.
+ */
+ pci_read_config_dword(pdev, 0xB4, &reg);
+ if (reg != 0) {
+ if (d->mcroute == 0)
+ d->mcroute = reg;
+ else if (d->mcroute != reg) {
+ skx_printk(KERN_ERR,
+ "mcroute mismatch\n");
+ goto fail;
+ }
+ }
+ ndev--;
+ break;
+ }
+
+ prev = pdev;
+ }
+
+ return ndev;
+fail:
+ pci_dev_put(pdev);
+ return -ENODEV;
+}
+
+static const struct x86_cpu_id skx_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+static u8 get_src_id(struct skx_dev *d)
+{
+ u32 reg;
+
+ pci_read_config_dword(d->util_all, 0xF0, &reg);
+
+ return GET_BITFIELD(reg, 12, 14);
+}
+
+static u8 skx_get_node_id(struct skx_dev *d)
+{
+ u32 reg;
+
+ pci_read_config_dword(d->util_all, 0xF4, &reg);
+
+ return GET_BITFIELD(reg, 0, 2);
+}
+
+static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
+ int maxval, char *name)
+{
+ u32 val = GET_BITFIELD(reg, lobit, hibit);
+
+ if (val < minval || val > maxval) {
+ edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+ return -EINVAL;
+ }
+ return val + add;
+}
+
+#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
+
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
+#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
+
+static int get_width(u32 mtr)
+{
+ switch (GET_BITFIELD(mtr, 8, 9)) {
+ case 0:
+ return DEV_X4;
+ case 1:
+ return DEV_X8;
+ case 2:
+ return DEV_X16;
+ }
+ return DEV_UNKNOWN;
+}
+
+static int skx_get_hi_lo(void)
+{
+ struct pci_dev *pdev;
+ u32 reg;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
+ if (!pdev) {
+ edac_dbg(0, "Can't get tolm/tohm\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_dword(pdev, 0xD0, &reg);
+ skx_tolm = reg;
+ pci_read_config_dword(pdev, 0xD4, &reg);
+ skx_tohm = reg;
+ pci_read_config_dword(pdev, 0xD8, &reg);
+ skx_tohm |= (u64)reg << 32;
+
+ pci_dev_put(pdev);
+ edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+
+ return 0;
+}
+
+static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+ struct skx_imc *imc, int chan, int dimmno)
+{
+ int banks = 16, ranks, rows, cols, npages;
+ u64 size;
+
+ if (!IS_DIMM_PRESENT(mtr))
+ return 0;
+ ranks = numrank(mtr);
+ rows = numrow(mtr);
+ cols = numcol(mtr);
+
+ /*
+ * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
+ */
+ size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
+ npages = MiB_TO_PAGES(size);
+
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ imc->mc, chan, dimmno, size, npages,
+ banks, ranks, rows, cols);
+
+ imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+ imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+ imc->chan[chan].dimms[dimmno].rowbits = rows;
+ imc->chan[chan].dimms[dimmno].colbits = cols;
+
+ dimm->nr_pages = npages;
+ dimm->grain = 32;
+ dimm->dtype = get_width(mtr);
+ dimm->mtype = MEM_DDR4;
+ dimm->edac_mode = EDAC_SECDED; /* likely better than this */
+ snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+ imc->src_id, imc->lmc, chan, dimmno);
+
+ return 1;
+}
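
To make the units in the size computation above concrete: plugging hypothetical
values rows = 16, cols = 10, ranks = 1 and banks = 16 into the expression as
written gives (2^27 * 16) >> 17 = 16384, i.e. 16384 MiB (16 GiB), which
MiB_TO_PAGES() then converts into a page count. A standalone check of just that
arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical DIMM geometry, not read from real hardware */
            int rows = 16, cols = 10, ranks = 1, banks = 16;
            uint64_t size_mib;

            /* 8-byte (2^3) words shifted down to MiB (2^20), as in the hunk */
            size_mib = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
            assert(size_mib == 16384);
            return 0;
    }
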
+
+#define SKX_GET_MTMTR(dev, reg) \
+ pci_read_config_dword((dev), 0x87c, &reg)
+
+static bool skx_check_ecc(struct pci_dev *pdev)
+{
+ u32 mtmtr;
+
+ SKX_GET_MTMTR(pdev, mtmtr);
+
+ return !!GET_BITFIELD(mtmtr, 2, 2);
+}
+
+static int skx_get_dimm_config(struct mem_ctl_info *mci)
+{
+ struct skx_pvt *pvt = mci->pvt_info;
+ struct skx_imc *imc = pvt->imc;
+ struct dimm_info *dimm;
+ int i, j;
+ u32 mtr, amap;
+ int ndimms;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ ndimms = 0;
+ pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
+ for (j = 0; j < NUM_DIMMS; j++) {
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, i, j, 0);
+ pci_read_config_dword(imc->chan[i].cdev,
+ 0x80 + 4*j, &mtr);
+ ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
+ }
+ if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+ skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static void skx_unregister_mci(struct skx_imc *imc)
+{
+ struct mem_ctl_info *mci = imc->mci;
+
+ if (!mci)
+ return;
+
+ edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->pdev);
+
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+}
+
+static int skx_register_mci(struct skx_imc *imc)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
+ struct pci_dev *pdev = imc->chan[0].cdev;
+ struct skx_pvt *pvt;
+ int rc;
+
+ /* allocate a new MC control structure */
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANNELS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = NUM_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
+ sizeof(struct skx_pvt));
+
+ if (unlikely(!mci))
+ return -ENOMEM;
+
+ edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
+
+ /* Associate skx_dev and mci for future usage */
+ imc->mci = mci;
+ pvt = mci->pvt_info;
+ pvt->imc = imc;
+
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
+ imc->node_id, imc->lmc);
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "skx_edac.c";
+ mci->dev_name = pci_name(imc->chan[0].cdev);
+ mci->mod_ver = SKX_REVISION;
+ mci->ctl_page_to_phys = NULL;
+
+ rc = skx_get_dimm_config(mci);
+ if (rc < 0)
+ goto fail;
+
+ /* record ptr to the generic device */
+ mci->pdev = &pdev->dev;
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (unlikely(edac_mc_add_mc(mci))) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ imc->mci = NULL;
+ return rc;
+}
+
+#define SKX_MAX_SAD 24
+
+#define SKX_GET_SAD(d, i, reg) \
+ pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
+#define SKX_GET_ILV(d, i, reg) \
+ pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)
+
+#define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31)
+#define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27)
+#define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
+#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
+#define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4)
+#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
+#define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0)
+
+#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
+#define SKX_ILV_TARGET(tgt) ((tgt) & 7)
+
+static bool skx_sad_decode(struct decoded_addr *res)
+{
+ struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
+ u64 addr = res->addr;
+ int i, idx, tgt, lchan, shift;
+ u32 sad, ilv;
+ u64 limit, prev_limit;
+ int remote = 0;
+
+ /* Simple sanity check for I/O space or out of range */
+ if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
+ edac_dbg(0, "Address %llx out of range\n", addr);
+ return false;
+ }
+
+restart:
+ prev_limit = 0;
+ for (i = 0; i < SKX_MAX_SAD; i++) {
+ SKX_GET_SAD(d, i, sad);
+ limit = SKX_SAD_LIMIT(sad);
+ if (SKX_SAD_ENABLE(sad)) {
+ if (addr >= prev_limit && addr <= limit)
+ goto sad_found;
+ }
+ prev_limit = limit + 1;
+ }
+ edac_dbg(0, "No SAD entry for %llx\n", addr);
+ return false;
+
+sad_found:
+ SKX_GET_ILV(d, i, ilv);
+
+ switch (SKX_SAD_INTERLEAVE(sad)) {
+ case 0:
+ idx = GET_BITFIELD(addr, 6, 8);
+ break;
+ case 1:
+ idx = GET_BITFIELD(addr, 8, 10);
+ break;
+ case 2:
+ idx = GET_BITFIELD(addr, 12, 14);
+ break;
+ case 3:
+ idx = GET_BITFIELD(addr, 30, 32);
+ break;
+ }
+
+ tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
+
+ /* If it points to another node, find it and start over */
+ if (SKX_ILV_REMOTE(tgt)) {
+ if (remote) {
+ edac_dbg(0, "Double remote!\n");
+ return false;
+ }
+ remote = 1;
+ list_for_each_entry(d, &skx_edac_list, list) {
+ if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
+ goto restart;
+ }
+ edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
+ return false;
+ }
+
+ if (SKX_SAD_MOD3(sad) == 0)
+ lchan = SKX_ILV_TARGET(tgt);
+ else {
+ switch (SKX_SAD_MOD3MODE(sad)) {
+ case 0:
+ shift = 6;
+ break;
+ case 1:
+ shift = 8;
+ break;
+ case 2:
+ shift = 12;
+ break;
+ default:
+ edac_dbg(0, "illegal mod3mode\n");
+ return false;
+ }
+ switch (SKX_SAD_MOD3ASMOD2(sad)) {
+ case 0:
+ lchan = (addr >> shift) % 3;
+ break;
+ case 1:
+ lchan = (addr >> shift) % 2;
+ break;
+ case 2:
+ lchan = (addr >> shift) % 2;
+ lchan = (lchan << 1) | !lchan;
+ break;
+ case 3:
+ lchan = ((addr >> shift) % 2) << 1;
+ break;
+ }
+ lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
+ }
+
+ res->dev = d;
+ res->socket = d->imc[0].src_id;
+ res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
+ res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
+
+ edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
+ res->addr, res->socket, res->imc, res->channel);
+ return true;
+}
+
+#define SKX_MAX_TAD 8
+
+#define SKX_GET_TADBASE(d, mc, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
+#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
+#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)
+
+#define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26)
+#define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5)
+#define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7)
+#define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
+#define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26)
+#define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11))
+#define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1)
+
+/* which bit is used for both socket and channel interleave */
+static int skx_granularity[] = { 6, 8, 12, 30 };
+
+static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
+{
+ addr >>= shift;
+ addr /= ways;
+ addr <<= shift;
+
+ return addr | (lowbits & ((1ull << shift) - 1));
+}
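
skx_do_interleave() undoes one level of interleaving: it drops the bits below
'shift', divides the remainder by the number of ways, shifts back up, and then
splices the requested low bits back in. For example, addr = 0x12345 with
shift = 8, ways = 2 and lowbits = addr gives 0x9145 (0x123 / 2 = 0x91, with the
original low byte 0x45 restored). A standalone check, with the helper copied
from the hunk above:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t u64;

    static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
    {
            addr >>= shift;
            addr /= ways;
            addr <<= shift;

            return addr | (lowbits & ((1ull << shift) - 1));
    }

    int main(void)
    {
            assert(skx_do_interleave(0x12345, 8, 2, 0x12345) == 0x9145);
            return 0;
    }
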
+
+static bool skx_tad_decode(struct decoded_addr *res)
+{
+ int i;
+ u32 base, wayness, chnilvoffset;
+ int skt_interleave_bit, chn_interleave_bit;
+ u64 channel_addr;
+
+ for (i = 0; i < SKX_MAX_TAD; i++) {
+ SKX_GET_TADBASE(res->dev, res->imc, i, base);
+ SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
+ if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
+ goto tad_found;
+ }
+ edac_dbg(0, "No TAD entry for %llx\n", res->addr);
+ return false;
+
+tad_found:
+ res->sktways = SKX_TAD_SKTWAYS(wayness);
+ res->chanways = SKX_TAD_CHNWAYS(wayness);
+ skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
+ chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
+
+ SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
+ channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
+
+ if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
+ /* Must handle channel first, then socket */
+ channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+ res->chanways, channel_addr);
+ channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+ res->sktways, channel_addr);
+ } else {
+ /* Handle socket then channel. Preserve low bits from original address */
+ channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+ res->sktways, res->addr);
+ channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+ res->chanways, res->addr);
+ }
+
+ res->chan_addr = channel_addr;
+
+ edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
+ res->addr, res->chan_addr, res->sktways, res->chanways);
+ return true;
+}
+
+#define SKX_MAX_RIR 4
+
+#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+ 0x108 + 4 * (i), &reg)
+#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+ 0x120 + 16 * idx + 4 * (i), &reg)
+
+#define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
+#define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
+#define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
+#define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
+#define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
+
+static bool skx_rir_decode(struct decoded_addr *res)
+{
+ int i, idx, chan_rank;
+ int shift;
+ u32 rirway, rirlv;
+ u64 rank_addr, prev_limit = 0, limit;
+
+ if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
+ shift = 6;
+ else
+ shift = 13;
+
+ for (i = 0; i < SKX_MAX_RIR; i++) {
+ SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
+ limit = SKX_RIR_LIMIT(rirway);
+ if (SKX_RIR_VALID(rirway)) {
+ if (prev_limit <= res->chan_addr &&
+ res->chan_addr <= limit)
+ goto rir_found;
+ }
+ prev_limit = limit;
+ }
+ edac_dbg(0, "No RIR entry for %llx\n", res->addr);
+ return false;
+
+rir_found:
+ rank_addr = res->chan_addr >> shift;
+ rank_addr /= SKX_RIR_WAYS(rirway);
+ rank_addr <<= shift;
+ rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
+
+ res->rank_address = rank_addr;
+ idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
+
+ SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
+ res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
+ chan_rank = SKX_RIR_CHAN_RANK(rirlv);
+ res->channel_rank = chan_rank;
+ res->dimm = chan_rank / 4;
+ res->rank = chan_rank % 4;
+
+ edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
+ res->addr, res->dimm, res->rank,
+ res->channel_rank, res->rank_address);
+ return true;
+}
+
+static u8 skx_close_row[] = {
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+static u8 skx_close_column[] = {
+ 3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+static u8 skx_open_row[] = {
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+static u8 skx_open_column[] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+static u8 skx_open_fine_column[] = {
+ 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int skx_bits(u64 addr, int nbits, u8 *bits)
+{
+ int i, res = 0;
+
+ for (i = 0; i < nbits; i++)
+ res |= ((addr >> bits[i]) & 1) << i;
+ return res;
+}
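
skx_bits() gathers scattered rank-address bits (per the row/column tables
above) into a contiguous row or column number. For instance, collecting bits
{3, 4, 5} of 0x28, which has bits 3 and 5 set, yields binary 101 = 5. A
standalone check, with the helper copied from the hunk above:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint8_t u8;

    static int skx_bits(u64 addr, int nbits, u8 *bits)
    {
            int i, res = 0;

            for (i = 0; i < nbits; i++)
                    res |= ((addr >> bits[i]) & 1) << i;
            return res;
    }

    int main(void)
    {
            u8 bits[] = { 3, 4, 5 };

            /* 0x28 has bits 3 and 5 set -> result is binary 101 */
            assert(skx_bits(0x28, 3, bits) == 5);
            return 0;
    }
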
+
+static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+ int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+ if (do_xor)
+ ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+ return ret;
+}
+
+static bool skx_mad_decode(struct decoded_addr *r)
+{
+ struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
+ int bg0 = dimm->fine_grain_bank ? 6 : 13;
+
+ if (dimm->close_pg) {
+ r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
+ r->column |= 0x400; /* C10 is autoprecharge, always set */
+ r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
+ r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
+ } else {
+ r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
+ if (dimm->fine_grain_bank)
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
+ else
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
+ r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
+ r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
+ }
+ r->row &= (1u << dimm->rowbits) - 1;
+
+ edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
+ r->addr, r->row, r->column, r->bank_address,
+ r->bank_group);
+ return true;
+}
+
+static bool skx_decode(struct decoded_addr *res)
+{
+
+ return skx_sad_decode(res) && skx_tad_decode(res) &&
+ skx_rir_decode(res) && skx_mad_decode(res);
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static struct dentry *skx_test;
+static u64 skx_fake_addr;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct decoded_addr res;
+
+ res.addr = val;
+ skx_decode(&res);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static struct dentry *mydebugfs_create(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+}
+
+static void setup_skx_debug(void)
+{
+ skx_test = debugfs_create_dir("skx_edac_test", NULL);
+ mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
+}
+
+static void teardown_skx_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+#else
+static void setup_skx_debug(void)
+{
+}
+
+static void teardown_skx_debug(void)
+{
+}
+#endif /*CONFIG_EDAC_DEBUG*/
+
+static void skx_mce_output_error(struct mem_ctl_info *mci,
+ const struct mce *m,
+ struct decoded_addr *res)
+{
+ enum hw_event_mc_err_type tp_event;
+ char *type, *optype, msg[256];
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+ bool recoverable;
+ u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+ u32 mscod = GET_BITFIELD(m->status, 16, 31);
+ u32 errcode = GET_BITFIELD(m->status, 0, 15);
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+
+ recoverable = GET_BITFIELD(m->status, 56, 56);
+
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+
+ /*
+ * According to Table 15-9 of the Intel Architecture spec vol 3A,
+ * memory errors should fit in this mask:
+ * 000f 0000 1mmm cccc (binary)
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ * If the mask doesn't match, report an error to the parsing logic
+ */
+ if (!((errcode & 0xef80) == 0x80)) {
+ optype = "Can't parse: it is not a mem";
+ } else {
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+ }
+
+ snprintf(msg, sizeof(msg),
+ "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ mscod, errcode,
+ res->socket, res->imc, res->rank,
+ res->bank_group, res->bank_address, res->row, res->column);
+
+ edac_dbg(0, "%s\n", msg);
+
+ /* Call the helper to output message */
+ edac_mc_handle_error(tp_event, mci, core_err_cnt,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+ res->channel, res->dimm, -1,
+ optype, msg);
+}
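
The mask test near the top of this function implements the comment's
'000f 0000 1mmm cccc' pattern: bit 7 must be set while bits 8-11 and 13-15 must
be clear; the channel, error-type and filter bits are left free. For example,
err_code 0x0091 (a memory read error on channel 1) passes, while 0x0011 does
not. A standalone check of just that predicate (the helper name is made up for
illustration):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the '(errcode & 0xef80) == 0x80' check above. */
    static int is_memory_error_code(uint32_t errcode)
    {
            return (errcode & 0xef80) == 0x80;
    }

    int main(void)
    {
            assert(is_memory_error_code(0x0091));   /* mem read, channel 1 */
            assert(!is_memory_error_code(0x0011));  /* bit 7 clear */
            return 0;
    }
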
+
+static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ struct decoded_addr res;
+ struct mem_ctl_info *mci;
+ char *type;
+
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ return NOTIFY_DONE;
+
+ /* ignore unless this is memory related with an address */
+ if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+ return NOTIFY_DONE;
+
+ res.addr = mce->addr;
+ if (!skx_decode(&res))
+ return NOTIFY_DONE;
+ mci = res.dev->imc[res.imc].mci;
+
+ if (mce->mcgstatus & MCG_STATUS_MCIP)
+ type = "Exception";
+ else
+ type = "Event";
+
+ skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
+
+ skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+ "Bank %d: %016Lx\n", mce->extcpu, type,
+ mce->mcgstatus, mce->bank, mce->status);
+ skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
+ skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
+ skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
+
+ skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
+ "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+ mce->time, mce->socketid, mce->apicid);
+
+ skx_mce_output_error(mci, mce, &res);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block skx_mce_dec = {
+ .notifier_call = skx_mce_check_error,
+};
+
+static void skx_remove(void)
+{
+ int i, j;
+ struct skx_dev *d, *tmp;
+
+ edac_dbg(0, "\n");
+
+ list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
+ list_del(&d->list);
+ for (i = 0; i < NUM_IMC; i++) {
+ skx_unregister_mci(&d->imc[i]);
+ for (j = 0; j < NUM_CHANNELS; j++)
+ pci_dev_put(d->imc[i].chan[j].cdev);
+ }
+ pci_dev_put(d->util_all);
+ pci_dev_put(d->sad_all);
+
+ kfree(d);
+ }
+}
+
+/*
+ * skx_init:
+ * make sure we are running on the correct cpu model
+ * search for all the devices we need
+ * check which DIMMs are present.
+ */
+static int __init skx_init(void)
+{
+ const struct x86_cpu_id *id;
+ const struct munit *m;
+ int rc = 0, i;
+ u8 mc = 0, src_id, node_id;
+ struct skx_dev *d;
+
+ edac_dbg(2, "\n");
+
+ id = x86_match_cpu(skx_cpuids);
+ if (!id)
+ return -ENODEV;
+
+ rc = skx_get_hi_lo();
+ if (rc)
+ return rc;
+
+ rc = get_all_bus_mappings();
+ if (rc < 0)
+ goto fail;
+ if (rc == 0) {
+ edac_dbg(2, "No memory controllers found\n");
+ return -ENODEV;
+ }
+
+ for (m = skx_all_munits; m->did; m++) {
+ rc = get_all_munits(m);
+ if (rc < 0)
+ goto fail;
+ if (rc != m->per_socket * skx_num_sockets) {
+ edac_dbg(2, "Expected %d, got %d of %x\n",
+ m->per_socket * skx_num_sockets, rc, m->did);
+ rc = -ENODEV;
+ goto fail;
+ }
+ }
+
+ list_for_each_entry(d, &skx_edac_list, list) {
+ src_id = get_src_id(d);
+ node_id = skx_get_node_id(d);
+ edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+ for (i = 0; i < NUM_IMC; i++) {
+ d->imc[i].mc = mc++;
+ d->imc[i].lmc = i;
+ d->imc[i].src_id = src_id;
+ d->imc[i].node_id = node_id;
+ rc = skx_register_mci(&d->imc[i]);
+ if (rc < 0)
+ goto fail;
+ }
+ }
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ setup_skx_debug();
+
+ mce_register_decode_chain(&skx_mce_dec);
+
+ return 0;
+fail:
+ skx_remove();
+ return rc;
+}
+
+static void __exit skx_exit(void)
+{
+ edac_dbg(2, "\n");
+ mce_unregister_decode_chain(&skx_mce_dec);
+ skx_remove();
+ teardown_skx_debug();
+}
+
+module_init(skx_init);
+module_exit(skx_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/efi.h>
+#include <linux/vmalloc.h>
#define NO_FURTHER_WRITE_ACTION -1
int ret;
void *cap_hdr_temp;
- cap_hdr_temp = kmap(cap_info->pages[0]);
+ cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
if (!cap_hdr_temp) {
- pr_debug("%s: kmap() failed\n", __func__);
+ pr_debug("%s: vmap() failed\n", __func__);
return -EFAULT;
}
ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
- kunmap(cap_info->pages[0]);
+ vunmap(cap_hdr_temp);
if (ret) {
pr_err("%s: efi_capsule_update() failed\n", __func__);
return ret;
* map the capsule described by @capsule with its data in @pages and
* send it to the firmware via the UpdateCapsule() runtime service.
*
- * @capsule must be a virtual mapping of the first page in @pages
- * (@pages[0]) in the kernel address space. That is, a
- * capsule_header_t that describes the entire contents of the capsule
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * A capsule_header_t that describes the entire contents of the capsule
* must be at the start of the first data page.
*
* Even though this function will validate that the firmware supports
config OF_GPIO
def_bool y
depends on OF
+ depends on HAS_IOMEM
config GPIO_ACPI
def_bool y
config GPIO_ETRAXFS
bool "Axis ETRAX FS General I/O"
depends on CRIS || COMPILE_TEST
- depends on OF
+ depends on OF_GPIO
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
help
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
- depends on OF
+ depends on OF_GPIO
select GPIO_GENERIC
select IRQ_DOMAIN
help
config GPIO_MVEBU
def_bool y
depends on PLAT_ORION
- depends on OF
+ depends on OF_GPIO
select GENERIC_IRQ_CHIP
config GPIO_MXC
bool "NVIDIA Tegra GPIO support"
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
- depends on OF
+ depends on OF_GPIO
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
config GPIO_74X164
tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on OF
+ depends on OF_GPIO
help
Driver for 74x164 compatible serial-in/parallel-out 8-outputs
shift registers. This driver can be used to provide access
ts->chip.parent = dev;
ts->chip.owner = THIS_MODULE;
+ ret = gpiochip_add_data(&ts->chip, ts);
+ if (ret)
+ goto exit_destroy;
+
/*
* initialize pullups according to platform data and cache the
* register values for later use.
}
}
- ret = gpiochip_add_data(&ts->chip, ts);
- if (ret)
- goto exit_destroy;
-
return ret;
exit_destroy:
/* custom LRU management */
struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+ /* guard for log2_size array, don't add anything in between */
+ struct amdgpu_mman_lru guard;
};
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ /* Skip TV/CV support */
+ if ((le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_TV1_SUPPORT) ||
+ (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT))
+ continue;
+
+ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+ con_obj_id, le16_to_cpu(path->usDeviceTag));
+ continue;
+ }
+
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
-#if 1
- /* This is a temporary hack until the D3 cold support
- * makes it upstream. The ATPX power_control method seems
- * to still work on even if the system should be using
- * the new standardized hybrid D3 cold ACPI interface.
- */
- atpx->functions.power_cntl = true;
-#else
atpx->functions.power_cntl = false;
-#endif
atpx->is_hybrid = true;
}
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size, i;
+ int size;
if (adev->pp_enabled)
size = amdgpu_dpm_get_pp_table(adev, &table);
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
- for (i = 0; i < size; i++) {
- sprintf(buf + i, "%02x", table[i]);
- }
- sprintf(buf + i, "\n");
+ memcpy(buf, table, size);
return size;
}
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
struct list_head *res = lru->lru[tbo->mem.mem_type];
lru->lru[tbo->mem.mem_type] = &tbo->lru;
+ while ((++lru)->lru[tbo->mem.mem_type] == res)
+ lru->lru[tbo->mem.mem_type] = &tbo->lru;
return res;
}
struct list_head *res = lru->swap_lru;
lru->swap_lru = &tbo->swap;
+ while ((++lru)->swap_lru == res)
+ lru->swap_lru = &tbo->swap;
return res;
}
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
}
+ for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+ adev->mman.guard.lru[j] = NULL;
+ adev->mman.guard.swap_lru = NULL;
+
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
r = 0;
}
-error:
fence_put(fence);
+
+error:
return r;
}
r = amd_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs);
if (r)
- return r;
+ goto err;
vm->page_directory_fence = NULL;
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
+err:
+ drm_free_large(vm->page_tables);
+
return r;
}
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
default: BUG();
}
static const u32 golden_settings_polaris11_a11[] =
{
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
- mmSQ_CONFIG, 0x07f80000, 0x07180000,
+ mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
- mmCB_HW_CONTROL_2, 0, 0x0f000000,
+ mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
return 0;
default: BUG();
}
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
+static const u32 golden_settings_stoney_common[] =
+{
+ mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+ mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ (const u32)ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
- } else if (r) {
+ } else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
sizeof(u32)) + inx;
pr_debug("kfd: get kernel queue doorbell\n"
- " doorbell offset == 0x%08d\n"
+ " doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
spin_lock(&sched->job_list_lock);
s_job = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (s_job)
+ if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
goto out;
}
+ /*
+ * cirrus_modeset_init() is initializing/registering the emulated fbdev
+ * and DRM internals can access/test some of the fields in
+ * mode_config->funcs as part of the fbdev registration process.
+ * Make sure dev->mode_config.funcs is properly set to avoid
+ * dereferencing a NULL pointer.
+ * FIXME: mode_config.funcs assignment should probably be done in
+ * cirrus_modeset_init() (that's a common pattern seen in other DRM
+ * drivers).
+ */
+ dev->mode_config.funcs = &cirrus_mode_funcs;
r = cirrus_modeset_init(cdev);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
goto out;
}
- dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
-
return 0;
out:
cirrus_driver_unload(dev);
val,
-1,
&replaced);
- state->color_mgmt_changed = replaced;
+ state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->ctm_property) {
ret = drm_atomic_replace_property_blob_from_id(crtc,
val,
sizeof(struct drm_color_ctm),
&replaced);
- state->color_mgmt_changed = replaced;
+ state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->gamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(crtc,
val,
-1,
&replaced);
- state->color_mgmt_changed = replaced;
+ state->color_mgmt_changed |= replaced;
return ret;
} else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
struct drm_connector *connector;
int ret;
- mutex_lock(&dev->mode_config.mutex);
-
- drm_for_each_connector(connector, dev) {
+ /* FIXME: taking the mode config mutex ends up in a clash with
+ * fbcon/backlight registration */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
ret = drm_connector_register(connector);
if (ret)
goto err;
}
- mutex_unlock(&dev->mode_config.mutex);
-
return 0;
err:
struct drm_pending_vblank_event *e = NULL;
int ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC (1 << 10)
struct detailed_mode_closure {
struct drm_connector *connector;
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+ /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+ { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
/* HDMI deep color modes supported? Assign to info, if so */
drm_assign_hdmi_deep_color_info(edid, info, connector);
+ /*
+ * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
+ *
+ * For such displays, the DFP spec 1.0, section 3.10 "EDID support"
+ * tells us to assume 8 bpc color depth if the EDID doesn't have
+ * extensions which tell otherwise.
+ */
+ if ((info->bpc == 0) && (edid->revision < 4) &&
+ (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+ info->bpc = 8;
+ DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+ }
+
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
drm_add_display_info(edid, &connector->display_info, connector);
+ if (quirks & EDID_QUIRK_FORCE_6BPC)
+ connector->display_info.bpc = 6;
+
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
/* Sometimes user space wants everything disabled, so don't steal the
* display if there's a master. */
- if (lockless_dereference(dev->master))
+ if (READ_ONCE(dev->master))
return false;
drm_for_each_crtc(crtc, dev) {
if (ret < 0)
return ret;
- mutex_lock(&gpu->lock);
-
/*
* TODO
*
if (unlikely(event == ~0U)) {
DRM_ERROR("no free event\n");
ret = -EBUSY;
- goto out_unlock;
+ goto out_pm_put;
}
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
- goto out_unlock;
+ goto out_pm_put;
}
+ mutex_lock(&gpu->lock);
+
gpu->event[event].fence = fence;
submit->fence = fence->seqno;
gpu->active_fence = submit->fence;
hangcheck_timer_reset(gpu);
ret = 0;
-out_unlock:
mutex_unlock(&gpu->lock);
+out_pm_put:
etnaviv_gpu_pm_put(gpu);
return ret;
struct i915_ctx_hang_stats hang_stats;
- /* Unique identifier for this context, used by the hw for tracking */
unsigned long flags;
#define CONTEXT_NO_ZEROMAP BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
- unsigned hw_id;
+
+ /* Unique identifier for this context, used by the hw for tracking */
+ unsigned int hw_id;
u32 user_handle;
u32 ggtt_alignment;
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
+ struct drm_modeset_acquire_ctx reset_ctx;
struct list_head vm_list; /* Global list of all address spaces */
struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
+ enum {
+ I915_SKL_SAGV_UNKNOWN = 0,
+ I915_SKL_SAGV_DISABLED,
+ I915_SKL_SAGV_ENABLED,
+ I915_SKL_SAGV_NOT_CONTROLLED
+ } skl_sagv_status;
+
struct {
/*
* Raw watermark latency values:
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
+ wmb();
if (INTEL_GEN(dev_priv) < 6)
intel_gtt_chipset_flush();
}
ret = i915_gem_shmem_pread(dev, obj, args, file);
/* pread for non shmem backed objects */
- if (ret == -EFAULT || ret == -ENODEV)
+ if (ret == -EFAULT || ret == -ENODEV) {
+ intel_runtime_pm_get(to_i915(dev));
ret = i915_gem_gtt_pread(dev, obj, args->size,
args->offset, args->data_ptr);
+ intel_runtime_pm_put(to_i915(dev));
+ }
out:
drm_gem_object_unreference(&obj->base);
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT) {
+ if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else if (i915_gem_object_has_struct_page(obj))
}
intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+ engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}
void i915_gem_reset(struct drm_device *dev)
for_each_engine(engine, dev_priv)
i915_gem_reset_engine_cleanup(engine);
+ mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
i915_gem_context_reset(dev);
{
const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
- uint32_t flush_domains = 0;
- bool flush_chipset = false;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
}
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
- flush_chipset |= i915_gem_clflush_object(obj, false);
-
- flush_domains |= obj->base.write_domain;
+ i915_gem_clflush_object(obj, false);
}
- if (flush_chipset)
- i915_gem_chipset_flush(req->engine->i915);
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
+ /* Unconditionally flush any chipset caches (for streaming writes). */
+ i915_gem_chipset_flush(req->engine->i915);
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
}
i915_gem_cleanup_stolen(dev);
#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
+#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
/*
* Fence registers
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_ERROR_MASK 0xFF
+#define GEN6_PCODE_SUCCESS 0x0
+#define GEN6_PCODE_ILLEGAL_CMD 0x1
+#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define GEN6_PCODE_TIMEOUT 0x3
+#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
+#define GEN7_PCODE_TIMEOUT 0x2
+#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+#define GEN9_PCODE_SAGV_CONTROL 0x21
+#define GEN9_SAGV_DISABLE 0x0
+#define GEN9_SAGV_IS_DISABLED 0x1
+#define GEN9_SAGV_ENABLE 0x3
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return;
+ i915_audio_component_get_power(dev);
+
/*
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
}
+
+ i915_audio_component_put_power(dev);
}
/* Get CDCLK in kHz */
!IS_HASWELL(dev_priv))
return 0;
+ i915_audio_component_get_power(dev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
intel_encoder = dev_priv->dig_port_map[port];
unlock:
mutex_unlock(&dev_priv->av_mutex);
+ i915_audio_component_put_power(dev);
return err;
}
* be moved to FW_FAILED.
*/
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80007011, 0x000000CD, 0x1 },
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 },
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
{ 0x80009010, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 },
}
}
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+ int n_hdmi_entries;
+ int hdmi_level;
+ int hdmi_default_entry;
+
+ hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+ if (IS_BROXTON(dev_priv))
+ return hdmi_level;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+ hdmi_default_entry = 8;
+ } else if (IS_BROADWELL(dev_priv)) {
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ hdmi_default_entry = 7;
+ } else if (IS_HASWELL(dev_priv)) {
+ n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ hdmi_default_entry = 6;
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ hdmi_default_entry = 7;
+ }
+
+ /* Choose a good default if VBT is badly populated */
+ if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+ hdmi_level >= n_hdmi_entries)
+ hdmi_level = hdmi_default_entry;
+
+ return hdmi_level;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
- int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+ int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
size;
int hdmi_level;
enum port port;
const struct ddi_buf_trans *ddi_translations;
port = intel_ddi_get_encoder_port(encoder);
- hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+ hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
if (IS_BROXTON(dev_priv)) {
if (encoder->type != INTEL_OUTPUT_HDMI)
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
- hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
}
switch (encoder->type) {
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
- /* Choose a good default if VBT is badly populated */
- if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
- hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_default_entry;
-
/* Entry 9 is for HDMI: */
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
TRANS_CLK_SEL_DISABLED);
}
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+ enum port port, uint8_t iboost)
{
+ u32 tmp;
+
+ tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+ if (iboost)
+ tmp |= iboost << BALANCE_LEG_SHIFT(port);
+ else
+ tmp |= BALANCE_LEG_DISABLE(port);
+ I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum port port = intel_dig_port->port;
+ int type = encoder->type;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
int n_entries;
- u32 reg;
/* VBT may override standard boost values */
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
return;
}
- reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
- reg &= ~BALANCE_LEG_MASK(port);
- reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
-
- if (iboost)
- reg |= iboost << BALANCE_LEG_SHIFT(port);
- else
- reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+ _skl_ddi_set_iboost(dev_priv, port, iboost);
- I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+ if (port == PORT_A && intel_dig_port->max_lanes == 4)
+ _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
level = translate_signal_level(signal_levels);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+ skl_ddi_set_iboost(encoder, level);
else if (IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int level = intel_ddi_hdmi_level(dev_priv, port);
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_ddi_set_iboost(intel_encoder, level);
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
for_each_crtc(dev, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->primary);
- struct intel_plane_state *plane_state;
-
- drm_modeset_lock_crtc(crtc, &plane->base);
- plane_state = to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
if (plane_state->visible)
plane->update_plane(&plane->base,
to_intel_crtc_state(crtc->state),
plane_state);
+ }
+}
+
+static int
+__intel_display_resume(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, ret;
+
+ intel_modeset_setup_hw_state(dev);
+ i915_redisable_vga(dev);
+
+ if (!state)
+ return 0;
- drm_modeset_unlock_crtc(crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * Force recalculation even if we restore
+ * current state. With fast modeset this may not result
+ * in a modeset when the state is compatible.
+ */
+ crtc_state->mode_changed = true;
}
+
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+ ret = drm_atomic_commit(state);
+
+ WARN_ON(ret == -EDEADLK);
+ return ret;
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
/* no reset support for gen2 */
if (IS_GEN2(dev_priv))
return;
- /* reset doesn't touch the display */
+ /*
+ * Need mode_config.mutex so that we don't
+ * trample ongoing ->detect() and whatnot.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(ctx, 0);
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(ctx);
+ }
+
+ /* reset doesn't touch the display, but flips might get nuked anyway, */
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
return;
- drm_modeset_lock_all(&dev_priv->drm);
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- intel_display_suspend(&dev_priv->drm);
+ state = drm_atomic_helper_duplicate_state(dev, ctx);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ state = NULL;
+ DRM_ERROR("Duplicating state failed with %i\n", ret);
+ goto err;
+ }
+
+ ret = drm_atomic_helper_disable_all(dev, ctx);
+ if (ret) {
+ DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ goto err;
+ }
+
+ dev_priv->modeset_restore_state = state;
+ state->acquire_ctx = ctx;
+ return;
+
+err:
+ drm_atomic_state_free(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ int ret;
+
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
if (IS_GEN2(dev_priv))
return;
+ dev_priv->modeset_restore_state = NULL;
+
/* reset doesn't touch the display */
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
/*
* FIXME: Atomic will make this obsolete since we won't schedule
* CS-based flips (which might get lost in gpu resets) any more.
*/
- intel_update_primary_planes(&dev_priv->drm);
- return;
- }
-
- /*
- * The display has been reset as well,
- * so need a full re-initialization.
- */
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_update_primary_planes(dev);
+ } else {
+ /*
+ * The display has been reset as well,
+ * so need a full re-initialization.
+ */
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(&dev_priv->drm);
+ intel_modeset_init_hw(dev);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(&dev_priv->drm);
+ ret = __intel_display_resume(dev, state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(dev_priv);
+ }
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+ mutex_unlock(&dev->mode_config.mutex);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
- unsigned int i;
-
- for (i = 0; i < 15; i++) {
- if (skl_cdclk_pcu_ready(dev_priv))
- return true;
- udelay(10);
- }
-
- return false;
+ return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
}
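The open-coded 15-iteration retry loop is folded into the driver's bounded-poll macro, here apparently asking for a 3000 µs budget sampled every 10 µs. A minimal self-contained sketch of that pattern (the helper name, signature, and use of udelay() are assumptions for illustration, not the i915 macro itself):

#include <linux/types.h>	/* bool */
#include <linux/delay.h>	/* udelay() */

/*
 * Bounded poll: keep evaluating @cond until it is true or @timeout_us
 * microseconds have been spent, sampling every @interval_us.
 */
static bool poll_until(bool (*cond)(void *ctx), void *ctx,
		       unsigned int timeout_us, unsigned int interval_us)
{
	unsigned int waited = 0;

	while (!cond(ctx)) {
		if (waited >= timeout_us)
			return false;		/* gave up: timed out */
		udelay(interval_us);		/* busy-wait between samples */
		waited += interval_us;
	}

	return true;				/* condition met within the budget */
}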
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
- /* Clamp bpp to default limit on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0) {
- int type = connector->base.connector_type;
- int clamp_bpp = 24;
-
- /* Fall back to 18 bpp when DP sink capability is unknown. */
- if (type == DRM_MODE_CONNECTOR_DisplayPort ||
- type == DRM_MODE_CONNECTOR_eDP)
- clamp_bpp = 18;
-
- if (bpp > clamp_bpp) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
- bpp, clamp_bpp);
- pipe_config->pipe_bpp = clamp_bpp;
- }
+ /* Clamp bpp to 8 bpc (24 bpp) on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0 && bpp > 24) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+ bpp);
+ pipe_config->pipe_bpp = 24;
}
}
intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
dev_priv->display.modeset_commit_cdclk(state);
+ /*
+ * SKL workaround: bspec recommends we disable the SAGV when we
+ * have more than one pipe enabled
+ */
+ if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+ skl_disable_sagv(dev_priv);
+
intel_modeset_verify_disabled(dev);
}
intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
+ if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+ skl_can_enable_sagv(state))
+ skl_enable_sagv(dev_priv);
+
drm_atomic_helper_commit_hw_done(state);
if (intel_state->modeset)
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- bool setup = false;
dev_priv->modeset_restore_state = NULL;
+ if (state)
+ state->acquire_ctx = &ctx;
/*
* This is a kludge because with real atomic modeset mode_config.mutex
mutex_lock(&dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
- if (ret == 0 && !setup) {
- setup = true;
-
- intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
- }
-
- if (ret == 0 && state) {
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- state->acquire_ctx = &ctx;
-
- /* ignore any reset values/BIOS leftovers in the WM registers */
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- /*
- * Force recalculation even if we restore
- * current state. With fast modeset this may not result
- * in a modeset when the state is compatible.
- */
- crtc_state->mode_changed = true;
- }
-
- ret = drm_atomic_commit(state);
- }
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret != -EDEADLK)
+ break;
- if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
- goto retry;
}
+ if (!ret)
+ ret = __intel_display_resume(dev, state);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
if (i915.enable_fbc >= 0)
return !!i915.enable_fbc;
+ if (!HAS_FBC(dev_priv))
+ return 0;
+
if (IS_BROADWELL(dev_priv))
return 1;
return 0;
}
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+ if (intel_iommu_gfx_mapped &&
+ (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+ DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+ return true;
+ }
+#endif
+
+ return false;
+}
+
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
fbc->active = false;
fbc->work.scheduled = false;
+ if (need_fbc_vtd_wa(dev_priv))
+ mkwrite_device_info(dev_priv)->has_fbc = false;
+
i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct fb_info *info;
- if (!ifbdev)
+ if (!ifbdev || !ifbdev->fb)
return;
info = ifbdev->helper.fbdev;
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- if (dev_priv->fbdev)
- drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+ if (ifbdev && ifbdev->fb)
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
}
void intel_fbdev_restore_mode(struct drm_device *dev)
{
- int ret;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
- struct drm_fb_helper *fb_helper;
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
if (!ifbdev)
return;
intel_fbdev_sync(ifbdev);
+ if (!ifbdev->fb)
+ return;
- fb_helper = &ifbdev->helper;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret) {
+ if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
DRM_DEBUG("failed to restore crtc mode\n");
} else {
- mutex_lock(&fb_helper->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
- mutex_unlock(&fb_helper->dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
}
}
#define SKL_DDB_SIZE 896 /* in blocks */
#define BXT_DDB_SIZE 512
+#define SKL_SAGV_BLOCK_TIME 30 /* µs */
/*
* Return the index of a plane in the SKL DDB and wm result arrays. Primary
}
}
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+ dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+ return 0;
+
+ DRM_DEBUG_KMS("Enabling the SAGV\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
+
+ /* We don't need to wait for the SAGV when enabling */
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have an SAGV.
+ */
+ if (ret == -ENXIO) {
+ DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ return 0;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to enable the SAGV\n");
+ return ret;
+ }
+
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+ return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ uint32_t temp = GEN9_SAGV_DISABLE;
+
+ ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ &temp);
+ if (ret)
+ return ret;
+ else
+ return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+ int ret, result;
+
+ if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+ dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+ return 0;
+
+ DRM_DEBUG_KMS("Disabling the SAGV\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret == -ETIMEDOUT) {
+ DRM_ERROR("Request to disable SAGV timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have an SAGV.
+ */
+ if (result == -ENXIO) {
+ DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ return 0;
+ } else if (result < 0) {
+ DRM_ERROR("Failed to disable the SAGV\n");
+ return result;
+ }
+
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+ return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+ int level, plane;
+
+ /*
+ * SKL workaround: bspec recommends we disable the SAGV when we have
+ * more than one pipe enabled
+ *
+ * If there are no active CRTCs, no additional checks need be performed
+ */
+ if (hweight32(intel_state->active_crtcs) == 0)
+ return true;
+ else if (hweight32(intel_state->active_crtcs) > 1)
+ return false;
+
+ /* Since we're now guaranteed to only have one active CRTC... */
+ pipe = ffs(intel_state->active_crtcs) - 1;
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ for_each_plane(dev_priv, pipe, plane) {
+ /* Skip this plane if it's not enabled */
+ if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+ continue;
+
+ /* Find the highest enabled wm level for this plane */
+ for (level = ilk_wm_max_level(dev);
+ intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+ { }
+
+ /*
+ * If any of the planes on this pipe don't enable wm levels
+ * that incur memory latencies higher than 30µs we can't enable
+ * the SAGV
+ */
+ if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+ return false;
+ }
+
+ return true;
+}
+
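To make the final per-plane latency check concrete, here is a worked example against a hypothetical latency table (the numbers are invented for illustration, not real SKL values):

/* Hypothetical per-level memory latencies, in µs. */
static const u16 example_skl_latency[] = { 2, 4, 10, 25, 35 };

/*
 * A plane whose highest enabled watermark level is 3 only tolerates
 * 25 µs of blocked memory access, which is below the 30 µs
 * SKL_SAGV_BLOCK_TIME, so skl_can_enable_sagv() returns false.
 * A plane that also enables level 4 (35 µs >= 30 µs) passes the check.
 */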
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
const struct intel_crtc_state *cstate,
total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
}
- WARN_ON(cstate->plane_mask && total_data_rate == 0);
-
return total_data_rate;
}
plane_bytes_per_line *= 4;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
plane_blocks_per_line /= 4;
+ } else if (tiling == DRM_FORMAT_MOD_NONE) {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
} else {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
}
* pretend that all pipes switched active status so that we'll
* ensure a full DDB recompute.
*/
- if (dev_priv->wm.distrust_bios_wm)
+ if (dev_priv->wm.distrust_bios_wm) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
intel_state->active_pipe_changes = ~0;
+ /*
+ * We usually only initialize intel_state->active_crtcs if we're
+ * doing a modeset; make sure this field is always
+ * initialized during the sanitization process that happens
+ * on the first commit too.
+ */
+ if (!intel_state->modeset)
+ intel_state->active_crtcs = dev_priv->active_crtcs;
+ }
+
/*
* If the modeset changes which CRTC's are active, we need to
* recompute the DDB allocation for *all* active pipes, even
ret = skl_allocate_pipe_ddb(cstate, ddb);
if (ret)
return ret;
+
+ ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+ if (ret)
+ return ret;
}
return 0;
}
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+ struct skl_wm_values *src,
+ enum pipe pipe)
+{
+ dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+ memcpy(dst->plane[pipe], src->plane[pipe],
+ sizeof(dst->plane[pipe]));
+ memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+ sizeof(dst->plane_trans[pipe]));
+
+ dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+ memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+ sizeof(dst->ddb.y_plane[pipe]));
+ memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+ sizeof(dst->ddb.plane[pipe]));
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ int pipe;
if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
return;
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
- /* store the new configuration */
- dev_priv->wm.skl_hw = *results;
+ /*
+ * Store the new configuration (but only for the pipes that have
+ * changed; the other values weren't recomputed).
+ */
+ for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
+ skl_copy_wm_for_pipe(hw_vals, results, pipe);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
else
gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- if (IS_CHERRYVIEW(dev_priv))
- return;
- else if (IS_VALLEYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
if (!i915.enable_rc6)
}
}
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+ uint32_t flags =
+ I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+ switch (flags) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN6_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ MISSING_CASE(flags);
+ return 0;
+ }
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+ uint32_t flags =
+ I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+ switch (flags) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ default:
+ MISSING_CASE(flags);
+ return 0;
+ }
+}
+
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
+ int status;
+
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
*val = I915_READ_FW(GEN6_PCODE_DATA);
I915_WRITE_FW(GEN6_PCODE_DATA, 0);
+ if (INTEL_GEN(dev_priv) > 6)
+ status = gen7_check_mailbox_status(dev_priv);
+ else
+ status = gen6_check_mailbox_status(dev_priv);
+
+ if (status) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+ status);
+ return status;
+ }
+
return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val)
+ u32 mbox, u32 val)
{
+ int status;
+
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
I915_WRITE_FW(GEN6_PCODE_DATA, 0);
+ if (INTEL_GEN(dev_priv) > 6)
+ status = gen7_check_mailbox_status(dev_priv);
+ else
+ status = gen6_check_mailbox_status(dev_priv);
+
+ if (status) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+ status);
+ return status;
+ }
+
return 0;
}
I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
L3_HIGH_PRIO_CREDITS(2));
- /* WaInsertDummyPushConstPs:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ /* WaToEnableHwFixForPushConstHWBug:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
- /* WaInsertDummyPushConstPs:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
tristate "DRM Support for Mediatek SoCs"
depends on DRM
depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+ depends on COMMON_CLK
+ depends on HAVE_ARM_SMCCC
+ depends on OF
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
}
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_deferred_io qxl_defio = {
.delay = QXL_DIRTY_DELAY,
.deferred_io = drm_fb_helper_deferred_io,
};
+#endif
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
goto out_destroy_fbi;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
info->fbdefio = &qxl_defio;
fb_deferred_io_init(info);
+#endif
qdev->fbdev_info = info;
qdev->fbdev_qfb = &qfbdev->qfb;
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
- if (rdev->family >= CHIP_RV770)
+ if (ASIC_IS_AVIVO(rdev) &&
+ rdev->family != CHIP_RS780 &&
+ rdev->family != CHIP_RS880)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
-#if 1
- /* This is a temporary hack until the D3 cold support
- * makes it upstream. The ATPX power_control method seems
- * to still work on even if the system should be using
- * the new standardized hybrid D3 cold ACPI interface.
- */
- atpx->functions.power_cntl = true;
-#else
atpx->functions.power_cntl = false;
-#endif
atpx->is_hybrid = true;
}
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
/* Link drm_bridge to encoder */
bridge->encoder = encoder;
+ encoder->bridge = bridge;
ret = drm_bridge_attach(rcdu->ddev, bridge);
if (ret) {
.destroy = tegra_output_encoder_destroy,
};
+static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ if (dsi->slave)
+ tegra_dsi_unprepare(dsi->slave);
+
+ err = tegra_mipi_disable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
+ err);
+
+ pm_runtime_put(dsi->dev);
+}
+
static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
tegra_dsi_disable(dsi);
- pm_runtime_put(dsi->dev);
+ tegra_dsi_unprepare(dsi);
+}
+
+static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ err = tegra_mipi_enable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
+ err);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+
+ if (dsi->slave)
+ tegra_dsi_prepare(dsi->slave);
}
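For a ganged link the prepare/unprepare helpers recurse into the slave, keeping the runtime-PM and MIPI-calibration calls balanced. An illustrative call order (calling the top-level instance "master" is just shorthand here):

/*
 *   enable:  tegra_dsi_prepare(master)
 *              pm_runtime_get_sync(master), tegra_mipi_enable(master),
 *              tegra_dsi_pad_calibrate(master),
 *              then tegra_dsi_prepare(slave) repeats the same steps
 *   disable: tegra_dsi_unprepare(master)
 *              tegra_dsi_unprepare(slave) first, then
 *              tegra_mipi_disable(master), pm_runtime_put(master)
 */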
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
- int err;
-
- pm_runtime_get_sync(dsi->dev);
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0)
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+ tegra_dsi_prepare(dsi);
state = tegra_dsi_get_state(dsi);
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
+ mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
no_wait_gpu, mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict,
+ bool evict, bool interruptible,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ pr_err("Failed to expire sync object before unbinding TTM\n");
+ return ret;
+ }
+
ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
ufbdev->fb_count++;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
info->fbdefio = fbdefio;
fb_deferred_io_init(info);
}
+#endif
pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
ufbdev->fb_count--;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
info->fbops->fb_mmap = udl_fb_mmap;
}
+#endif
pr_warn("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
dev->pads = args.args[0];
dev->device = device;
- mutex_lock(&dev->mipi->lock);
-
- if (dev->mipi->usage_count++ == 0) {
- err = tegra_mipi_power_up(dev->mipi);
- if (err < 0) {
- dev_err(dev->mipi->dev,
- "failed to power up MIPI bricks: %d\n",
- err);
- return ERR_PTR(err);
- }
- }
-
- mutex_unlock(&dev->mipi->lock);
-
return dev;
put:
void tegra_mipi_free(struct tegra_mipi_device *device)
{
- int err;
+ platform_device_put(device->pdev);
+ kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
- mutex_lock(&device->mipi->lock);
+int tegra_mipi_enable(struct tegra_mipi_device *dev)
+{
+ int err = 0;
- if (--device->mipi->usage_count == 0) {
- err = tegra_mipi_power_down(device->mipi);
- if (err < 0) {
- /*
- * Not much that can be done here, so an error message
- * will have to do.
- */
- dev_err(device->mipi->dev,
- "failed to power down MIPI bricks: %d\n",
- err);
- }
- }
+ mutex_lock(&dev->mipi->lock);
- mutex_unlock(&device->mipi->lock);
+ if (dev->mipi->usage_count++ == 0)
+ err = tegra_mipi_power_up(dev->mipi);
+
+ mutex_unlock(&dev->mipi->lock);
+
+ return err;
- platform_device_put(device->pdev);
- kfree(device);
}
-EXPORT_SYMBOL(tegra_mipi_free);
+EXPORT_SYMBOL(tegra_mipi_enable);
+
+int tegra_mipi_disable(struct tegra_mipi_device *dev)
+{
+ int err = 0;
+
+ mutex_lock(&dev->mipi->lock);
+
+ if (--dev->mipi->usage_count == 0)
+ err = tegra_mipi_power_down(dev->mipi);
+
+ mutex_unlock(&dev->mipi->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(tegra_mipi_disable);
static int tegra_mipi_wait(struct tegra_mipi *mipi)
{
struct it87_data {
const struct attribute_group *groups[7];
enum chips type;
- u16 features;
+ u32 features;
u8 peci_mask;
u8 old_peci_mask;
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
#define AUTOSUSPEND_TIMEOUT 2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
/* AT91 TWI register definitions */
#define AT91_TWI_CR 0x0000 /* Control Register */
unsigned twi_cwgr_reg;
struct at91_twi_pdata *pdata;
bool use_dma;
+ bool use_alt_cmd;
bool recv_len_abort;
u32 fifo_size;
struct at91_twi_dma dma;
/* send stop when last byte has been written */
if (--dev->buf_len == 0)
- if (!dev->pdata->has_alt_cmd)
+ if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
* we just have to enable TXCOMP one.
*/
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
- if (!dev->pdata->has_alt_cmd)
+ if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
}
/* send stop if second but last byte has been read */
- if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
+ if (!dev->use_alt_cmd && dev->buf_len == 1)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
dev->buf_len, DMA_FROM_DEVICE);
- if (!dev->pdata->has_alt_cmd) {
+ if (!dev->use_alt_cmd) {
/* The last two bytes have to be read without using dma */
dev->buf += dev->buf_len - 2;
dev->buf_len = 2;
struct dma_chan *chan_rx = dma->chan_rx;
size_t buf_len;
- buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
+ buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
dma->direction = DMA_FROM_DEVICE;
/* Keep in mind that we won't use dma to read the last two bytes */
unsigned start_flags = AT91_TWI_START;
/* if only one byte is to be read, immediately stop transfer */
- if (!has_alt_cmd && dev->buf_len <= 1 &&
+ if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
!(dev->msg->flags & I2C_M_RECV_LEN))
start_flags |= AT91_TWI_STOP;
at91_twi_write(dev, AT91_TWI_CR, start_flags);
int ret;
unsigned int_addr_flag = 0;
struct i2c_msg *m_start = msg;
- bool is_read, use_alt_cmd = false;
+ bool is_read;
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
at91_twi_write(dev, AT91_TWI_IADR, internal_address);
}
+ dev->use_alt_cmd = false;
is_read = (m_start->flags & I2C_M_RD);
if (dev->pdata->has_alt_cmd) {
- if (m_start->len > 0) {
+ if (m_start->len > 0 &&
+ m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
at91_twi_write(dev, AT91_TWI_ACR,
AT91_TWI_ACR_DATAL(m_start->len) |
((is_read) ? AT91_TWI_ACR_DIR : 0));
- use_alt_cmd = true;
+ dev->use_alt_cmd = true;
} else {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
}
at91_twi_write(dev, AT91_TWI_MMR,
(m_start->addr << 16) |
int_addr_flag |
- ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
+ ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
dev->buf_len = m_start->len;
dev->buf = m_start->buf;
if (status & BIT(IS_M_START_BUSY_SHIFT)) {
iproc_i2c->xfer_is_done = 1;
- complete_all(&iproc_i2c->done);
+ complete(&iproc_i2c->done);
}
writel(status, iproc_i2c->base + IS_OFFSET);
dev->base + TXFCR_OFFSET);
writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET);
- complete_all(&dev->done);
+ complete(&dev->done);
return IRQ_HANDLED;
}
return IRQ_NONE;
brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE);
- complete_all(&dev->done);
+ complete(&dev->done);
dev_dbg(dev->device, "isr handled");
return IRQ_HANDLED;
msg->outsize = request_len;
msg->insize = response_len;
- result = cros_ec_cmd_xfer(bus->ec, msg);
+ result = cros_ec_cmd_xfer_status(bus->ec, msg);
if (result < 0) {
dev_err(dev, "Error transferring EC i2c message %d\n", result);
goto exit;
meson_i2c_add_token(i2c, TOKEN_STOP);
} else {
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
}
}
dev_dbg(i2c->dev, "error bit set\n");
i2c->error = -ENXIO;
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
goto out;
}
break;
case STATE_STOP:
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
break;
case STATE_IDLE:
break;
if (!clock_frequency_present) {
dev_err(&pdev->dev,
"Missing required parameter 'opencores,ip-clock-frequency'\n");
+ clk_disable_unprepare(i2c->clk);
return -ENODEV;
}
i2c->ip_clock_khz = clock_frequency / 1000;
default:
dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
i2c->reg_io_width);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_clk;
}
}
ret = ocores_init(&pdev->dev, i2c);
if (ret)
- return ret;
+ goto err_clk;
init_waitqueue_head(&i2c->wait);
ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- return ret;
+ goto err_clk;
}
/* hook up driver to tree */
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
dev_err(&pdev->dev, "Failed to add adapter\n");
- return ret;
+ goto err_clk;
}
/* add in known devices to the bus */
}
return 0;
+
+err_clk:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
}
static int ocores_i2c_remove(struct platform_device *pdev)
adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
if (!adap) {
ret = -ENODEV;
- goto err;
+ goto err_with_revert;
}
p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
err_with_put:
i2c_put_adapter(adap);
+ err_with_revert:
+ of_changeset_revert(&priv->chan[new_chan].chgset);
err:
dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
return ret;
if (addr->dev_addr.bound_dev_if) {
ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
- if (!ndev)
- return -ENODEV;
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
if (ndev->flags & IFF_LOOPBACK) {
dev_put(ndev);
- if (!id_priv->id.device->get_netdev)
- return -EOPNOTSUPP;
+ if (!id_priv->id.device->get_netdev) {
+ ret = -EOPNOTSUPP;
+ goto err2;
+ }
ndev = id_priv->id.device->get_netdev(id_priv->id.device,
id_priv->id.port_num);
- if (!ndev)
- return -ENODEV;
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
}
route->path_rec->net = &init_net;
(ep->mpa_pkt + sizeof(*mpa));
ep->ird = ntohs(mpa_v2_params->ird) &
MPA_V2_IRD_ORD_MASK;
+ ep->ird = min_t(u32, ep->ird,
+ cur_max_read_depth(ep->com.dev));
ep->ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
+ ep->ord = min_t(u32, ep->ord,
+ cur_max_read_depth(ep->com.dev));
PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
if (conn_param->ord > ep->ird) {
if (RELAXED_IRD_NEGOTIATION) {
- ep->ord = ep->ird;
+ conn_param->ord = ep->ird;
} else {
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct c4iw_cq *chp;
- int ret;
+ int ret = 0;
unsigned long flag;
chp = to_c4iw_cq(ibcq);
spin_lock_irqsave(&chp->lock, flag);
- ret = t4_arm_cq(&chp->cq,
- (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ t4_arm_cq(&chp->cq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+ ret = t4_cq_notempty(&chp->cq);
spin_unlock_irqrestore(&chp->lock, flag);
- if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- ret = 0;
return ret;
}
return (CQE_GENBIT(cqe) == cq->gen);
}
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+ return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret;
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
-#include <linux/cpumask.h>
#include "hfi.h"
#include "affinity.h"
size_t count)
{
struct hfi1_affinity_node *entry;
- struct cpumask mask;
+ cpumask_var_t mask;
int ret, i;
spin_lock(&node_affinity.lock);
if (!entry)
return -EINVAL;
- ret = cpulist_parse(buf, &mask);
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ ret = cpulist_parse(buf, mask);
if (ret)
- return ret;
+ goto out;
- if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
+ if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
dd_dev_warn(dd, "Invalid CPU mask\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
mutex_lock(&sdma_affinity_mutex);
/* reset the SDMA interrupt affinity details */
init_cpu_mask_set(&entry->def_intr);
- cpumask_copy(&entry->def_intr.mask, &mask);
+ cpumask_copy(&entry->def_intr.mask, mask);
/*
* Reassign the affinity for each SDMA interrupt.
*/
if (ret)
break;
}
-
mutex_unlock(&sdma_affinity_mutex);
+out:
+ free_cpumask_var(mask);
return ret ? ret : strnlen(buf, PAGE_SIZE);
}
DEBUGFS_FILE_OPS(ctx_stats);
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
+ __acquires(RCU)
{
struct qp_iter *iter;
loff_t n = *pos;
- rcu_read_lock();
iter = qp_iter_init(s->private);
+
+ /* stop calls rcu_read_unlock */
+ rcu_read_lock();
+
if (!iter)
return NULL;
- while (n--) {
+ do {
if (qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
- }
+ } while (n--);
return iter;
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
+ __must_hold(RCU)
{
struct qp_iter *iter = iter_ptr;
}
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-__releases(RCU)
+ __releases(RCU)
{
rcu_read_unlock();
}
}
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet packet,
+ struct hfi1_packet *packet,
struct hfi1_devdata *dd)
{
struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
- struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
- packet.rhf_addr);
+ struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+ packet->rhf_addr);
+ u8 etype = rhf_rcv_type(packet->rhf);
- if (hdr2sc(hdr, packet.rhf) != 0xf) {
+ if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
int hwstate = read_logical_state(dd);
if (hwstate != LSTATE_ACTIVE) {
/* Auto activate link on non-SC15 packet receive */
if (unlikely(rcd->ppd->host_link_state ==
HLS_UP_ARMED) &&
- set_armed_to_active(rcd, packet, dd))
+ set_armed_to_active(rcd, &packet, dd))
goto bail;
last = process_rcv_packet(&packet, thread);
}
if (fd) {
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
+ atomic_inc(&fd->mm->mm_count);
}
fp->private_data = fd;
ret = assign_ctxt(fp, &uinfo);
if (ret < 0)
return ret;
- setup_ctxt(fp);
+ ret = setup_ctxt(fp);
if (ret)
return ret;
ret = user_init(fp);
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
+ mmdrop(fdata->mm);
kobject_put(&dd->kobj);
kfree(fdata);
return 0;
((!!(rhf_dc_info(rhf))) << 4);
}
+#define HFI1_JKEY_WIDTH 16
+#define HFI1_JKEY_MASK (BIT(16) - 1)
+#define HFI1_ADMIN_JKEY_RANGE 32
+
+/*
+ * J_KEYs are split and allocated in the following groups:
+ * 0 - 31 - users with administrator privileges
+ * 32 - 63 - kernel protocols using KDETH packets
+ * 64 - 65535 - all other users using KDETH packets
+ */
static inline u16 generate_jkey(kuid_t uid)
{
- return from_kuid(current_user_ns(), uid) & 0xffff;
+ u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
+
+ if (capable(CAP_SYS_ADMIN))
+ jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
+ else if (jkey < 64)
+ jkey |= BIT(HFI1_JKEY_WIDTH - 1);
+
+ return jkey;
}
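Tracing a few example UIDs through generate_jkey() shows how the three ranges from the comment above are enforced (the UID values are hypothetical):

/*
 *   uid 1000, CAP_SYS_ADMIN:     1000 & 0xffff = 1000, then & 31 -> 8
 *                                (falls in the admin range 0-31)
 *   uid 40, no CAP_SYS_ADMIN:    40 < 64, so BIT(15) is ORed in -> 0x8028
 *                                (moved out of the kernel range 32-63)
 *   uid 1000, no CAP_SYS_ADMIN:  already >= 64, returned unchanged -> 0x03e8
 */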
/*
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
const struct pci_device_id *);
void hfi1_free_devdata(struct hfi1_devdata *);
-void cc_state_reclaim(struct rcu_head *rcu);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
spin_unlock(&ppd->cc_state_lock);
if (cc_state)
- call_rcu(&cc_state->rcu, cc_state_reclaim);
+ kfree_rcu(cc_state, rcu);
}
free_credit_return(dd);
u32 len = OPA_AM_CI_LEN(am) + 1;
int ret;
+ if (dd->pport->port_type != PORT_TYPE_QSFP) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
spin_unlock(&ppd->cc_state_lock);
- call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+ kfree_rcu(old_cc_state, rcu);
}
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
-void cc_state_reclaim(struct rcu_head *rcu)
-{
- struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
-
- kfree(cc_state);
-}
-
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
iter->dev = dev;
iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
- if (qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
return iter;
}
u8 *data)
{
struct hfi1_pportdata *ppd;
- u32 excess_len = 0;
- int ret = 0;
+ u32 excess_len = len;
+ int ret = 0, offset = 0;
if (port_num > dd->num_pports || port_num < 1) {
dd_dev_info(dd, "%s: Invalid port number %d\n",
}
memcpy(data, &ppd->qsfp_info.cache[addr], len);
+
+ if (addr <= QSFP_MONITOR_VAL_END &&
+ (addr + len) >= QSFP_MONITOR_VAL_START) {
+ /* Overlap with the dynamic channel monitor range */
+ if (addr < QSFP_MONITOR_VAL_START) {
+ if (addr + len <= QSFP_MONITOR_VAL_END)
+ len = addr + len - QSFP_MONITOR_VAL_START;
+ else
+ len = QSFP_MONITOR_RANGE;
+ offset = QSFP_MONITOR_VAL_START - addr;
+ addr = QSFP_MONITOR_VAL_START;
+ } else if (addr == QSFP_MONITOR_VAL_START) {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_RANGE;
+ } else {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_VAL_END - addr + 1;
+ }
+ /* Refresh the values of the dynamic monitors from the cable */
+ ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
+ if (ret != len) {
+ ret = -EAGAIN;
+ goto set_zeroes;
+ }
+ }
+
return 0;
set_zeroes:
/* Defined fields that Intel requires of qualified cables */
/* Byte 0 is Identifier, not checked */
/* Byte 1 is reserved "status MSB" */
+#define QSFP_MONITOR_VAL_START 22
+#define QSFP_MONITOR_VAL_END 81
+#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
#define QSFP_TX_CTRL_BYTE_OFFS 86
#define QSFP_PWR_CTRL_BYTE_OFFS 93
#define QSFP_CDR_CTRL_BYTE_OFFS 98
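Two example requests show how the hunk above remaps a cached QSFP read onto the dynamic monitor window (bytes 22-81, QSFP_MONITOR_RANGE = 60); the request sizes are hypothetical:

/*
 *   addr = 0,  len = 128: the window is fully covered, so 60 bytes are
 *                         re-read from cable address 22 into data + 22;
 *                         the rest of the reply comes from the cache.
 *   addr = 50, len = 100: the request starts inside the window and runs
 *                         past its end, so 81 - 50 + 1 = 32 bytes are
 *                         refreshed from address 50 into data + 0.
 */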
struct i40e_client *client;
struct i40iw_hw hw;
struct i40iw_cm_core cm_core;
- unsigned long *mem_resources;
+ u8 *mem_resources;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
*next = resource_num + 1;
if (*next == max_resources)
*next = 0;
- spin_unlock_irqrestore(&iwdev->resource_lock, flags);
*req_resource_num = resource_num;
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
return 0;
}
buf += hdr_len;
}
- if (pd_len)
- memcpy(buf, pdata->addr, pd_len);
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
atomic_set(&sqbuf->refcount, 1);
return 0;
}
-/**
- * i40iw_loopback_nop - Send a nop
- * @qp: associated hw qp
- */
-static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
-{
- u64 *wqe;
- u64 header;
-
- wqe = qp->qp_uk.sq_base->elem;
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
- LS_64(0, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 24, header);
-}
-
/**
* i40iw_qp_disconnect - free qp and close cm
* @iwqp: associate qp for the connection
} else {
if (iwqp->page)
iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- i40iw_loopback_nop(&iwqp->sc_qp);
+ dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
}
if (iwqp->page)
enum i40iw_status_code status;
struct i40iw_handler *hdl;
+ hdl = i40iw_find_netdev(ldev->netdev);
+ if (hdl)
+ return 0;
+
hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
if (!hdl)
return -ENOMEM;
{
if (!mem)
return I40IW_ERR_PARAM;
+ /*
+ * mem->va points to the parent of mem, so both mem and mem->va
+ * cannot be touched once mem->va is freed
+ */
kfree(mem->va);
- mem->va = NULL;
return 0;
}
return &iwqp->ibqp;
error:
i40iw_free_qp_resources(iwdev, iwqp, qp_num);
- kfree(mem);
return ERR_PTR(err_code);
}
}
if (iwpbl->pbl_allocated)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
- kfree(iwpbl->iwmr);
- iwpbl->iwmr = NULL;
+ kfree(iwmr);
return 0;
}
checksum == cpu_to_be16(0xffff);
}
-static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
- unsigned tail, struct mlx4_cqe *cqe, int is_eth)
+static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+ unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
}
-
- return 0;
}
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
is_send)) {
pr_warn("Completion for NOP opcode detected!\n");
- return -EINVAL;
+ return -EAGAIN;
}
/* Resize CQ in progress */
if (unlikely(!mqp)) {
pr_warn("CQ %06x with entry for unknown QPN %06x\n",
cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
- return -EINVAL;
+ return -EAGAIN;
}
*cur_qp = to_mibqp(mqp);
if (unlikely(!msrq)) {
pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
cq->mcq.cqn, srq_num);
- return -EINVAL;
+ return -EAGAIN;
}
}
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
- MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
- return use_tunnel_data(*cur_qp, cq, wc, tail,
- cqe, is_eth);
+ MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+ use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+ is_eth);
+ return 0;
+ }
}
wc->slid = be16_to_cpu(cqe->rlid);
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/io-mapping.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
attr->max_srq =
(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
- attr->max_send_sge = ((rsp->max_write_send_sge &
+ attr->max_send_sge = ((rsp->max_recv_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
- attr->max_recv_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+ attr->max_recv_sge = (rsp->max_recv_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
- attr->max_rdma_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
+ attr->max_rdma_sge = (rsp->max_wr_rd_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF,
};
struct ocrdma_mbx_query_config {
struct ocrdma_mbx_rsp rsp;
u32 qp_srq_cq_ird_ord;
u32 max_pd_ca_ack_delay;
- u32 max_write_send_sge;
+ u32 max_recv_send_sge;
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
+ u32 max_wr_rd_sge;
+ u32 ird_pgsz_num_pages;
};
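The renamed max_recv_send_sge field packs both SGE limits into one dword; a short decoding sketch with a made-up mailbox value:

u32 max_recv_send_sge = 0x00200018;	/* hypothetical mailbox value */
u32 max_send_sge = (max_recv_send_sge &
		    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
		    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;	/* low 16 bits  -> 24 */
u32 max_recv_sge = (max_recv_send_sge &
		    OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
		    OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;	/* high 16 bits -> 32 */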
struct ocrdma_fw_ver_rsp {
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = dev->attr.max_send_sge;
- attr->max_sge_rd = attr->max_sge;
+ attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
+ attr->max_sge_rd = dev->attr.max_rdma_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
DEBUGFS_FILE(ctx_stats)
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+ __acquires(RCU)
{
struct qib_qp_iter *iter;
loff_t n = *pos;
- rcu_read_lock();
iter = qib_qp_iter_init(s->private);
+
+ /* stop calls rcu_read_unlock */
+ rcu_read_lock();
+
if (!iter)
return NULL;
- while (n--) {
+ do {
if (qib_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
- }
+ } while (n--);
return iter;
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
+ __must_hold(RCU)
{
struct qib_qp_iter *iter = iter_ptr;
}
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+ __releases(RCU)
{
rcu_read_unlock();
}
pos = *ppos;
- if (pos != 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (count != sizeof(struct qib_flash)) {
- ret = -EINVAL;
- goto bail;
- }
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
+ if (pos != 0 || count != sizeof(struct qib_flash))
+ return -EINVAL;
- if (copy_from_user(tmp, buf, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
dd = private2dd(file);
if (qib_eeprom_write(dd, pos, tmp, count)) {
bail_tmp:
kfree(tmp);
-
-bail:
return ret;
}
return NULL;
iter->dev = dev;
- if (qib_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
return iter;
}
return err;
}
- if (pci_register_driver(&usnic_ib_pci_driver)) {
+ err = pci_register_driver(&usnic_ib_pci_driver);
+ if (err) {
usnic_err("Unable to register with PCI\n");
goto out_umem_fini;
}
free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
- vfree(qp->r_rq.wq);
+ if (!qp->ip)
+ vfree(qp->r_rq.wq);
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
- isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+ ret = -ENOMEM;
goto out_unmap_login_req_buf;
}
if (ret)
goto err_query_port;
+ snprintf(sport->port_guid, sizeof(sport->port_guid),
+ "0x%016llx%016llx",
+ be64_to_cpu(sport->gid.global.subnet_prefix),
+ be64_to_cpu(sport->gid.global.interface_id));
+
if (!sport->mad_agent) {
memset(®_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
sdev->device->name, i);
goto err_ring;
}
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "0x%016llx%016llx",
- be64_to_cpu(sport->gid.global.subnet_prefix),
- be64_to_cpu(sport->gid.global.interface_id));
}
spin_lock(&srpt_dev_lock);
/* Reset the KBC controller to clear all previous status. */
reset_control_assert(kbc->rst);
udelay(100);
- reset_control_assert(kbc->rst);
+ reset_control_deassert(kbc->rst);
udelay(100);
tegra_kbc_config_pins(kbc);
goto free_struct_buff;
reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
- map_offset = 0;
for (i = 0; i < rdesc->num_registers; i++) {
struct rmi_register_desc_item *item = &rdesc->registers[i];
int reg_size = struct_buf[offset];
item->reg = reg;
item->reg_size = reg_size;
+ map_offset = 0;
+
do {
for (b = 0; b < 7; b++) {
if (struct_buf[offset] & (0x1 << b))
serio->write = i8042_aux_write;
serio->start = i8042_start;
serio->stop = i8042_stop;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
if (idx < 0) {
ads784x_hwmon_unregister(spi, ts);
- regulator_disable(ts->reg);
regulator_put(ts->reg);
if (!ts->get_pendown_state) {
return -ENODEV;
/* Power GPIO pin */
- data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
+ data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
if (IS_ERR(data->gpio_power)) {
if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
dev_err(dev, "Shutdown GPIO request failed\n");
* We may have concurrent producers, so we need to be careful
* not to touch any of the shadow cmdq state.
*/
- queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
dev_err(smmu->dev, "skipping command in error state:\n");
for (i = 0; i < ARRAY_SIZE(cmd); ++i)
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
return;
}
- queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
case STRTAB_STE_0_CFG_S2_TRANS:
ste_live = true;
break;
+ case STRTAB_STE_0_CFG_ABORT:
+ if (disable_bypass)
+ break;
default:
BUG(); /* STE corruption */
}
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
- int flags, ret;
- u32 fsr, fsynr, resume;
+ u32 fsr, fsynr;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (!(fsr & FSR_FAULT))
return IRQ_NONE;
- if (fsr & FSR_IGN)
- dev_err_ratelimited(smmu->dev,
- "Unexpected context fault (fsr 0x%x)\n",
- fsr);
-
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
- flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
- if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
- ret = IRQ_HANDLED;
- resume = RESUME_RETRY;
- } else {
- dev_err_ratelimited(smmu->dev,
- "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
- iova, fsynr, cfg->cbndx);
- ret = IRQ_NONE;
- resume = RESUME_TERMINATE;
- }
-
- /* Clear the faulting FSR */
- writel(fsr, cb_base + ARM_SMMU_CB_FSR);
- /* Retry or terminate any stalled transactions */
- if (fsr & FSR_SS)
- writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cfg->cbndx);
- return ret;
+ writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
}
/* SCTLR */
- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
if (stage1)
reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
if (!iovad)
return;
- put_iova_domain(iovad);
+ if (iovad->granule)
+ put_iova_domain(iovad);
kfree(iovad);
domain->iova_cookie = NULL;
}
}
}
-static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
dma_addr_t dma_limit)
{
+ struct iova_domain *iovad = domain->iova_cookie;
unsigned long shift = iova_shift(iovad);
unsigned long length = iova_align(iovad, size) >> shift;
+ if (domain->geometry.force_aperture)
+ dma_limit = min(dma_limit, domain->geometry.aperture_end);
/*
* Enforce size-alignment to be safe - there could perhaps be an
* attribute to control this per-device, or at least per-domain...
if (!pages)
return NULL;
- iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+ iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
if (!iova)
goto out_free_pages;
phys_addr_t phys = page_to_phys(page) + offset;
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
- struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+ struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
if (!iova)
return DMA_ERROR_CODE;
prev = s;
}
- iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+ iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
if (!iova)
goto out_restore_sg;
int prot = IOMMU_READ;
arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
- if (attr & ARM_V7S_PTE_AP_RDONLY)
+ if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C)
prot |= IOMMU_CACHE;
+ if (pte & ARM_V7S_ATTR_XN(lvl))
+ prot |= IOMMU_NOEXEC;
return prot;
}
bool enable_4GB;
};
-static int compare_of(struct device *dev, void *data)
+static inline int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
}
-static int mtk_iommu_bind(struct device *dev)
+static inline int mtk_iommu_bind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
return component_bind_all(dev, &data->smi_imu);
}
-static void mtk_iommu_unbind(struct device *dev)
+static inline void mtk_iommu_unbind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
u32 val;
val = readl_relaxed(base + GITS_CTLR);
- if (val & GITS_CTLR_QUIESCENT)
+ /*
+ * GIC architecture specification requires the ITS to be both
+ * disabled and quiescent for writes to GITS_BASER<n> or
+ * GITS_CBASER to not have UNPREDICTABLE results.
+ */
+ if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
return 0;
/* Disable the generation of all interrupts to this ITS */
#endif
#ifdef CONFIG_CPU_PM
+/* Check whether it's single security state view */
+static bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
if (cmd == CPU_PM_EXIT) {
- gic_enable_redist(true);
+ if (gic_dist_security_disabled())
+ gic_enable_redist(true);
gic_cpu_sys_reg_init();
- } else if (cmd == CPU_PM_ENTER) {
+ } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
gic_write_grpen1(0);
gic_enable_redist(false);
}
int cpu;
unsigned long flags, map = 0;
+ if (unlikely(nr_cpu_ids == 1)) {
+ /* Only one CPU? let's do a self-IPI... */
+ writel_relaxed(2 << 24 | irq,
+ gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ return;
+ }
+
raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* Convert our logical CPU mask into a physical one. */
unsigned long flags;
int i;
- irq_set_chip_and_handler(virq, &gic_level_irq_controller,
- handle_level_irq);
-
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
{
if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
return gic_local_irq_domain_map(d, virq, hw);
+
+ irq_set_chip_and_handler(virq, &gic_level_irq_controller,
+ handle_level_irq);
+
return gic_shared_irq_domain_map(d, virq, hw, 0);
}
hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
- &gic_edge_irq_controller,
+ &gic_level_irq_controller,
NULL);
if (ret)
goto error;
+ irq_set_handler(virq + i, handle_level_irq);
+
ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
if (ret)
goto error;
return;
}
+static void gic_dev_domain_activate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+}
+
static struct irq_domain_ops gic_dev_domain_ops = {
.xlate = gic_dev_domain_xlate,
.alloc = gic_dev_domain_alloc,
.free = gic_dev_domain_free,
+ .activate = gic_dev_domain_activate,
};
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
if (!d->nr_stripes ||
d->nr_stripes > INT_MAX ||
d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
- pr_err("nr_stripes too large");
+ pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+ (unsigned)d->nr_stripes);
return -ENOMEM;
}
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = NULL;
+ const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
- if (ret != 0)
+ if (ret != 0) {
+ if (ret == -ENOMEM)
+ err = "cache_alloc(): -ENOMEM";
+ else
+ err = "cache_alloc(): unknown error";
goto err;
+ }
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
err = "error calling kobject_add";
u8 key[0];
};
-#define MIN_IOS 16
+#define MIN_IOS 64
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
pb->bio_submitted = true;
/*
- * Map reads as normal only if corrupt_bio_byte set.
+	 * Error reads if neither corrupt_bio_byte nor drop_writes is set.
+ * Otherwise, flakey_end_io() will decide if the reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
- /* If flags were specified, only corrupt those that match. */
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc))
- goto map_bio;
- else
+ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
return -EIO;
+ goto map_bio;
}
/*
struct flakey_c *fc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- /*
- * Corrupt successful READs while in down state.
- */
if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
- if (fc->corrupt_bio_byte)
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc)) {
+ /*
+ * Corrupt successful matching READs while in down state.
+ */
corrupt_bio_data(bio, fc);
- else
+
+ } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+ /*
+ * Error read during the down_interval if drop_writes
+ * wasn't configured.
+ */
return -EIO;
+ }
}
return error;
core->nr_regions = le64_to_cpu(disk->nr_regions);
}
-static int rw_header(struct log_c *lc, int rw)
+static int rw_header(struct log_c *lc, int op)
{
- lc->io_req.bi_op = rw;
+ lc->io_req.bi_op = op;
+ lc->io_req.bi_op_flags = 0;
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
{
int r;
- r = rw_header(log, READ);
+ r = rw_header(log, REQ_OP_READ);
if (r)
return r;
header_to_disk(&lc->header, lc->disk_header);
/* write the new header */
- r = rw_header(lc, WRITE);
+ r = rw_header(lc, REQ_OP_WRITE);
if (!r) {
r = flush_header(lc);
if (r)
log_clear_bit(lc, lc->clean_bits, i);
}
- r = rw_header(lc, WRITE);
+ r = rw_header(lc, REQ_OP_WRITE);
if (r)
fail_log_device(lc);
else {
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
-#define RT_FLAG_KEEP_RS_FROZEN 5
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
{
unsigned long min_region_size = rs->ti->len / (1 << 21);
+ if (rs_is_raid0(rs))
+ return 0;
+
if (!region_size) {
/*
* Choose a reasonable default. All figures in sectors.
rebuild_cnt++;
switch (rs->raid_type->level) {
+ case 0:
+ break;
case 1:
if (rebuild_cnt >= rs->md.raid_disks)
goto too_many;
case 0:
break;
default:
+ /*
+ * We have to keep any raid0 data/metadata device pairs or
+ * the MD raid0 personality will fail to start the array.
+ */
+ if (rs_is_raid0(rs))
+ continue;
+
dev = container_of(rdev, struct raid_dev, rdev);
if (dev->meta_dev)
dm_put_device(ti, dev->meta_dev);
} else {
/* Process raid1 without delta_disks */
mddev->raid_disks = rs->raid_disks;
- set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
reshape = false;
}
} else {
if (reshape) {
set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
} else if (mddev->raid_disks < rs->raid_disks)
/* Create new superblocks and bitmaps, if any new disks */
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
goto bad;
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
/* Takeover ain't recovery, so disable recovery */
rs_setup_recovery(rs, MaxSector);
rs_set_new(rs);
{
struct raid_set *rs = ti->private;
- if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
- if (!rs->md.suspended)
- mddev_suspend(&rs->md);
- rs->md.ro = 1;
- }
+ if (!rs->md.suspended)
+ mddev_suspend(&rs->md);
+
+ rs->md.ro = 1;
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
int i;
- uint64_t failed_devices, cleared_failed_devices = 0;
+ uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
unsigned long flags;
+ bool cleared = false;
struct dm_raid_superblock *sb;
+ struct mddev *mddev = &rs->md;
struct md_rdev *r;
+ /* RAID personalities have to provide hot add/remove methods or we need to bail out. */
+ if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
+ return;
+
+ memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
+
for (i = 0; i < rs->md.raid_disks; i++) {
r = &rs->dev[i].rdev;
if (test_bit(Faulty, &r->flags) && r->sb_page &&
* ourselves.
*/
if ((r->raid_disk >= 0) &&
- (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
+ (mddev->pers->hot_remove_disk(mddev, r) != 0))
/* Failed to revive this device, try next */
continue;
clear_bit(Faulty, &r->flags);
clear_bit(WriteErrorSeen, &r->flags);
clear_bit(In_sync, &r->flags);
- if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+ if (mddev->pers->hot_add_disk(mddev, r)) {
r->raid_disk = -1;
r->saved_raid_disk = -1;
r->flags = flags;
} else {
r->recovery_offset = 0;
- cleared_failed_devices |= 1 << i;
+ set_bit(i, (void *) cleared_failed_devices);
+ cleared = true;
}
}
}
- if (cleared_failed_devices) {
+
+ /* If any failed devices could be cleared, update all sbs failed_devices bits */
+ if (cleared) {
+ uint64_t failed_devices[DISKS_ARRAY_ELEMS];
+
rdev_for_each(r, &rs->md) {
sb = page_address(r->sb_page);
- failed_devices = le64_to_cpu(sb->failed_devices);
- failed_devices &= ~cleared_failed_devices;
- sb->failed_devices = cpu_to_le64(failed_devices);
+ sb_retrieve_failed_devices(sb, failed_devices);
+
+ for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
+ failed_devices[i] &= ~cleared_failed_devices[i];
+
+ sb_update_failed_devices(sb, failed_devices);
}
}
}
* devices are reachable again.
*/
attempt_restore_of_faulty_devices(rs);
- } else {
- mddev->ro = 0;
- mddev->in_sync = 0;
+ }
- /*
- * When passing in flags to the ctr, we expect userspace
- * to reset them because they made it to the superblocks
- * and reload the mapping anyway.
- *
- * -> only unfreeze recovery in case of a table reload or
- * we'll have a bogus recovery/reshape position
- * retrieved from the superblock by the ctr because
- * the ongoing recovery/reshape will change it after read.
- */
- if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ mddev->ro = 0;
+ mddev->in_sync = 0;
- if (mddev->suspended)
- mddev_resume(mddev);
- }
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ if (mddev->suspended)
+ mddev_resume(mddev);
}
static struct target_type raid_target = {
struct path_info *pi = NULL;
struct dm_path *current_path = NULL;
+ local_irq_save(flags);
current_path = *this_cpu_ptr(s->current_path);
if (current_path) {
percpu_counter_dec(&s->repeat_count);
- if (percpu_counter_read_positive(&s->repeat_count) > 0)
+ if (percpu_counter_read_positive(&s->repeat_count) > 0) {
+ local_irq_restore(flags);
return current_path;
+ }
}
- spin_lock_irqsave(&s->lock, flags);
+ spin_lock(&s->lock);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
--set-section-flags .text=alloc,readonly \
--rename-section .text=.rodata
-$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
+targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
$(call if_changed,objcopy)
*/
mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
- i = idr_alloc(&ctx->afu->contexts_idr, ctx,
- ctx->afu->adapter->native->sl_ops->min_pe,
+ i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
ctx->afu->num_procs, GFP_NOWAIT);
idr_preload_end();
mutex_unlock(&afu->contexts_lock);
u64 (*timebase_read)(struct cxl *adapter);
int capi_mode;
bool needs_reset_before_disable;
- int min_pe;
};
struct cxl_native {
struct bin_attribute cxl_attr;
int adapter_num;
int user_irqs;
+ int min_pe;
u64 ps_size;
u16 psl_rev;
u16 base_image;
return fail_psl_irq(afu, &irq_info);
}
-void native_irq_wait(struct cxl_context *ctx)
+static void native_irq_wait(struct cxl_context *ctx)
{
u64 dsisr;
int timeout = 1000;
static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
{
- u64 psl_dsnctl;
+ u64 psl_dsnctl, psl_fircntl;
u64 chipid;
u64 capp_unit_id;
int rc;
cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
/* snoop write mask */
cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
- /* set fir_accum */
- cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
+ /* set fir_cntl to recommended value for production env */
+ psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
+ psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
+ psl_fircntl |= 0x1ULL; /* ce_thresh */
+ cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
/* for debugging with trace arrays */
cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
.write_timebase_ctrl = write_timebase_ctrl_xsl,
.timebase_read = timebase_read_xsl,
.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
- .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
};
static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+ /* Mellanox CX-4 */
dev_info(&adapter->dev, "Device uses an XSL\n");
adapter->native->sl_ops = &xsl_ops;
+ adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
} else {
dev_info(&adapter->dev, "Device uses a PSL\n");
adapter->native->sl_ops = &psl_ops;
/* Setup the PHB using arch provided callback */
phb->ops = &cxl_pcie_pci_ops;
phb->cfg_addr = NULL;
- phb->cfg_data = 0;
+ phb->cfg_data = NULL;
phb->private_data = afu;
phb->controller_ops = cxl_pci_controller_ops;
/* This is a pointer to outside our current stack frame. */
if (bad_frame) {
- bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
+ bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
} else {
/* Put start address just inside stack. */
bad_stack = task_stack_page(current) + THREAD_SIZE;
break;
if (req_op(next) == REQ_OP_DISCARD ||
+ req_op(next) == REQ_OP_SECURE_ERASE ||
req_op(next) == REQ_OP_FLUSH)
break;
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
+ bool req_is_special = mmc_req_is_special(req);
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
}
out:
- if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- mmc_req_is_special(req))
+ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
/*
* We only like normal block requests and discards.
*/
- if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
+ if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
+ req_op(req) != REQ_OP_SECURE_ERASE) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
spin_unlock_irq(q->queue_lock);
if (req || mq->mqrq_prev->req) {
+ bool req_is_special = mmc_req_is_special(req);
+
set_current_state(TASK_RUNNING);
mq->issue_fn(mq, req);
cond_resched();
* has been finished. Do not assign it to previous
* request.
*/
- if (mmc_req_is_special(req))
+ if (req_is_special)
mq->mqrq_cur->req = NULL;
mq->mqrq_prev->brq.mrq.data = NULL;
static inline bool mmc_req_is_special(struct request *req)
{
return req &&
- (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
+ (req_op(req) == REQ_OP_FLUSH ||
+ req_op(req) == REQ_OP_DISCARD ||
+ req_op(req) == REQ_OP_SECURE_ERASE);
}
struct request;
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
"0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
"0 for stable (default), 1 for bandwidth, "
"2 for count");
module_param(min_links, int, 0);
* BCM5325 and BCM5365 share most definitions below
*/
#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
-#define ARLTBL_MAC_MASK 0xffffffffffff
+#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
#define ARLTBL_VID_MASK_25 0xff
#define ARLTBL_VID_MASK 0xfff
return err;
}
+#ifdef CONFIG_NET_DSA_HWMON
static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
int reg)
{
return ret;
}
+#endif
static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
{
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
+#else
+ return -ENODEV;
#endif
}
priv->dev = dev;
priv->regs = devm_ioremap_resource(dev, &res_regs);
- if (IS_ERR(priv->regs))
- return PTR_ERR(priv->regs);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_put_node;
+ }
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
info->data = TG3_RSS_MAX_NUM_QS;
}
- /* The first interrupt vector only
- * handles link interrupts.
- */
- info->data -= 1;
return 0;
default:
}
if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+ (!ec->rx_coalesce_usecs) ||
(ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
(ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
(ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_USRIO_DISABLED 0x00000010
+#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_JUMBO 0x00000010
/* Bit manipulation macros */
#define MACB_BIT(name) \
dm9000_open(struct net_device *dev)
{
struct board_info *db = netdev_priv(dev);
+ unsigned int irq_flags = irq_get_trigger_type(dev->irq);
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
/* If there is no IRQ type specified, tell the user that this is a
* problem
*/
- if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+ if (irq_flags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
+ irq_flags |= IRQF_SHARED;
+
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
/* Initialize DM9000 board */
dm9000_init_dm9000(dev);
- if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
- dev->name, dev))
+ if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
return -EAGAIN;
/* Now that we have an interrupt handler hooked up we can unmask
* our interrupts
{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
- {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
| FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX
- | FLAG2_DMA_BURST,
+ | FLAG2_DMA_BURST
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
- | FLAG2_NO_DISABLE_RX,
+ | FLAG2_NO_DISABLE_RX
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
- | FLAG2_HAS_EEE,
+ | FLAG2_HAS_EEE
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
clear_bit(__E1000_RESETTING, &adapter->state);
}
+/**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+ u64 time_delta, rem, temp;
+ cycle_t systim_next;
+ u32 incvalue;
+ int i;
+
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+ /* VMWare users have seen incvalue of zero, don't div / 0 */
+ rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+ break;
+ }
+
+ return systim;
+}
+
/**
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
cc);
struct e1000_hw *hw = &adapter->hw;
u32 systimel, systimeh;
- cycle_t systim, systim_next;
+ cycle_t systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
* we read SYSTIMH, the value of SYSTIMH has been incremented and we
systim = (cycle_t)systimel;
systim |= (cycle_t)systimeh << 32;
- if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
- u64 time_delta, rem, temp;
- u32 incvalue;
- int i;
-
- /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
- * check to see that the time is incrementing at a reasonable
- * rate and is a multiple of incvalue
- */
- incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
- for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
- /* latch SYSTIMH on read of SYSTIML */
- systim_next = (cycle_t)er32(SYSTIML);
- systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
- time_delta = systim_next - systim;
- temp = time_delta;
- /* VMWare users have seen incvalue of zero, don't div / 0 */
- rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
-
- systim = systim_next;
+ if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+ systim = e1000e_sanitize_systim(hw, systim);
- if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
- (rem == 0))
- break;
- }
- }
return systim;
}
**/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
+ int i, tc_unused = 0;
u8 num_tc = 0;
- int i;
+ u8 ret = 0;
/* Scan the ETS Config Priority Table to find
* traffic class enabled for a given priority
- * and use the traffic class index to get the
- * number of traffic classes enabled
+ * and create a bitmask of enabled TCs
*/
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- if (dcbcfg->etscfg.prioritytable[i] > num_tc)
- num_tc = dcbcfg->etscfg.prioritytable[i];
- }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
- /* Traffic class index starts from zero so
- * increment to return the actual count
+ /* Now scan the bitmask to check for
+ * contiguous TCs starting with TC0
*/
- return num_tc + 1;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TC - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = 1;
+ }
+ }
+
+ /* There is always at least TC0 */
+ if (!ret)
+ ret = 1;
+
+ return ret;
}
/**
}
}
- shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+ shhwtstamps.hwtstamp =
+ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
+ struct igb_adapter *adapter = q_vector->adapter;
+ int adjust = 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
- igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[1]));
+
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_RX_LATENCY_1000;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
}
/**
}
}
skb_hwtstamps(skb)->hwtstamp =
- ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
/* Update the last_rx_timestamp timer in order to enable watchdog check
* for error case of latched timestamp on a dropped packet.
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/* fall through */
case ixgbe_mac_82598EB:
/* legacy case, we can just disable VLAN filtering */
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
+ /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
+ /* Set VLAN filtering to enabled */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
break;
/* fall through */
case ixgbe_mac_82598EB:
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
struct tcf_exts *exts, u64 *action, u8 *queue)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
int err;
if (tc_no_actions(exts))
return -EINVAL;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Drop action */
if (is_tcf_gact_shot(a)) {
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXALL |
case PHY_INTERFACE_MODE_MII:
ge_mode = 1;
break;
- case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVMII:
ge_mode = 2;
break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!mac->id)
+ goto err_phy;
+ ge_mode = 3;
+ break;
default:
- dev_err(eth->dev, "invalid phy_mode\n");
- return -1;
+ goto err_phy;
}
/* put the gmac into the right mode */
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
+
+ if (of_phy_is_fixed_link(mac->of_node))
+ mac->phy_dev->supported |=
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
+ of_node_put(np);
+
return 0;
+
+err_phy:
+ of_node_put(np);
+ dev_err(eth->dev, "invalid phy_mode\n");
+ return -EINVAL;
}
static int mtk_mdio_init(struct mtk_eth *eth)
return &ring->buf[idx];
}
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(dev,
+ dma_unmap_single(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(dev,
+ dma_unmap_page(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
- mapped_addr = dma_map_single(&dev->dev, skb->data,
+ mapped_addr = dma_map_single(eth->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+ mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
goto err_dma;
if (i == nr_frags - 1 &&
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
- mtk_tx_unmap(&dev->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
netdev->stats.rx_dropped++;
goto release_desc;
}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+ dma_addr = dma_map_single(eth->dev,
new_data + NET_SKB_PAD,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(&netdev->dev, trxd.rxd1,
+ dma_unmap_single(eth->dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
done[mac]++;
budget--;
}
- mtk_tx_unmap(eth->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
ring->last_free = desc;
atomic_inc(&ring->free_count);
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth->dev, &ring->buf[i]);
+ mtk_tx_unmap(eth, &ring->buf[i]);
kfree(ring->buf);
ring->buf = NULL;
}
goto free_netdev;
}
spin_lock_init(&mac->hw_stats->stats_lock);
+ u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
u32 *action, u32 *flow_tag)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
*action = 0;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
u32 *action, u32 *dest_vport)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
*action = 0;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
struct mlx5e_tc_flow *flow;
struct tc_action *a;
struct mlx5_fc *counter;
+ LIST_HEAD(actions);
u64 bytes;
u64 packets;
u64 lastuse;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
- tc_for_each_action(a, f->exts)
+ tcf_exts_to_list(f->exts, &actions);
+ list_for_each_entry(a, &actions, list)
tcf_action_stats_update(a, bytes, packets, lastuse);
return 0;
*/
MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
/* reg_ritr_virtual_router
* Virtual router ID associated with the router interface.
* Access: RW
mlxsw_reg_ritr_op_set(payload, op);
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+ mlxsw_reg_ritr_lb_en_set(payload, 1);
mlxsw_reg_ritr_mtu_set(payload, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
{
MLXSW_REG_ZERO(ralue, payload);
mlxsw_reg_ralue_protocol_set(payload, protocol);
+ mlxsw_reg_ralue_op_set(payload, op);
mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
mlxsw_reg_ralue_entry_type_set(payload,
kfree(mlxsw_sp_vport);
}
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
if (!vid)
return 0;
- if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
- netdev_warn(dev, "VID=%d already configured\n", vid);
+ if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
return 0;
- }
mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+ if (!mlxsw_sp_vport)
return -ENOMEM;
- }
/* When adding the first VLAN interface on a bridged port we need to
* transition all the active 802.1Q bridge VLANs to use explicit
*/
if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to Virtual mode\n");
+ if (err)
goto err_port_vp_mode_trans;
- }
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err) {
- netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+ if (err)
goto err_port_vid_learning_set;
- }
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
+ if (err)
goto err_port_add_vid;
- }
return 0;
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
- int err;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
return 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- netdev_warn(dev, "VID=%d does not exist\n", vid);
+ if (WARN_ON(!mlxsw_sp_vport))
return 0;
- }
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
- return err;
- }
+ mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- if (err) {
- netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
- return err;
- }
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
/* Drop FID reference. If this was the last reference the
* resources will be freed.
* transition all active 802.1Q bridge VLANs to use VID to FID
* mappings and set port's mode to VLAN mode.
*/
- if (list_is_singular(&mlxsw_sp_port->vports_list)) {
- err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to VLAN mode\n");
- return err;
- }
- }
+ if (list_is_singular(&mlxsw_sp_port->vports_list))
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
bool ingress)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
int err;
if (!tc_single_action(cls->exts)) {
return -ENOTSUPP;
}
- tc_for_each_action(a, cls->exts) {
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
return -ENOTSUPP;
return 0;
}
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->pvid = 1;
+
+ return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
goto err_port_dcb_init;
}
+ err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_pvid_vport_create;
+ }
+
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
goto err_core_port_init;
}
- err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
- if (err)
- goto err_port_vlan_init;
-
- mlxsw_sp->ports[local_port] = mlxsw_sp_port;
return 0;
-err_port_vlan_init:
- mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
unregister_netdev(dev);
err_register_netdev:
+ mlxsw_sp->ports[local_port] = NULL;
+ mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
+ mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
if (!mlxsw_sp_port)
return;
- mlxsw_sp->ports[local_port] = NULL;
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
- mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
- mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+ mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_ARPUC,
},
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MTUERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_TTLERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LBERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_OSPF,
+ },
{
.func = mlxsw_sp_rx_listener_func,
.local_port = MLXSW_PORT_DONT_CARE,
u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
char pfcc_pl[MLXSW_REG_PFCC_LEN];
mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
struct ieee_pfc *pfc)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
int err;
- if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
- pfc->pfc_en) {
+ if (pause_en && pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}
err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
mlxsw_sp_port->dcb.ets->prio_tc,
- false, pfc);
+ pause_en, pfc);
if (err) {
netdev_err(dev, "Failed to configure port's headroom for PFC\n");
return err;
err_port_pfc_set:
__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc, false,
+ mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
mlxsw_sp_port->dcb.pfc);
return err;
}
const struct mlxsw_sp_router_fib4_add_info *info = data;
struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
kfree(info);
}
kfree(f);
+ mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}
}
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end, bool init)
+ u16 vid_begin, u16 vid_end)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
int err;
- if (!init && !mlxsw_sp_port->bridged)
+ if (!mlxsw_sp_port->bridged)
return -EINVAL;
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
return err;
}
- if (init)
- goto out;
-
pvid = mlxsw_sp_port->pvid;
if (pvid >= vid_begin && pvid <= vid_end) {
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
-out:
	/* Changing activity bits only if HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
- return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
- vlan->vid_begin, vlan->vid_end, false);
+ return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
+ vlan->vid_end);
}
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
u16 vid;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+ __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
static int
mlxsw_sp_fdb_fini(mlxsw_sp);
}
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- /* Allow only untagged packets to ingress and tag them internally
- * with VID 1.
- */
- mlxsw_sp_port->pvid = 1;
- err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
- true);
- if (err) {
- netdev_err(dev, "Unable to init VLANs\n");
- return err;
- }
-
- /* Add implicit VLAN interface in the device, so that untagged
- * packets will be classified to the default vFID.
- */
- err = mlxsw_sp_port_add_vid(dev, 0, 1);
- if (err)
- netdev_err(dev, "Failed to configure default vFID\n");
-
- return err;
-}
-
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
+ MLXSW_TRAP_ID_MTUERROR = 0x52,
+ MLXSW_TRAP_ID_TTLERROR = 0x53,
+ MLXSW_TRAP_ID_LBERROR = 0x54,
+ MLXSW_TRAP_ID_OSPF = 0x55,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
DCBX_APP_SF_ETHTYPE);
}
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
static bool qed_dcbx_app_port(u32 app_info_bitmap)
{
return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_PORT);
}
-static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_DEFAULT);
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
}
-static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_port(app_info_bitmap) &&
- proto_id == QED_TCP_PORT_ISCSI);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
}
-static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_FCOE);
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
}
-static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_ROCE);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
}
-static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_port(app_info_bitmap) &&
- proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_UDP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
}
static void
static bool
qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
u32 app_prio_bitmap,
- u16 id, enum dcbx_protocol_type *type)
+ u16 id, enum dcbx_protocol_type *type, bool ieee)
{
- if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_FCOE;
- } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE;
- } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ISCSI;
- } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
- } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE_V2;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct qed_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl,
- u32 pri_tc_tbl, int count, bool dcbx_enabled)
+ u32 pri_tc_tbl, int count, u8 dcbx_version)
{
u8 tc, priority_map;
enum dcbx_protocol_type type;
+ bool enable, ieee;
u16 protocol_id;
int priority;
- bool enable;
int i;
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
- protocol_id, &type)) {
+ protocol_id, &type, ieee)) {
/* ETH always have the enable bit reset, as it gets
* vlan information per packet. For other protocols,
* should be set according to the dcbx_enabled
struct dcbx_ets_feature *p_ets;
struct qed_hw_info *p_info;
u32 pri_tc_tbl, flags;
- bool dcbx_enabled;
+ u8 dcbx_version;
int num_entries;
int rc = 0;
- /* If DCBx version is non zero, then negotiation was
- * successfuly performed
- */
flags = p_hwfn->p_dcbx_info->operational.flags;
- dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+ dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
- num_entries, dcbx_enabled);
+ num_entries, dcbx_version);
if (rc)
return rc;
p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
data.pf_id = p_hwfn->rel_pf_id;
- data.dcbx_enabled = dcbx_enabled;
+ data.dcbx_enabled = !!dcbx_version;
qed_dcbx_dp_protocol(p_hwfn, &data);
qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
- struct qed_dcbx_params *p_params)
+ struct qed_dcbx_params *p_params, bool ieee)
{
struct qed_app_entry *entry;
u8 pri_map;
DCBX_APP_NUM_ENTRIES);
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_params->app_entry[i];
- entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF));
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+ QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
entry->prio = ffs(pri_map) - 1;
entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PROTOCOL_ID);
qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
entry->proto_id,
- &entry->proto_type);
+ &entry->proto_type, ieee);
}
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
- pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
+ pri_map = p_ets->pri_tc_tbl[0];
for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
struct dcbx_ets_feature *p_ets,
- u32 pfc, struct qed_dcbx_params *p_params)
+ u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
{
- qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
}
p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
-				   p_feat->pfc, &params->local.params);
+				   p_feat->pfc, &params->local.params, false);
params->local.valid = true;
}
p_feat = &p_hwfn->p_dcbx_info->remote.features;
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
-				   p_feat->pfc, &params->remote.params);
+				   p_feat->pfc, &params->remote.params, false);
params->remote.valid = true;
}
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
-				   p_feat->pfc, &params->operational.params);
+				   p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
p_ets->pri_tc_tbl[0] |= val;
}
- p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
for (i = 0; i < 2; i++) {
p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
static void
qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
- struct qed_dcbx_params *p_params)
+ struct qed_dcbx_params *p_params, bool ieee)
{
u32 *entry;
int i;
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
- *entry &= ~DCBX_APP_SF_MASK;
- if (p_params->app_entry[i].ethtype)
- *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_SHIFT);
- else
- *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
+ if (ieee) {
+ *entry &= ~DCBX_APP_SF_IEEE_MASK;
+ switch (p_params->app_entry[i].sf_ieee) {
+ case QED_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ }
+
*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
*entry |= ((u32)p_params->app_entry[i].proto_id <<
DCBX_APP_PROTOCOL_ID_SHIFT);
struct dcbx_local_params *local_admin,
struct qed_dcbx_set *params)
{
+ bool ieee = false;
+
local_admin->flags = 0;
memcpy(&local_admin->features,
&p_hwfn->p_dcbx_info->operational.features,
sizeof(local_admin->features));
- if (params->enabled)
+ if (params->enabled) {
local_admin->config = params->ver_num;
- else
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
- ¶ms->config.params);
+ ¶ms->config.params, ieee);
}
int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
break;
/* First empty slot */
- if (!entry->proto_id)
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
break;
+ }
}
if (i == QED_DCBX_MAX_APP_PROTOCOL) {
(entry->proto_id == app->protocol))
break;
/* First empty slot */
- if (!entry->proto_id)
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
break;
+ }
}
if (i == QED_DCBX_MAX_APP_PROTOCOL) {
#define DCBX_APP_SF_SHIFT 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_SHIFT 16
};
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 64
-#define QLCNIC_LINUX_VERSIONID "5.3.64"
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID "5.3.65"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
-#define QLCNIC_TX_POLL_BUDGET 128
#define QLCNIC_TCP_HDR_SIZE 20
#define QLCNIC_TCP_TS_OPTION_SIZE 12
#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_adapter *adapter;
- budget = QLCNIC_TX_POLL_BUDGET;
tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
adapter = tx_ring->adapter;
work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
-struct qlcnic_async_work_list {
+struct qlcnic_async_cmd {
struct list_head list;
- struct work_struct work;
- void *ptr;
struct qlcnic_cmd_args *cmd;
};
struct workqueue_struct *bc_trans_wq;
struct workqueue_struct *bc_async_wq;
struct workqueue_struct *bc_flr_wq;
- struct list_head async_list;
+ struct qlcnic_adapter *adapter;
+ struct list_head async_cmd_list;
+ struct work_struct vf_async_work;
+ spinlock_t queue_lock; /* async_cmd_list queue lock */
};
struct qlcnic_sriov {
#define QLC_83XX_VF_RESET_FAIL_THRESH 8
#define QLC_BC_CMD_MAX_RETRY_CNT 5
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
}
bc->bc_async_wq = wq;
- INIT_LIST_HEAD(&bc->async_list);
+ INIT_LIST_HEAD(&bc->async_cmd_list);
+ INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+ spin_lock_init(&bc->queue_lock);
+ bc->adapter = adapter;
for (i = 0; i < num_vfs; i++) {
vf = &sriov->vf_info[i];
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
- struct list_head *head = &bc->async_list;
- struct qlcnic_async_work_list *entry;
+ struct list_head *head = &bc->async_cmd_list;
+ struct qlcnic_async_cmd *entry;
flush_workqueue(bc->bc_async_wq);
+ cancel_work_sync(&bc->vf_async_work);
+
+ spin_lock(&bc->queue_lock);
while (!list_empty(head)) {
- entry = list_entry(head->next, struct qlcnic_async_work_list,
+ entry = list_entry(head->next, struct qlcnic_async_cmd,
list);
- cancel_work_sync(&entry->work);
list_del(&entry->list);
+ kfree(entry->cmd);
kfree(entry);
}
+ spin_unlock(&bc->queue_lock);
}
void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
- struct qlcnic_async_work_list *entry;
- struct qlcnic_adapter *adapter;
+ struct qlcnic_async_cmd *entry, *tmp;
+ struct qlcnic_back_channel *bc;
struct qlcnic_cmd_args *cmd;
+ struct list_head *head;
+ LIST_HEAD(del_list);
+
+ bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+ head = &bc->async_cmd_list;
+
+ spin_lock(&bc->queue_lock);
+ list_splice_init(head, &del_list);
+ spin_unlock(&bc->queue_lock);
+
+ list_for_each_entry_safe(entry, tmp, &del_list, list) {
+ list_del(&entry->list);
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+ kfree(entry);
+ }
+
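+	/* entries added to async_cmd_list after the splice above are
+	 * picked up by rescheduling the work item below
+	 */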
+ if (!list_empty(head))
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
- entry = container_of(work, struct qlcnic_async_work_list, work);
- adapter = entry->ptr;
- cmd = entry->cmd;
- __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
-static struct qlcnic_async_work_list *
-qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+ struct qlcnic_cmd_args *cmd)
{
- struct list_head *node;
- struct qlcnic_async_work_list *entry = NULL;
- u8 empty = 0;
+ struct qlcnic_async_cmd *entry = NULL;
- list_for_each(node, &bc->async_list) {
- entry = list_entry(node, struct qlcnic_async_work_list, list);
- if (!work_pending(&entry->work)) {
- empty = 1;
- break;
- }
- }
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
- if (!empty) {
- entry = kzalloc(sizeof(struct qlcnic_async_work_list),
- GFP_ATOMIC);
- if (entry == NULL)
- return NULL;
- list_add_tail(&entry->list, &bc->async_list);
- }
+ entry->cmd = cmd;
+
+ spin_lock(&bc->queue_lock);
+ list_add_tail(&entry->list, &bc->async_cmd_list);
+ spin_unlock(&bc->queue_lock);
return entry;
}
static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
- work_func_t func, void *data,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_async_work_list *entry = NULL;
+ struct qlcnic_async_cmd *entry = NULL;
- entry = qlcnic_sriov_get_free_node_async_work(bc);
- if (!entry)
+ entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+ if (!entry) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
return;
+ }
- entry->ptr = data;
- entry->cmd = cmd;
- INIT_WORK(&entry->work, func);
- queue_work(bc->bc_async_wq, &entry->work);
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
}
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
if (adapter->need_fw_reset)
return -EIO;
- qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
- adapter, cmd);
+ qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
return 0;
}
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
ndev->stats.rx_packets++;
+ kmemleak_not_leak(new_skb);
} else {
ndev->stats.rx_dropped++;
new_skb = skb;
kfree_skb(skb);
goto err_cleanup;
}
+ kmemleak_not_leak(skb);
}
/* continue even if we didn't manage to submit all
* receive descs
static void tsi108_timed_checker(unsigned long dev_ptr);
+#ifdef DEBUG
static void dump_eth_one(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
TSI_READ(TSI108_EC_RXESTAT),
TSI_READ(TSI108_EC_RXERR), data->rxpending);
}
+#endif
/* Synchronization is needed between the thread and up/down events.
* Note that the PHY is accessed through the same registers for both
u32 event;
};
-struct garp_wrk {
- struct work_struct dwrk;
- struct net_device *netdev;
- struct netvsc_device *netvsc_dev;
-};
-
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
struct work_struct work;
u32 msg_enable; /* debug level */
- struct garp_wrk gwrk;
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
/* the device is going away */
bool start_remove;
+
+ /* State to manage the associated VF interface. */
+ struct net_device *vf_netdev;
+ bool vf_inject;
+ atomic_t vf_use_cnt;
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 vf_alloc;
+ /* Serial number of the VF to team with */
+ u32 vf_serial;
};
/* Per netvsc device */
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- /* 1: allocated, serial number is valid. 0: not allocated */
- u32 vf_alloc;
- /* Serial number of the VF to team with */
- u32 vf_serial;
atomic_t open_cnt;
- /* State to manage the associated VF interface. */
- bool vf_inject;
- struct net_device *vf_netdev;
- atomic_t vf_use_cnt;
};
static inline struct netvsc_device *
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
atomic_set(&net_device->open_cnt, 0);
- atomic_set(&net_device->vf_use_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
- net_device->vf_netdev = NULL;
- net_device->vf_inject = false;
-
return net_device;
}
nvscdev->send_table[i] = tab[i];
}
-static void netvsc_send_vf(struct netvsc_device *nvdev,
+static void netvsc_send_vf(struct net_device_context *net_device_ctx,
struct nvsp_message *nvmsg)
{
- nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
- nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+ net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+ net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}
static inline void netvsc_receive_inband(struct hv_device *hdev,
- struct netvsc_device *nvdev,
- struct nvsp_message *nvmsg)
+ struct net_device_context *net_device_ctx,
+ struct nvsp_message *nvmsg)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
- netvsc_send_vf(nvdev, nvmsg);
+ netvsc_send_vf(net_device_ctx, nvmsg);
break;
}
}
struct vmpacket_descriptor *desc)
{
struct nvsp_message *nvmsg;
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
nvmsg = (struct nvsp_message *)((unsigned long)
desc + (desc->offset8 << 3));
break;
case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(device, net_device, nvmsg);
+ netvsc_receive_inband(device, net_device_ctx, nvmsg);
break;
default:
struct sk_buff *skb;
struct sk_buff *vf_skb;
struct netvsc_stats *rx_stats;
- struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
u32 bytes_recvd = packet->total_data_buflen;
int ret = 0;
if (!net || net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
- if (READ_ONCE(netvsc_dev->vf_inject)) {
- atomic_inc(&netvsc_dev->vf_use_cnt);
- if (!READ_ONCE(netvsc_dev->vf_inject)) {
+ if (READ_ONCE(net_device_ctx->vf_inject)) {
+ atomic_inc(&net_device_ctx->vf_use_cnt);
+ if (!READ_ONCE(net_device_ctx->vf_inject)) {
/*
* We raced; just move on.
*/
- atomic_dec(&netvsc_dev->vf_use_cnt);
+ atomic_dec(&net_device_ctx->vf_use_cnt);
goto vf_injection_done;
}
* the host). Deliver these via the VF interface
* in the guest.
*/
- vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
- csum_info, *data, vlan_tci);
+ vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
+ packet, csum_info, *data,
+ vlan_tci);
if (vf_skb != NULL) {
- ++netvsc_dev->vf_netdev->stats.rx_packets;
- netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+ ++net_device_ctx->vf_netdev->stats.rx_packets;
+ net_device_ctx->vf_netdev->stats.rx_bytes +=
+ bytes_recvd;
netif_receive_skb(vf_skb);
} else {
++net->stats.rx_dropped;
ret = NVSP_STAT_FAIL;
}
- atomic_dec(&netvsc_dev->vf_use_cnt);
+ atomic_dec(&net_device_ctx->vf_use_cnt);
return ret;
}
free_netdev(netdev);
}
-static void netvsc_notify_peers(struct work_struct *wrk)
-{
- struct garp_wrk *gwrk;
-
- gwrk = container_of(wrk, struct garp_wrk, dwrk);
-
- netdev_notify_peers(gwrk->netdev);
-
- atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
-}
-
static struct net_device *get_netvsc_net_device(char *mac)
{
struct net_device *dev, *found = NULL;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (netvsc_dev == NULL)
+ if (!netvsc_dev || net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
* Take a reference on the module.
*/
try_module_get(THIS_MODULE);
- netvsc_dev->vf_netdev = vf_netdev;
+ net_device_ctx->vf_netdev = vf_netdev;
return NOTIFY_OK;
}
+static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
+{
+ net_device_ctx->vf_inject = true;
+}
+
+static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
+{
+ net_device_ctx->vf_inject = false;
+
+ /* Wait for currently active users to drain out. */
+ while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
+ udelay(50);
+}
static int netvsc_vf_up(struct net_device *vf_netdev)
{
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
- netvsc_dev->vf_inject = true;
+ netvsc_inject_enable(net_device_ctx);
/*
* Open the device before switching data path.
netif_carrier_off(ndev);
- /*
- * Now notify peers. We are scheduling work to
- * notify peers; take a reference to prevent
- * the VF interface from vanishing.
- */
- atomic_inc(&netvsc_dev->vf_use_cnt);
- net_device_ctx->gwrk.netdev = vf_netdev;
- net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
- schedule_work(&net_device_ctx->gwrk.dwrk);
+ /* Now notify peers through VF device. */
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
return NOTIFY_OK;
}
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
- netvsc_dev->vf_inject = false;
- /*
- * Wait for currently active users to
- * drain out.
- */
-
- while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
- udelay(50);
+ netvsc_inject_disable(net_device_ctx);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(netvsc_dev);
netif_carrier_on(ndev);
- /*
- * Notify peers.
- */
- atomic_inc(&netvsc_dev->vf_use_cnt);
- net_device_ctx->gwrk.netdev = ndev;
- net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
- schedule_work(&net_device_ctx->gwrk.dwrk);
+
+ /* Now notify peers through netvsc device. */
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
return NOTIFY_OK;
}
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (netvsc_dev == NULL)
+ if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
-
- netvsc_dev->vf_netdev = NULL;
+ netvsc_inject_disable(net_device_ctx);
+ net_device_ctx->vf_netdev = NULL;
module_put(THIS_MODULE);
return NOTIFY_OK;
}
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
- INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
+ atomic_set(&net_device_ctx->vf_use_cnt, 0);
+ net_device_ctx->vf_netdev = NULL;
+ net_device_ctx->vf_inject = false;
+
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
- /* Avoid Vlan, Bonding dev with same MAC registering as VF */
- if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING))
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+ return NOTIFY_DONE;
+
+ /* Avoid Bonding master dev with same MAC registering as VF */
+ if (event_dev->priv_flags & IFF_BONDING &&
+ event_dev->flags & IFF_MASTER)
return NOTIFY_DONE;
switch (event) {
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
+ unsigned int nest_level;
};
/**
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+static struct lock_class_key macsec_netdev_addr_lock_key;
+
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
return macsec_priv(dev)->real_dev->ifindex;
}
+
+static int macsec_get_nest_level(struct net_device *dev)
+{
+ return macsec_priv(dev)->nest_level;
+}
+
static const struct net_device_ops macsec_netdev_ops = {
.ndo_init = macsec_dev_init,
.ndo_uninit = macsec_dev_uninit,
.ndo_start_xmit = macsec_start_xmit,
.ndo_get_stats64 = macsec_get_stats64,
.ndo_get_iflink = macsec_get_iflink,
+ .ndo_get_lock_subclass = macsec_get_nest_level,
};
static const struct device_type macsec_type = {
}
}
+static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+
+ unregister_netdevice_queue(dev, head);
+ list_del_rcu(&macsec->secys);
+ macsec_del_dev(macsec);
+ netdev_upper_dev_unlink(real_dev, dev);
+
+ macsec_generation++;
+}
+
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
- macsec_generation++;
+ macsec_common_dellink(dev, head);
- unregister_netdevice_queue(dev, head);
- list_del_rcu(&macsec->secys);
if (list_empty(&rxd->secys)) {
netdev_rx_handler_unregister(real_dev);
kfree(rxd);
}
-
- macsec_del_dev(macsec);
}
static int register_macsec_dev(struct net_device *real_dev,
dev_hold(real_dev);
+ macsec->nest_level = dev_get_nest_level(real_dev) + 1;
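+	/* use the nesting depth as the lockdep subclass so that addr_list_lock
+	 * taken at different stacking levels is not flagged as recursive locking
+	 */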
+ netdev_lockdep_set_classes(dev);
+ lockdep_set_class_and_subclass(&dev->addr_list_lock,
+ &macsec_netdev_addr_lock_key,
+ macsec_get_nest_level(dev));
+
+ err = netdev_upper_dev_link(real_dev, dev);
+ if (err < 0)
+ goto unregister;
+
/* need to be already registered so that ->init has run and
* the MAC addr is set
*/
if (rx_handler && sci_exists(real_dev, sci)) {
err = -EBUSY;
- goto unregister;
+ goto unlink;
}
err = macsec_add_dev(dev, sci, icv_len);
if (err)
- goto unregister;
+ goto unlink;
if (data)
macsec_changelink_common(dev, data);
del_dev:
macsec_del_dev(macsec);
+unlink:
+ netdev_upper_dev_unlink(real_dev, dev);
unregister:
unregister_netdevice(dev);
return err;
rxd = macsec_data_rtnl(real_dev);
list_for_each_entry_safe(m, n, &rxd->secys, secys) {
- macsec_dellink(m->secy.netdev, &head);
+ macsec_common_dellink(m->secy.netdev, &head);
}
+
+ netdev_rx_handler_unregister(real_dev);
+ kfree(rxd);
+
unregister_netdevice_many(&head);
break;
}
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
- vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
+ vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
rtnl_unlock();
synchronize_rcu();
- skb_array_cleanup(&q->skb_array);
sock_put(&q->sk);
}
static void macvtap_sock_destruct(struct sock *sk)
{
struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
- struct sk_buff *skb;
- while ((skb = skb_array_consume(&q->skb_array)) != NULL)
- kfree_skb(skb);
+ skb_array_cleanup(&q->skb_array);
}
static int macvtap_open(struct inode *inode, struct file *file)
data[i] = kszphy_get_stat(phydev, i);
}
-static int kszphy_resume(struct phy_device *phydev)
+static int kszphy_suspend(struct phy_device *phydev)
{
- int value;
+ /* Disable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
- mutex_lock(&phydev->lock);
+ return genphy_suspend(phydev);
+}
- value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+static int kszphy_resume(struct phy_device *phydev)
+{
+ genphy_resume(phydev);
- kszphy_config_intr(phydev);
- mutex_unlock(&phydev->lock);
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
return 0;
}
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8061,
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
- fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+ fl4.saddr = *saddr;
rt = ip_route_output_key(vxlan->net, &fl4);
if (!IS_ERR(rt)) {
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
- fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+ fl6.saddr = *saddr;
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
struct rtable *rt = NULL;
const struct iphdr *old_iph;
union vxlan_addr *dst;
- union vxlan_addr remote_ip;
+ union vxlan_addr remote_ip, local_ip;
+ union vxlan_addr *src;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = rdst->remote_vni;
dst = &rdst->remote_ip;
+ src = &vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
} else {
if (!info) {
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
vni = vxlan_tun_id_to_vni(info->key.tun_id);
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
- if (remote_ip.sa.sa_family == AF_INET)
+ if (remote_ip.sa.sa_family == AF_INET) {
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
- else
+ local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
+ } else {
remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
+ local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
+ }
dst = &remote_ip;
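+		/* use the source address supplied in the tunnel metadata */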
+ src = &local_ip;
dst_cache = &info->dst_cache;
}
}
if (dst->sa.sa_family == AF_INET) {
- __be32 saddr;
-
if (!vxlan->vn4_sock)
goto drop;
sk = vxlan->vn4_sock->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
- dst->sin.sin_addr.s_addr, &saddr,
+ dst->sin.sin_addr.s_addr,
+ &src->sin.sin_addr.s_addr,
dst_cache, info);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
}
/* Bypass encapsulation if the destination is local */
- if (rt->rt_flags & RTCF_LOCAL &&
+ if (!info && rt->rt_flags & RTCF_LOCAL &&
!(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
if (err < 0)
goto xmit_tx_error;
- udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+ udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;
- struct in6_addr saddr;
u32 rt6i_flags;
if (!vxlan->vn6_sock)
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
- label, &dst->sin6.sin6_addr, &saddr,
+ label, &dst->sin6.sin6_addr,
+ &src->sin6.sin6_addr,
dst_cache, info);
if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
/* Bypass encapsulation if the destination is local */
rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
- if (rt6i_flags & RTF_LOCAL &&
+ if (!info && rt6i_flags & RTF_LOCAL &&
!(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
return;
}
udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
- &saddr, &dst->sin6.sin6_addr, tos, ttl,
+ &src->sin6.sin6_addr,
+ &dst->sin6.sin6_addr, tos, ttl,
label, src_port, dst_port, !udp_sum);
#endif
}
mutex_unlock(&wl->mutex);
}
-static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta)
+static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
- struct wl1271 *wl = wl_sta->wl;
+ struct wl1271 *wl = hw->priv;
u8 hlid = wl_sta->hlid;
/* return in units of Kbps */
}
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+ btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
revalidate_disk(btt->btt_disk);
return 0;
}
static DEVICE_ATTR_RW(namespace);
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+ ssize_t rc;
+
+ device_lock(dev);
+ if (dev->driver)
+ rc = sprintf(buf, "%llu\n", nd_btt->size);
+ else {
+ /* no size to convey if the btt instance is disabled */
+ rc = -ENXIO;
+ }
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(size);
+
static struct attribute *nd_btt_attributes[] = {
&dev_attr_sector_size.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
+ &dev_attr_size.attr,
NULL,
};
struct nd_namespace_common *ndns;
struct btt *btt;
unsigned long lbasize;
+ u64 size;
u8 *uuid;
int id;
};
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
- enum nvme_ctrl_state old_state = ctrl->state;
+ enum nvme_ctrl_state old_state;
bool changed = false;
spin_lock_irq(&ctrl->lock);
+
+ old_state = ctrl->state;
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
default:
break;
}
- spin_unlock_irq(&ctrl->lock);
if (changed)
ctrl->state = new_state;
+ spin_unlock_irq(&ctrl->lock);
+
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
- if (ret >= 0)
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
- if (ret >= 0)
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
reinit_completion(&dev->ioq_wait);
retry:
timeout = ADMIN_TIMEOUT;
- for (; i > 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
-
- if (!pass)
- nvme_suspend_queue(nvmeq);
- if (nvme_delete_queue(nvmeq, opcode))
+ for (; i > 0; i--, sent++)
+ if (nvme_delete_queue(dev->queues[i], opcode))
break;
- ++sent;
- }
+
while (sent--) {
timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
if (timeout == 0)
nvme_stop_queues(&dev->ctrl);
csts = readl(dev->bar + NVME_REG_CSTS);
}
+
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(dev->queues[i]);
+
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
- for (i = dev->queue_count - 1; i >= 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
- nvme_suspend_queue(nvmeq);
- }
+ nvme_suspend_queue(dev->queues[0]);
} else {
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
* more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
-#include <linux/jiffies.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
-#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
/* XXX: really should move to a generic header sooner or later.. */
static inline void put_unaligned_le24(u32 val, u8 *p)
list_del(&ctrl->list);
mutex_unlock(&nvme_rdma_ctrl_mutex);
- if (ctrl->ctrl.tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- nvme_rdma_dev_put(ctrl->device);
- }
kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
- if (ctrl->queue_count > 1)
+ if (ctrl->queue_count > 1) {
nvme_start_queues(&ctrl->ctrl);
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
{
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
struct rdma_conn_param param = { };
- struct nvme_rdma_cm_req priv;
+ struct nvme_rdma_cm_req priv = { };
int ret;
param.qp_num = queue->qp->qp_num;
* that caught the event. Since we hold the callout until the controller
* deletion is completed, we'll deadlock if the controller deletion will
* call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * of destroying this queue beforehand, destroy the queue resources,
+ * then queue the controller deletion (which won't destroy this queue), and
+ * destroy the cm_id implicitly by returning a non-zero rc to the callout.
*/
static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
{
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- int ret, ctrl_deleted = 0;
+ int ret;
- /* First disable the queue so ctrl delete won't free it */
- if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
- goto out;
+ /* Own the controller deletion */
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return 0;
- /* delete the controller */
- ret = __nvme_rdma_del_ctrl(ctrl);
- if (!ret) {
- dev_warn(ctrl->ctrl.device,
- "Got rdma device removal event, deleting ctrl\n");
- flush_work(&ctrl->delete_work);
+ dev_warn(ctrl->ctrl.device,
+ "Got rdma device removal event, deleting ctrl\n");
- /* Return non-zero so the cm_id will destroy implicitly */
- ctrl_deleted = 1;
+ /* Get rid of reconnect work if it's running */
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+ /* Disable the queue so ctrl delete won't free it */
+ if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
/* Free this queue ourselves */
- rdma_disconnect(queue->cm_id);
- ib_drain_qp(queue->qp);
+ nvme_rdma_stop_queue(queue);
nvme_rdma_destroy_queue_ib(queue);
+
+ /* Return non-zero so the cm_id will be destroyed implicitly */
+ ret = 1;
}
-out:
- return ctrl_deleted;
+ /* Queue controller deletion */
+ queue_work(nvme_rdma_wq, &ctrl->delete_work);
+ flush_work(&ctrl->delete_work);
+ return ret;
}
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
nvme_rdma_free_io_queues(ctrl);
}
- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
nvme_shutdown_ctrl(&ctrl->ctrl);
blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl);
}
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ if (shutdown)
+ nvme_rdma_shutdown_ctrl(ctrl);
+
+ if (ctrl->ctrl.tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_rdma_dev_put(ctrl->device);
+ }
+
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
static void nvme_rdma_del_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, delete_work);
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_rdma_shutdown_ctrl(ctrl);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
+ __nvme_rdma_remove_ctrl(ctrl, true);
}
static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, delete_work);
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
+ __nvme_rdma_remove_ctrl(ctrl, false);
}
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
if (ctrl->queue_count > 1) {
nvme_start_queues(&ctrl->ctrl);
nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
}
return;
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/random.h>
#include <generated/utsrelease.h>
#include "nvmet.h"
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
- u64 serial;
u16 status = 0;
id = kzalloc(sizeof(*id), GFP_KERNEL);
id->vid = 0;
id->ssvid = 0;
- /* generate a random serial number as our controllers are ephemeral: */
- get_random_bytes(&serial, sizeof(serial));
memset(id->sn, ' ', sizeof(id->sn));
- snprintf(id->sn, sizeof(id->sn), "%llx", serial);
+ snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
memset(id->mn, ' ', sizeof(id->mn));
strncpy((char *)id->mn, "Linux", sizeof(id->mn));
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/random.h>
#include "nvmet.h"
static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+ /* generate a random serial number as our controllers are ephemeral: */
+ get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
struct nvme_loop_ctrl *ctrl = container_of(work,
struct nvme_loop_ctrl, delete_work);
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_loop_shutdown_ctrl(ctrl);
nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_loop_shutdown_ctrl(ctrl);
nvme_put_ctrl(&ctrl->ctrl);
}
nvme_loop_destroy_admin_queue(ctrl);
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
- nvme_remove_namespaces(&ctrl->ctrl);
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
}
struct mutex lock;
u64 cap;
+ u64 serial;
u32 cc;
u32 csts;
NVMET_RDMA_Q_CONNECTING,
NVMET_RDMA_Q_LIVE,
NVMET_RDMA_Q_DISCONNECTING,
+ NVMET_RDMA_IN_DEVICE_REMOVAL,
};
struct nvmet_rdma_queue {
if (!len)
return 0;
- /* use the already allocated data buffer if possible */
- if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
- nvmet_rdma_use_inline_sg(rsp, len, 0);
- } else {
- status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
- len);
- if (status)
- return status;
- }
+ status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+ len);
+ if (status)
+ return status;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
struct nvmet_rdma_device *dev = queue->dev;
nvmet_rdma_free_queue(queue);
- rdma_destroy_id(cm_id);
+
+ if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+ rdma_destroy_id(cm_id);
+
kref_put(&dev->ref, nvmet_rdma_free_dev);
}
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
case NVMET_RDMA_Q_LIVE:
- disconnect = true;
queue->state = NVMET_RDMA_Q_DISCONNECTING;
+ case NVMET_RDMA_IN_DEVICE_REMOVAL:
+ disconnect = true;
break;
case NVMET_RDMA_Q_DISCONNECTING:
break;
schedule_work(&queue->release_work);
}
+/**
+ * nvme_rdma_device_removal() - Handle RDMA device removal
+ * @queue: nvmet rdma queue (cm id qp_context)
+ * @addr: nvmet address (cm_id context)
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * Note that this event can be generated on a normal queue cm_id
+ * and/or a device-bound listener cm_id (in which case
+ * queue will be NULL).
+ *
+ * We claim ownership of destroying the cm_id. For queues we move
+ * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL, and for the port
+ * we nullify the priv to prevent a double cm_id destruction; the cm_id
+ * itself is destroyed implicitly by returning a non-zero rc to the callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue)
+{
+ unsigned long flags;
+
+ if (!queue) {
+ struct nvmet_port *port = cm_id->context;
+
+ /*
+ * This is a listener cm_id. Make sure that
+ * future remove_port won't invoke a double
+ * cm_id destroy. Use atomic xchg to make sure
+ * we don't compete with remove_port.
+ */
+ if (xchg(&port->priv, NULL) != cm_id)
+ return 0;
+ } else {
+ /*
+ * This is a queue cm_id. Make sure that
+ * release queue will not destroy the cm_id
+ * and schedule all ctrl queues removal (only
+ * if the queue is not disconnecting already).
+ */
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
+ queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ nvmet_rdma_queue_disconnect(queue);
+ flush_scheduled_work();
+ }
+
+ /*
+ * We need to return 1 so that the core will destroy
+ * its own ID. What a great API design..
+ */
+ return 1;
+}
+
static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_DISCONNECTED:
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- /*
- * We can get the device removal callback even for a
- * CM ID that we aren't actually using. In that case
- * the context pointer is NULL, so we shouldn't try
- * to disconnect a non-existing queue. But we also
- * need to return 1 so that the core will destroy
- * it's own ID. What a great API design..
- */
- if (queue)
- nvmet_rdma_queue_disconnect(queue);
- else
- ret = 1;
+ nvmet_rdma_queue_disconnect(queue);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ ret = nvmet_rdma_device_removal(cm_id, queue);
break;
case RDMA_CM_EVENT_REJECTED:
case RDMA_CM_EVENT_UNREACHABLE:
static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
- struct rdma_cm_id *cm_id = port->priv;
+ struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
- rdma_destroy_id(cm_id);
+ if (cm_id)
+ rdma_destroy_id(cm_id);
}
static struct nvmet_fabrics_ops nvmet_rdma_ops = {
*/
err:
- if (it.node)
- of_node_put(it.node);
+ of_node_put(it.node);
return rc;
}
const struct device_node *parent, int port_reg, int reg)
{
struct of_endpoint endpoint;
- struct device_node *node, *prev_node = NULL;
-
- while (1) {
- node = of_graph_get_next_endpoint(parent, prev_node);
- of_node_put(prev_node);
- if (!node)
- break;
+ struct device_node *node = NULL;
+ for_each_endpoint_of_node(parent, node) {
of_graph_parse_endpoint(node, &endpoint);
if (((port_reg == -1) || (endpoint.port == port_reg)) &&
((reg == -1) || (endpoint.id == reg)))
return node;
-
- prev_node = node;
}
return NULL;
pr_warning("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
- if (detached) {
+ if (detached && mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
pr_debug("unflattened tree is detached\n");
}
list_del(&desc->list);
+ of_node_set_flag(desc->dev, OF_POPULATED);
+
pr_debug("of_irq_init: init %s (%p), parent %p\n",
desc->dev->full_name,
desc->dev, desc->interrupt_parent);
ret = desc->irq_init_cb(desc->dev,
desc->interrupt_parent);
if (ret) {
+ of_node_clear_flag(desc->dev, OF_POPULATED);
kfree(desc);
continue;
}
* its children can get processed in a subsequent pass.
*/
list_add_tail(&desc->list, &intc_parent_list);
-
- of_node_set_flag(desc->dev, OF_POPULATED);
}
/* Get the next pending parent that might have children */
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
+#ifndef CONFIG_PPC
static int __init of_platform_default_populate_init(void)
{
struct device_node *node;
return 0;
}
arch_initcall_sync(of_platform_default_populate_init);
+#endif
static int of_platform_device_destroy(struct device *dev, void *data)
{
nvec = maxvec;
for (;;) {
- if (!(flags & PCI_IRQ_NOAFFINITY)) {
+ if (flags & PCI_IRQ_AFFINITY) {
dev->irq_affinity = irq_create_affinity_mask(&nvec);
if (nvec < minvec)
return -ENOSPC;
**/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
- return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY);
+ return __pci_enable_msi_range(dev, minvec, maxvec, 0);
}
EXPORT_SYMBOL(pci_enable_msi_range);
return -ERANGE;
for (;;) {
- if (!(flags & PCI_IRQ_NOAFFINITY)) {
+ if (flags & PCI_IRQ_AFFINITY) {
dev->irq_affinity = irq_create_affinity_mask(&nvec);
if (nvec < minvec)
return -ENOSPC;
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
- return __pci_enable_msix_range(dev, entries, minvec, maxvec,
- PCI_IRQ_NOAFFINITY);
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
}
EXPORT_SYMBOL(pci_enable_msix_range);
{
int vecs = -ENOSPC;
- if (!(flags & PCI_IRQ_NOMSIX)) {
+ if (flags & PCI_IRQ_MSIX) {
vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
flags);
if (vecs > 0)
return vecs;
}
- if (!(flags & PCI_IRQ_NOMSI)) {
+ if (flags & PCI_IRQ_MSI) {
vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
if (vecs > 0)
return vecs;
}
/* use legacy irq if allowed */
- if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1)
+ if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
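+		/* make sure the device's INTx line is actually enabled */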
+ pci_intx(dev, 1);
return 1;
+ }
+
return vecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
return 0;
}
-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
static LIST_HEAD(arm_pmu_list);
/*
{
struct arm_pmu *pmu;
- mutex_lock(&arm_pmu_mutex);
+ spin_lock(&arm_pmu_lock);
list_for_each_entry(pmu, &arm_pmu_list, entry) {
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
if (pmu->reset)
pmu->reset(pmu);
}
- mutex_unlock(&arm_pmu_mutex);
+ spin_unlock(&arm_pmu_lock);
return 0;
}
if (!cpu_hw_events)
return -ENOMEM;
- mutex_lock(&arm_pmu_mutex);
+ spin_lock(&arm_pmu_lock);
list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
- mutex_unlock(&arm_pmu_mutex);
+ spin_unlock(&arm_pmu_lock);
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
return 0;
out_unregister:
- mutex_lock(&arm_pmu_mutex);
+ spin_lock(&arm_pmu_lock);
list_del(&cpu_pmu->entry);
- mutex_unlock(&arm_pmu_mutex);
+ spin_unlock(&arm_pmu_lock);
free_percpu(cpu_hw_events);
return err;
}
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
cpu_pm_pmu_unregister(cpu_pmu);
- mutex_lock(&arm_pmu_mutex);
+ spin_lock(&arm_pmu_lock);
list_del(&cpu_pmu->entry);
- mutex_unlock(&arm_pmu_mutex);
+ spin_unlock(&arm_pmu_lock);
free_percpu(cpu_pmu->hw_events);
}
/* If we didn't manage to parse anything, try the interrupt affinity */
if (cpumask_weight(&pmu->supported_cpus) == 0) {
- if (!using_spi) {
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq_is_percpu(irq)) {
/* If using PPIs, check the affinity of the partition */
- int ret, irq;
+ int ret;
- irq = platform_get_irq(pdev, 0);
ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
if (ret) {
kfree(irqs);
#include <linux/bitops.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinconf.h>
return PTR_ERR(pc->pcdev);
}
- ret = meson_gpiolib_register(pc);
- if (ret) {
- pinctrl_unregister(pc->pcdev);
- return ret;
- }
-
- return 0;
+ return meson_gpiolib_register(pc);
}
static struct platform_driver meson_pinctrl_driver = {
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
- /*
- * Suppose BIOS or Bootloader sets specific debounce for the
- * GPIO. if not, set debounce to be 2.75ms and remove glitch.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
-
pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
spin_unlock_irqrestore(&gpio_dev->lock, flags);
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
- /*
- Suppose BIOS or Bootloader sets specific debounce for the
- GPIO. if not, set debounce to be 2.75ms.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
{
struct pistachio_pinctrl *pctl;
struct resource *res;
- int ret;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
return PTR_ERR(pctl->pctldev);
}
- ret = pistachio_gpio_register(pctl);
- if (ret < 0) {
- pinctrl_unregister(pctl->pctldev);
- return ret;
- }
-
- return 0;
+ return pistachio_gpio_register(pctl);
}
static struct platform_driver pistachio_pinctrl_driver = {
/* BIOS error detected */
{ KE_IGNORE, 0xe00d, { KEY_RESERVED } },
- /* Unknown, defined in ACPI DSDT */
- /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */
+ /* Battery was removed or inserted */
+ { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
/* Wifi Catcher */
{ KE_KEY, 0xe011, { KEY_PROG2 } },
}
static inline void max17042_read_model_data(struct max17042_chip *chip,
- u8 addr, u32 *data, int size)
+ u8 addr, u16 *data, int size)
{
struct regmap *map = chip->regmap;
int i;
+ u32 tmp;
- for (i = 0; i < size; i++)
- regmap_read(map, addr + i, &data[i]);
+ for (i = 0; i < size; i++) {
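+		/* regmap_read() returns a 32-bit value; the model table holds 16-bit words */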
+ regmap_read(map, addr + i, &tmp);
+ data[i] = (u16)tmp;
+ }
}
static inline int max17042_model_data_compare(struct max17042_chip *chip,
{
int ret;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
- u32 *temp_data;
+ u16 *temp_data;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
ret = max17042_model_data_compare(
chip,
chip->pdata->config_data->cell_char_tbl,
- (u16 *)temp_data,
+ temp_data,
table_size);
max10742_lock_model(chip);
{
int i;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
- u32 *temp_data;
+ u16 *temp_data;
int ret = 0;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
config SYSCON_REBOOT_MODE
tristate "Generic SYSCON regmap reboot mode driver"
depends on OF
+ depends on MFD_SYSCON
select REBOOT_MODE
- select MFD_SYSCON
help
Say y here will enable reboot mode driver. This will
get reboot mode arguments and store it in SYSCON mapped
if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
pr_err("failed to find reboot-offset property\n");
+ iounmap(base);
return -EINVAL;
}
err = register_restart_handler(&hisi_restart_nb);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
err);
+ iounmap(base);
+ }
return err;
}
if (!charger)
return -ENOMEM;
+ platform_set_drvdata(pdev, charger);
charger->tps = tps;
charger->dev = &pdev->dev;
static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
long timeout)
{
- struct rio_channel *ch = NULL;
- struct rio_channel *new_ch = NULL;
+ struct rio_channel *ch;
+ struct rio_channel *new_ch;
struct conn_req *req;
struct cm_peer *peer;
int found = 0;
spin_unlock_bh(&ch->lock);
riocm_put_channel(ch);
+ ch = NULL;
kfree(req);
down_read(&rdev_sem);
if (!found) {
/* If peer device object not found, simply ignore the request */
err = -ENODEV;
- goto err_nodev;
+ goto err_put_new_ch;
}
new_ch->rdev = peer->rdev;
*new_ch_id = new_ch->id;
return new_ch;
+
+err_put_new_ch:
+ spin_lock_bh(&idr_lock);
+ idr_remove(&ch_idr, new_ch->id);
+ spin_unlock_bh(&idr_lock);
+ riocm_put_channel(new_ch);
+
err_put:
- riocm_put_channel(ch);
-err_nodev:
- if (new_ch) {
- spin_lock_bh(&idr_lock);
- idr_remove(&ch_idr, new_ch->id);
- spin_unlock_bh(&idr_lock);
- riocm_put_channel(new_ch);
- }
+ if (ch)
+ riocm_put_channel(ch);
*new_ch_id = 0;
return ERR_PTR(err);
}
u8 *sense = NULL;
int expires;
+ cqr = (struct dasd_ccw_req *) intparm;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
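+			/* a clear was pending for this request; treat the -EIO
+			 * as the clear having completed
+			 */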
+ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
+ device = (struct dasd_device *) cqr->startdev;
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_device_clear_timer(device);
+ wake_up(&dasd_flush_wq);
+ dasd_schedule_device_bh(device);
+ return;
+ }
break;
case -ETIMEDOUT:
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
}
now = get_tod_clock();
- cqr = (struct dasd_ccw_req *) intparm;
/* check for conditions that should be handled immediately */
if (!cqr ||
!(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
return PTR_ERR(cqr);
}
+ cqr->lpm = lpum;
+retry:
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
(prssdp + 1);
memcpy(messages, message_buf,
sizeof(struct dasd_rssd_messages));
+ } else if (cqr->lpm) {
+ /*
+ * On z/VM we might not be able to do I/O on the requested path,
+ * but we do get the required information on any path, so retry
+ * with an open path mask.
+ */
+ cqr->lpm = 0;
+ goto retry;
} else
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading messages failed with rc=%d\n"
priv->state = DEV_STATE_NOT_OPER;
priv->dev_id.devno = sch->schib.pmcw.dev;
priv->dev_id.ssid = sch->schid.ssid;
- priv->schid = sch->schid;
INIT_WORK(&priv->todo_work, ccw_device_todo);
INIT_LIST_HEAD(&priv->cmb_list);
put_device(&old_sch->dev);
/* Initialize new subchannel. */
spin_lock_irq(sch->lock);
- cdev->private->schid = sch->schid;
cdev->ccwlock = sch->lock;
if (!sch_is_pseudo_sch(sch))
sch_set_cdev(sch, cdev);
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
char dbf_text[15];
if (!scsw_is_valid_cstat(&irb->scsw) ||
"received"
" ... device %04x on subchannel 0.%x.%04x, dev_stat "
": %02X sch_stat : %02X\n",
- cdev->private->dev_id.devno, cdev->private->schid.ssid,
- cdev->private->schid.sch_no,
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schid.sch_no,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
- sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
+ sprintf(dbf_text, "chk%x", sch->schid.sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}
int state; /* device state */
atomic_t onoff;
struct ccw_dev_id dev_id; /* device id */
- struct subchannel_id schid; /* subchannel number */
struct ccw_request req; /* internal I/O request */
int iretry;
u8 pgid_valid_mask; /* mask of valid PGIDs */
q->qdio_error = 0;
}
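+
+/* schedule the queue tasklet only while the IRQ is in the ACTIVE state */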
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+ if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+ tasklet_schedule(&q->tasklet);
+ return 0;
+ }
+ return -EPERM;
+}
+
static void __qdio_inbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_inbound);
if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
- tasklet_schedule(&q->tasklet);
+ if (!qdio_tasklet_schedule(q))
return;
- }
}
qdio_stop_polling(q);
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
}
* is noticed and outbound_handler is called after some time.
*/
if (qdio_outbound_q_done(q))
- del_timer(&q->u.out.timer);
+ del_timer_sync(&q->u.out.timer);
else
- if (!timer_pending(&q->u.out.timer))
+ if (!timer_pending(&q->u.out.timer) &&
+ likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
return;
sched:
- if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
- return;
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
/* outbound tasklet */
{
struct qdio_q *q = (struct qdio_q *)data;
- if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
- return;
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
for_each_output_queue(q->irq_ptr, out, i)
if (!qdio_outbound_q_done(out))
- tasklet_schedule(&out->tasklet);
+ qdio_tasklet_schedule(out);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
- tasklet_schedule(&q->tasklet);
+ if (!qdio_tasklet_schedule(q))
return;
- }
}
qdio_stop_polling(q);
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
}
int i;
struct qdio_q *q;
- if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
for_each_input_queue(irq_ptr, q, i) {
continue;
if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
qdio_siga_sync_q(q);
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
}
struct irb *irb)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
int cstat, dstat;
if (!intparm || !irq_ptr) {
- DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_ERROR("qint:%4x", schid.sch_no);
return;
}
int qdio_get_ssqd_desc(struct ccw_device *cdev,
struct qdio_ssqd_desc *data)
{
+ struct subchannel_id schid;
if (!cdev || !cdev->private)
return -EINVAL;
- DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
- return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("get ssqd:%4x", schid.sch_no);
+ return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
tasklet_kill(&q->tasklet);
for_each_output_queue(irq_ptr, q, i) {
- del_timer(&q->u.out.timer);
+ del_timer_sync(&q->u.out.timer);
tasklet_kill(&q->tasklet);
}
}
int qdio_shutdown(struct ccw_device *cdev, int how)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
int rc;
- unsigned long flags;
if (!irq_ptr)
return -ENODEV;
WARN_ON_ONCE(irqs_disabled());
- DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qshutdown:%4x", schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
/*
qdio_shutdown_debug_entries(irq_ptr);
/* cleanup subchannel */
- spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
}
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR,
10 * HZ);
- spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
no_cleanup:
qdio_shutdown_thinint(irq_ptr);
/* restore interrupt handler */
if ((void *)cdev->handler == (void *)qdio_int_handler)
cdev->handler = irq_ptr->orig_handler;
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
int qdio_free(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
if (!irq_ptr)
return -ENODEV;
- DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qfree:%4x", schid.sch_no);
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
mutex_lock(&irq_ptr->setup_mutex);
*/
int qdio_allocate(struct qdio_initialize *init_data)
{
+ struct subchannel_id schid;
struct qdio_irq *irq_ptr;
- DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
+ ccw_device_get_schid(init_data->cdev, &schid);
+ DBF_EVENT("qallocate:%4x", schid.sch_no);
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
*/
int qdio_establish(struct qdio_initialize *init_data)
{
- struct qdio_irq *irq_ptr;
struct ccw_device *cdev = init_data->cdev;
- unsigned long saveflags;
+ struct subchannel_id schid;
+ struct qdio_irq *irq_ptr;
int rc;
- DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qestablish:%4x", schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- if (cdev->private->state != DEV_STATE_ONLINE)
- return -EINVAL;
-
mutex_lock(&irq_ptr->setup_mutex);
qdio_setup_irq(init_data);
irq_ptr->ccw.count = irq_ptr->equeue.count;
irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
- spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
- }
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
- if (rc) {
mutex_unlock(&irq_ptr->setup_mutex);
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
*/
int qdio_activate(struct ccw_device *cdev)
{
+ struct subchannel_id schid;
struct qdio_irq *irq_ptr;
int rc;
- unsigned long saveflags;
- DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qactivate:%4x", schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- if (cdev->private->state != DEV_STATE_ONLINE)
- return -EINVAL;
-
mutex_lock(&irq_ptr->setup_mutex);
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
rc = -EBUSY;
irq_ptr->ccw.count = irq_ptr->aqueue.count;
irq_ptr->ccw.cda = 0;
- spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
0, DOIO_DENY_PREFETCH);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
- }
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
- if (rc)
goto out;
+ }
if (is_thinint_irq(irq_ptr))
tiqdio_add_input_queues(irq_ptr);
/* in case of SIGA errors we must process the error immediately */
if (used >= q->u.out.scan_threshold || rc)
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
else
/* free the SBALs in case of no further traffic */
- if (!timer_pending(&q->u.out.timer))
+ if (!timer_pending(&q->u.out.timer) &&
+ likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
mod_timer(&q->u.out.timer, jiffies + HZ);
return rc;
}
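These qdio setup/teardown paths run in process context with interrupts enabled (note the WARN_ON_ONCE(irqs_disabled()) above), so the irqsave/irqrestore variants buy nothing here and plain spin_lock_irq()/spin_unlock_irq() suffices. A minimal sketch of the two forms, using a hypothetical example_lock:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);          /* hypothetical */

    /* Process context, interrupts known to be enabled. */
    static void example_process_ctx(void)
    {
            spin_lock_irq(&example_lock);
            /* ... critical section ... */
            spin_unlock_irq(&example_lock);
    }

    /* Context unknown: interrupts may already be disabled, so their
     * state must be saved and restored around the critical section. */
    static void example_any_ctx(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            /* ... critical section ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }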
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
+s390-virtio-objs := virtio_ccw.o
+ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
+s390-virtio-objs += kvm_virtio.o
+endif
+obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
if (test_devices_support(total_memory_size) < 0)
return -ENODEV;
+ pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
+
rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
if (rc)
return rc;
}
/* code for early console output with virtio_console */
-static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+static int early_put_chars(u32 vtermno, const char *buf, int count)
{
char scratch[17];
unsigned int len = count;
struct fib *fibptr;
struct hw_fib * hw_fib = (struct hw_fib *)0;
dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
- unsigned size;
+ unsigned int size, osize;
int retval;
if (dev->in_reset) {
* will not overrun the buffer when we copy the memory. Return
* an error if we would.
*/
- size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+ osize = size = le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr);
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
goto cleanup;
}
+ /* Sanity check the second copy */
+ if ((osize != le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr))
+ || (size < le16_to_cpu(kfib->header.SenderSize))) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+
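The re-check guards against a double fetch: the header is copied from user space twice, and a racing writer could change Size or SenderSize between the two copies, so the values derived from the first copy must still hold after the second. A minimal sketch of the pattern, with hypothetical names and a simplified bound check:

    #include <linux/uaccess.h>
    #include <linux/slab.h>

    static int example_read_twice(void __user *uptr)
    {
            u32 first_size, second_size;
            void *buf;

            if (copy_from_user(&first_size, uptr, sizeof(first_size)))
                    return -EFAULT;
            if (first_size < sizeof(u32) || first_size > PAGE_SIZE)
                    return -EINVAL;

            buf = kmalloc(first_size, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            if (copy_from_user(buf, uptr, first_size)) {
                    kfree(buf);
                    return -EFAULT;
            }

            second_size = *(u32 *)buf;
            if (second_size != first_size) {        /* changed between copies */
                    kfree(buf);
                    return -EINVAL;
            }
            /* ... the header can now be trusted ... */
            kfree(buf);
            return 0;
    }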
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
mutex_unlock(&fip->ctlr_mutex);
drop:
- kfree(skb);
+ kfree_skb(skb);
return rc;
}
__ipr_remove(pdev);
return rc;
}
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ ioa_cfg->scan_enabled = 1;
+ schedule_work(&ioa_cfg->work_q);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- scsi_scan_host(ioa_cfg->host);
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
}
}
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- ioa_cfg->scan_enabled = 1;
- schedule_work(&ioa_cfg->work_q);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ scsi_scan_host(ioa_cfg->host);
+
return 0;
}
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- if (pci_request_selected_regions(instance->pdev, instance->bar,
+	if (pci_request_selected_regions(instance->pdev, 1 << instance->bar,
"megasas: LSI")) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
return -EBUSY;
iounmap(instance->reg_set);
fail_ioremap:
- pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1 << instance->bar);
return -EINVAL;
}
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1 << instance->bar);
}
/**
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1 << instance->bar);
}
/**
} else
ioc->msix96_vector = 0;
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+ &ioc->chip->ReplyPostHostIndex;
+
+ for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+ ioc->reply_post_host_index[i] =
+ (resource_size_t __iomem *)
+ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ * 4)));
+ }
+
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
if (r)
goto out_free_resources;
- if (ioc->is_warpdrive) {
- ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
- &ioc->chip->ReplyPostHostIndex;
-
- for (i = 1; i < ioc->cpu_msix_table_sz; i++)
- ioc->reply_post_host_index[i] =
- (resource_size_t __iomem *)
- ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
- * 4)));
- }
-
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
if (r)
if (!edev)
return;
+ enclosure_unregister(edev);
+
ses_dev = edev->scratch;
edev->scratch = NULL;
kfree(edev->component[0].scratch);
put_device(&edev->edev);
- enclosure_unregister(edev);
}
static void ses_intf_remove(struct device *cdev,
if (!ccdev)
return ERR_PTR(-ENOMEM);
+ mutex_init(&ccdev->lock);
ccdev->dev = dev;
ccdev->clk = devm_clk_get(dev, clock_name);
if (IS_ERR(ccdev->clk))
}
/* Bind cpufreq callbacks to thermal cooling device ops */
+
static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
.get_max_state = cpufreq_get_max_state,
.get_cur_state = cpufreq_get_cur_state,
.set_cur_state = cpufreq_set_cur_state,
};
+static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
+ .get_max_state = cpufreq_get_max_state,
+ .get_cur_state = cpufreq_get_cur_state,
+ .set_cur_state = cpufreq_set_cur_state,
+ .get_requested_power = cpufreq_get_requested_power,
+ .state2power = cpufreq_state2power,
+ .power2state = cpufreq_power2state,
+};
+
/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
.notifier_call = cpufreq_thermal_notifier,
struct cpumask temp_mask;
unsigned int freq, i, num_cpus;
int ret;
+ struct thermal_cooling_device_ops *cooling_ops;
cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
if (capacitance) {
- cpufreq_cooling_ops.get_requested_power =
- cpufreq_get_requested_power;
- cpufreq_cooling_ops.state2power = cpufreq_state2power;
- cpufreq_cooling_ops.power2state = cpufreq_power2state;
cpufreq_dev->plat_get_static_power = plat_static_func;
ret = build_dyn_power_table(cpufreq_dev, capacitance);
cool_dev = ERR_PTR(ret);
goto free_table;
}
+
+ cooling_ops = &cpufreq_power_cooling_ops;
+ } else {
+ cooling_ops = &cpufreq_cooling_ops;
}
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
cpufreq_dev->id);
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
+ cooling_ops);
if (IS_ERR(cool_dev))
goto remove_idr;
instance->target = get_target_state(tz, cdev, percentage,
cur_trip_level);
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false;
+ mutex_unlock(&instance->cdev->lock);
thermal_cdev_update(cdev);
}
return 0;
dev_dbg(&instance->cdev->device, "target=%d\n",
(int)instance->target);
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false; /* cdev needs update */
+ mutex_unlock(&instance->cdev->lock);
}
mutex_unlock(&tz->lock);
static int imx_thermal_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(of_imx_thermal_match, &pdev->dev);
struct imx_thermal_data *data;
struct regmap *map;
int measure_freq;
}
data->tempmon = map;
- data->socdata = of_id->data;
+ data->socdata = of_device_get_match_data(&pdev->dev);
/* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
if (data->socdata->version == TEMPMON_IMX6SX) {
.remove = int3406_thermal_remove,
.driver = {
.name = "int3406 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3406_thermal_match,
},
};
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/thermal.h>
+#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
unsigned long crt_temp;
int hot_trip_id;
unsigned long hot_temp;
+ bool bios_enabled;
};
static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
*nr_trips = 0;
/* Check if BIOS has already enabled thermal sensor */
- if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
+ if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) {
+ ptd->bios_enabled = true;
goto read_trips;
+ }
tsel = readb(ptd->hw_base + WPT_TSEL);
/*
return 0;
}
+static int pch_wpt_suspend(struct pch_thermal_device *ptd)
+{
+ u8 tsel;
+
+ if (ptd->bios_enabled)
+ return 0;
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+
+ writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL);
+
+ return 0;
+}
+
+static int pch_wpt_resume(struct pch_thermal_device *ptd)
+{
+ u8 tsel;
+
+ if (ptd->bios_enabled)
+ return 0;
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+
+ writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+
+ return 0;
+}
+
struct pch_dev_ops {
int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+ int (*suspend)(struct pch_thermal_device *ptd);
+ int (*resume)(struct pch_thermal_device *ptd);
};
static const struct pch_dev_ops pch_dev_ops_wpt = {
.hw_init = pch_wpt_init,
.get_temp = pch_wpt_get_temp,
+ .suspend = pch_wpt_suspend,
+ .resume = pch_wpt_resume,
};
static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
pci_disable_device(pdev);
}
+static int intel_pch_thermal_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+ return ptd->ops->suspend(ptd);
+}
+
+static int intel_pch_thermal_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+ return ptd->ops->resume(ptd);
+}
+
static struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+static const struct dev_pm_ops intel_pch_pm_ops = {
+ .suspend = intel_pch_thermal_suspend,
+ .resume = intel_pch_thermal_resume,
+};
+
static struct pci_driver intel_pch_thermal_driver = {
.name = "intel_pch_thermal",
.id_table = intel_pch_thermal_id,
.probe = intel_pch_thermal_probe,
.remove = intel_pch_thermal_remove,
+ .driver.pm = &intel_pch_pm_ops,
};
module_pci_driver(intel_pch_thermal_driver);
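The driver fills in only the .suspend and .resume callbacks; an equivalent shorthand (not what this patch uses) would be SIMPLE_DEV_PM_OPS, which also wires the same pair into the hibernate callbacks. A hedged sketch with a hypothetical ops name:

    #include <linux/pm.h>

    static SIMPLE_DEV_PM_OPS(intel_pch_example_pm_ops,      /* hypothetical */
                             intel_pch_thermal_suspend,
                             intel_pch_thermal_resume);

The resulting structure would be hooked up exactly as above, via .driver.pm.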
int sleeptime;
unsigned long target_jiffies;
unsigned int guard;
- unsigned int compensation = 0;
+ unsigned int compensated_ratio;
int interval; /* jiffies to sleep for each attempt */
unsigned int duration_jiffies = msecs_to_jiffies(duration);
unsigned int window_size_now;
* c-states, thus we need to compensate the injected idle ratio
* to achieve the actual target reported by the HW.
*/
- compensation = get_compensation(target_ratio);
- interval = duration_jiffies*100/(target_ratio+compensation);
+ compensated_ratio = target_ratio +
+ get_compensation(target_ratio);
+ if (compensated_ratio <= 0)
+ compensated_ratio = 1;
+ interval = duration_jiffies * 100 / compensated_ratio;
/* align idle time */
target_jiffies = roundup(jiffies, interval);
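Folding the compensation into the ratio before dividing keeps the math in one place and makes the divide-by-zero guard explicit. A worked example with assumed numbers, for a 6-jiffy injection duration, a 25% target ratio and 3% compensation from the table:

    unsigned int duration_jiffies = 6;              /* assumed for illustration */
    unsigned int compensated_ratio = 25 + 3;        /* target + get_compensation() */
    int interval = duration_jiffies * 100 / compensated_ratio;      /* = 21 jiffies */

The clamp to 1 above prevents a division by zero if the compensation table ever drives the ratio to zero.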
goto exit_set;
} else if (set_target_ratio > 0 && new_target_ratio == 0) {
pr_info("Stop forced idle injection\n");
- set_target_ratio = 0;
end_power_clamp();
+ set_target_ratio = 0;
} else /* adjust currently running */ {
set_target_ratio = new_target_ratio;
/* make new set_target_ratio visible to other cpus */
continue;
instance->target = 0;
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false;
+ mutex_unlock(&instance->cdev->lock);
thermal_cdev_update(instance->cdev);
}
}
update_passive_instance(tz, trip_type, -1);
instance->initialized = true;
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false; /* cdev needs update */
+ mutex_unlock(&instance->cdev->lock);
}
mutex_unlock(&tz->lock);
return ret;
instance->target = state;
+ mutex_lock(&cdev->lock);
cdev->updated = false;
+ mutex_unlock(&cdev->lock);
thermal_cdev_update(cdev);
return 0;
struct thermal_instance *instance;
unsigned long target = 0;
+ mutex_lock(&cdev->lock);
/* cooling device is updated*/
- if (cdev->updated)
+ if (cdev->updated) {
+ mutex_unlock(&cdev->lock);
return;
+ }
- mutex_lock(&cdev->lock);
/* Make sure cdev enters the deepest cooling state */
list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
dev_dbg(&cdev->device, "zone%d->target=%lu\n",
if (instance->target > target)
target = instance->target;
}
- mutex_unlock(&cdev->lock);
cdev->ops->set_cur_state(cdev, target);
cdev->updated = true;
+ mutex_unlock(&cdev->lock);
trace_cdev_update(cdev, target);
dev_dbg(&cdev->device, "set to state %lu\n", target);
}
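With every writer of cdev->updated now holding cdev->lock, and thermal_cdev_update() both testing the flag and applying the deepest requested state under that same lock, a governor marking the device dirty can no longer race with an update that has already passed the check. A minimal sketch of the check-and-act-under-lock pattern, with hypothetical names:

    #include <linux/mutex.h>

    struct example_cdev {                   /* hypothetical */
            struct mutex lock;
            bool updated;
            unsigned long target;
    };

    static void example_mark_dirty(struct example_cdev *cdev, unsigned long t)
    {
            mutex_lock(&cdev->lock);
            cdev->target = t;
            cdev->updated = false;          /* needs re-evaluation */
            mutex_unlock(&cdev->lock);
    }

    static void example_update(struct example_cdev *cdev)
    {
            mutex_lock(&cdev->lock);
            if (cdev->updated) {            /* nothing pending */
                    mutex_unlock(&cdev->lock);
                    return;
            }
            /* ... pick and apply the new state ... */
            cdev->updated = true;
            mutex_unlock(&cdev->lock);
    }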
return result;
}
+EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs);
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
hwmon_device_unregister(hwmon->device);
kfree(hwmon);
}
+EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
acm->is_int_ep = usb_endpoint_xfer_int(epread);
if (acm->is_int_ep)
acm->bInterval = epread->bInterval;
urb->transfer_dma = rb->dma;
if (acm->is_int_ep) {
usb_fill_int_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb,
acm->bInterval);
} else {
usb_fill_bulk_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb);
struct acm_rb read_buffers[ACM_NR];
struct acm_wb *putbuffer; /* for acm_tty_put_char() */
int rx_buflimit;
- int rx_endpoint;
spinlock_t read_lock;
int write_used; /* number of non-empty write buffers */
int transmitting;
ep, buffer, size);
}
+static const unsigned short low_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 8,
+ [USB_ENDPOINT_XFER_ISOC] = 0,
+ [USB_ENDPOINT_XFER_BULK] = 0,
+ [USB_ENDPOINT_XFER_INT] = 8,
+};
+static const unsigned short full_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1023,
+ [USB_ENDPOINT_XFER_BULK] = 64,
+ [USB_ENDPOINT_XFER_INT] = 64,
+};
+static const unsigned short high_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 512,
+ [USB_ENDPOINT_XFER_INT] = 1023,
+};
+static const unsigned short super_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 512,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 1024,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
+ unsigned int maxp;
+ const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
+ /* Validate the wMaxPacketSize field */
+ maxp = usb_endpoint_maxp(&endpoint->desc);
+
+ /* Find the highest legal maxpacket size for this endpoint */
+ i = 0; /* additional transactions per microframe */
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_LOW:
+ maxpacket_maxes = low_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_FULL:
+ maxpacket_maxes = full_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_HIGH:
+ /* Bits 12..11 are allowed only for HS periodic endpoints */
+ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+ i = maxp & (BIT(12) | BIT(11));
+ maxp &= ~i;
+ }
+ /* fallthrough */
+ default:
+ maxpacket_maxes = high_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ maxpacket_maxes = super_speed_maxpacket_maxes;
+ break;
+ }
+ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+ if (maxp > j) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+ cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+ maxp = j;
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+ }
+
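As a worked example with an assumed descriptor value (BIT() as in the kernel's <linux/bits.h>): a high-speed isochronous endpoint advertising wMaxPacketSize 0x1400 encodes 1024 bytes per packet in bits 10:0 and two additional transactions per microframe in bits 12:11, so the decode above splits it as

    unsigned int maxp = 0x1400;                     /* assumed wMaxPacketSize */
    unsigned int extra = maxp & (BIT(12) | BIT(11));        /* 0x1000: 2 extra transactions */

    maxp &= ~extra;                                 /* 0x0400 = 1024, within the HS isoc limit */

If maxp exceeded the table limit, the clamp above would re-encode the extra-transaction bits together with the reduced size when rewriting wMaxPacketSize.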
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
*/
if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
&& usb_endpoint_xfer_bulk(d)) {
- unsigned maxp;
-
- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
goto error_decrease_mem;
}
- mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle);
+ mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
+ &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
mask |= POLLOUT | POLLWRNORM;
if (!connected(ps))
- mask |= POLLERR | POLLHUP;
+ mask |= POLLHUP;
+ if (list_empty(&ps->list))
+ mask |= POLLERR;
return mask;
}
/* Continue a partial initialization */
if (type == HUB_INIT2 || type == HUB_INIT3) {
- device_lock(hub->intfdev);
+ device_lock(&hdev->dev);
/* Was the hub disconnected while we were waiting? */
- if (hub->disconnected) {
- device_unlock(hub->intfdev);
- kref_put(&hub->kref, hub_release);
- return;
- }
+ if (hub->disconnected)
+ goto disconnected;
if (type == HUB_INIT2)
goto init2;
goto init3;
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
return; /* Continues at init3: below */
} else {
msleep(delay);
/* Scan all ports that need attention */
kick_hub_wq(hub);
- /* Allow autosuspend if it was suppressed */
- if (type <= HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ /* Allow autosuspend if it was suppressed */
+ disconnected:
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
-
- if (type == HUB_INIT2 || type == HUB_INIT3)
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
+ }
kref_put(&hub->kref, hub_release);
}
struct usb_device *hdev = hub->hdev;
int i;
- cancel_delayed_work_sync(&hub->init_work);
-
/* hub_wq and related activity won't re-trigger */
hub->quiescing = 1;
if (!simple->clks)
return -ENOMEM;
+ platform_set_drvdata(pdev, simple);
simple->dev = dev;
for (i = 0; i < simple->num_clocks; i++) {
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
if (!req->request.no_interrupt && !chain)
trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
- if (last)
+ if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
trb->ctrl |= DWC3_TRB_CTRL_LST;
if (chain)
static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_request *req, struct dwc3_trb *trb,
- const struct dwc3_event_depevt *event, int status)
+ const struct dwc3_event_depevt *event, int status,
+ int chain)
{
unsigned int count;
unsigned int s_pkt = 0;
dep->queued_requests--;
trace_dwc3_complete_trb(dep, trb);
+ /*
+	 * If we're in the middle of a series of chained TRBs and we
+	 * receive a short transfer along the way, DWC3 will skip
+	 * through all TRBs including the last TRB in the chain (the
+	 * one where the CHN bit is zero). DWC3 will also avoid
+	 * clearing the HWO bit, and SW has to do it manually.
+ *
+ * We're going to do that here to avoid problems of HW trying
+ * to use bogus TRBs for transfers.
+ */
+ if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+
if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
- /*
- * We continue despite the error. There is not much we
- * can do. If we don't clean it up we loop forever. If
- * we skip the TRB then it gets overwritten after a
- * while since we use them in a ring buffer. A BUG()
- * would help. Lets hope that if this occurs, someone
- * fixes the root cause instead of looking away :)
- */
- dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
- dep->name, trb);
+ return 1;
+
count = trb->size & DWC3_TRB_SIZE_MASK;
if (dep->direction) {
s_pkt = 1;
}
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- req->request.actual += req->request.length - count;
- if (s_pkt)
+ if (s_pkt && !chain)
return 1;
if ((event->status & DEPEVT_STATUS_LST) &&
(trb->ctrl & (DWC3_TRB_CTRL_LST |
struct dwc3_trb *trb;
unsigned int slot;
unsigned int i;
+ int count = 0;
int ret;
do {
+ int chain;
+
req = next_request(&dep->started_list);
if (WARN_ON_ONCE(!req))
return 1;
+ chain = req->request.num_mapped_sgs > 0;
i = 0;
do {
slot = req->first_trb_index + i;
slot++;
slot %= DWC3_TRB_NUM;
trb = &dep->trb_pool[slot];
+ count += trb->size & DWC3_TRB_SIZE_MASK;
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
- event, status);
+ event, status, chain);
if (ret)
break;
} while (++i < req->request.num_mapped_sgs);
+ /*
+ * We assume here we will always receive the entire data block
+ * which we should receive. Meaning, if we program RX to
+ * receive 4K but we receive only 2K, we assume that's all we
+ * should receive and we simply bounce the request back to the
+ * gadget driver for further processing.
+ */
+ req->request.actual += req->request.length - count;
dwc3_gadget_giveback(dep, req, status);
if (ret)
break;
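Here count accumulates the residue (bytes not transferred) reported by each completed TRB of the request, so actual = length - count. A worked example with assumed numbers, for a 4096-byte request split across two 2048-byte TRBs where the device delivers a short transfer of 3000 bytes:

    /* TRB 1 reports 0 bytes remaining, TRB 2 reports 1096 remaining. */
    unsigned int count = 0 + 1096;
    unsigned int actual = 4096 - count;     /* 3000 bytes handed back to the gadget */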
case USB_RECIP_ENDPOINT:
+ if (!cdev->config)
+ break;
endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
list_for_each_entry(f, &cdev->config->functions, list) {
if (test_bit(endp, f->endpoints))
cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL);
if (!cdev->os_desc_req) {
- ret = PTR_ERR(cdev->os_desc_req);
+ ret = -ENOMEM;
goto end;
}
/* OS feature descriptor length <= 4kB */
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
- ret = PTR_ERR(cdev->os_desc_req->buf);
+ ret = -ENOMEM;
kfree(cdev->os_desc_req);
goto end;
}
{
struct gadget_info *gi = to_gadget_info(item);
+ mutex_lock(&gi->lock);
unregister_gadget(gi);
+ mutex_unlock(&gi->lock);
}
EXPORT_SYMBOL_GPL(unregister_gadget_item);
{
rndis_reset_cmplt_type *resp;
rndis_resp_t *r;
+ u8 *xbuf;
+ u32 length;
+
+ /* drain the response queue */
+ while ((xbuf = rndis_get_next_response(params, &length)))
+ rndis_free_response(params, xbuf);
r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
if (!r)
/* Multi frame CDC protocols may store the frame for
* later which is not a dropped frame.
*/
- if (dev->port_usb->supports_multi_frame)
+ if (dev->port_usb &&
+ dev->port_usb->supports_multi_frame)
goto multiframe;
goto drop;
}
if (!data) {
kfree(*class_array);
*class_array = NULL;
- ret = PTR_ERR(data);
+ ret = -ENOMEM;
goto unlock;
}
cl_arr = *class_array;
*/
spin_lock_irq(&epdata->dev->lock);
value = -ENODEV;
- if (unlikely(epdata->ep))
+ if (unlikely(epdata->ep == NULL))
goto fail;
req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
}
if (is_sync_kiocb(iocb)) {
value = ep_io(epdata, buf, len);
- if (value >= 0 && copy_to_iter(buf, value, to))
+ if (value >= 0 && (copy_to_iter(buf, value, to) != value))
value = -EFAULT;
} else {
struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (ret != -EPROBE_DEFER)
list_del(&driver->pending);
if (ret)
- goto err4;
+ goto err5;
break;
}
}
return 0;
+err5:
+ device_del(&udc->dev);
+
err4:
list_del(&udc->list);
mutex_unlock(&udc_lock);
struct qe_ep *ep;
if (wValue != 0 || wLength != 0
- || pipe > USB_MAX_ENDPOINTS)
+ || pipe >= USB_MAX_ENDPOINTS)
break;
ep = &udc->eps[pipe];
int port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- ehci_writel(ehci, PORT_RWC_BITS,
- &ehci->regs->port_status[port]);
spin_unlock_irq(&ehci->lock);
ehci_port_power(ehci, port, false);
spin_lock_irq(&ehci->lock);
+ ehci_writel(ehci, PORT_RWC_BITS,
+ &ehci->regs->port_status[port]);
}
}
if (pin_number > 7)
return;
- mask = 1u << pin_number;
+ mask = 1u << (pin_number % 4);
idx = pin_number / 4;
if (value)
ret = 0;
virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return -ENODEV;
+
cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
if (!cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
}
- usb_hcd_pci_remove(dev);
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(dev, PCI_D3hot);
+
+ usb_hcd_pci_remove(dev);
}
#ifdef CONFIG_PM
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- if (cmd->command_trb != xhci->cmd_ring->dequeue) {
- xhci_err(xhci,
- "Command completion event does not match command\n");
- return;
- }
-
del_timer(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
xhci_handle_stopped_cmd_ring(xhci, cmd);
return;
}
+
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
+ }
+
/*
* Host aborted the command ring, check if the current command was
* supposed to be aborted, otherwise continue normally.
send_addr = addr;
/* Queue the TRBs, even if they are zero-length */
- for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
+ for (enqd_len = 0; first_trb || enqd_len < full_len;
+ enqd_len += trb_buff_len) {
field = TRB_TYPE(TRB_NORMAL);
/* TRB buffer should not cross 64KB boundaries */
{
char data[30 *3 + 4];
char *d = data;
- int m = (sizeof(data) - 1) / 3;
+ int m = (sizeof(data) - 1) / 3 - 1;
int bytes_read = 0;
int retry_on_empty = 10;
int retry_on_timeout = 5;
int i = 0;
char data[30 *3 + 4];
char *d = data;
- int m = (sizeof(data) - 1) / 3;
+ int m = (sizeof(data) - 1) / 3 - 1;
int l = 0;
struct u132_target *target = &ftdi->target[ed];
struct u132_command *command = &ftdi->command[
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
diag[0] = 0;
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
unsigned char c = 0;
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
diag[0] = 0;
{
struct usb_sg_request *req = (struct usb_sg_request *) _req;
- req->status = -ETIMEDOUT;
usb_sg_cancel(req);
}
mod_timer(&sg_timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
- del_timer_sync(&sg_timer);
- retval = req->status;
+ if (!del_timer_sync(&sg_timer))
+ retval = -ETIMEDOUT;
+ else
+ retval = req->status;
/* FIXME check resulting data pattern */
ktime_get_ts64(&start);
retval = usbtest_do_ioctl(intf, param_32);
- if (retval)
+ if (retval < 0)
goto free_mutex;
ktime_get_ts64(&end);
(rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id,
otg_dev->vbus);
+ platform_set_drvdata(pdev, otg_dev);
+
return 0;
}
if (gpio > 0)
dparam->enable_gpio = gpio;
- if (dparam->type == USBHS_TYPE_RCAR_GEN2)
+ if (dparam->type == USBHS_TYPE_RCAR_GEN2 ||
+ dparam->type == USBHS_TYPE_RCAR_GEN3)
dparam->has_usb_dmac = 1;
return info;
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_push;
/* check data length if this driver don't use USB-DMAC */
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_pop;
fifo = usbhsf_get_dma_fifo(priv, pkt);
* use dmaengine if possible.
* It will use pio handler if impossible.
*/
- if (usb_endpoint_dir_in(desc))
+ if (usb_endpoint_dir_in(desc)) {
pipe->handler = &usbhs_fifo_dma_push_handler;
- else
+ } else {
pipe->handler = &usbhs_fifo_dma_pop_handler;
+ usbhs_xxxsts_clear(priv, BRDYSTS,
+ usbhs_pipe_number(pipe));
+ }
ret = 0;
}
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ } /* Terminating entry */
};
#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
+/*
+ * Ivium Technologies product IDs
+ */
+#define FTDI_PALMSENS_PID 0xf440
+#define FTDI_IVIUM_XSTAT_PID 0xf441
+
/*
* Linx Technologies product ids
*/
#define INTREPID_VALUECAN_PID 0x0601
#define INTREPID_NEOVI_PID 0x0701
+/*
+ * WICED USB UART
+ */
+#define WICED_VID 0x0A5C
+#define WICED_USB20706V2_PID 0x6422
+
/*
* Definitions for ID TECH (www.idt-net.com) devices
*/
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
+#define TELIT_PRODUCT_LE920A4_1207 0x1207
+#define TELIT_PRODUCT_LE920A4_1208 0x1208
+#define TELIT_PRODUCT_LE920A4_1211 0x1211
+#define TELIT_PRODUCT_LE920A4_1212 0x1212
+#define TELIT_PRODUCT_LE920A4_1213 0x1213
+#define TELIT_PRODUCT_LE920A4_1214 0x1214
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1),
+};
+
static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
.sendsetup = BIT(2),
.reserved = BIT(0) | BIT(1) | BIT(3),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
rc = usb_register(udriver);
if (rc)
- return rc;
+ goto failed_usb_register;
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
while (sd-- > serial_drivers)
usb_serial_deregister(*sd);
usb_deregister(udriver);
+failed_usb_register:
+ kfree(udriver);
return rc;
}
EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
}
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
- uint32_t flags, void *data)
+ unsigned int count, uint32_t flags,
+ void *data)
{
- int32_t fd = *(int32_t *)data;
-
- if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
- return -EINVAL;
-
/* DATA_NONE/DATA_BOOL enables loopback testing */
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- if (*ctx)
- eventfd_signal(*ctx, 1);
- return 0;
+ if (*ctx) {
+ if (count) {
+ eventfd_signal(*ctx, 1);
+ } else {
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ }
+ return 0;
+ }
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
- uint8_t trigger = *(uint8_t *)data;
+ uint8_t trigger;
+
+ if (!count)
+ return -EINVAL;
+
+ trigger = *(uint8_t *)data;
if (trigger && *ctx)
eventfd_signal(*ctx, 1);
- return 0;
- }
- /* Handle SET_DATA_EVENTFD */
- if (fd == -1) {
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = NULL;
return 0;
- } else if (fd >= 0) {
- struct eventfd_ctx *efdctx;
- efdctx = eventfd_ctx_fdget(fd);
- if (IS_ERR(efdctx))
- return PTR_ERR(efdctx);
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = efdctx;
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd;
+
+ if (!count)
+ return -EINVAL;
+
+ fd = *(int32_t *)data;
+ if (fd == -1) {
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+
+ *ctx = efdctx;
+ }
return 0;
- } else
- return -EINVAL;
+ }
+
+ return -EINVAL;
}
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_ERR_IRQ_INDEX)
+ if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- /*
- * We should sanitize start & count, but that wasn't caught
- * originally, so this IRQ index must forever ignore them :-(
- */
-
- return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
+ count, flags, data);
}
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+ if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
+ count, flags, data);
}
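From user space both the error and request indexes now behave like ordinary single-vector triggers. A hedged sketch of arming the error eventfd, assuming an already-open VFIO device fd and the standard <linux/vfio.h> UAPI:

    #include <linux/vfio.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static int arm_err_eventfd(int device_fd)
    {
            struct vfio_irq_set *set;
            int efd = eventfd(0, 0);
            int ret;

            if (efd < 0)
                    return -1;

            set = calloc(1, sizeof(*set) + sizeof(int32_t));
            if (!set) {
                    close(efd);
                    return -1;
            }
            set->argsz = sizeof(*set) + sizeof(int32_t);
            set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
            set->index = VFIO_PCI_ERR_IRQ_INDEX;
            set->start = 0;
            set->count = 1;                 /* start != 0 or count > 1 is rejected */
            memcpy(set->data, &efd, sizeof(int32_t));

            ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
            free(set);
            if (ret < 0) {
                    close(efd);
                    return -1;
            }
            return efd;
    }

Passing DATA_EVENTFD with fd == -1 (or DATA_NONE with count == 0) tears the trigger back down, as handled above.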
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec *tvc_resp_iov;
+ struct iovec tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
- iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = &vq->iov[out];
+ cmd->tvc_resp_iov = vq->iov[out];
cmd->tvc_in_iovs = in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
{
void *priv = NULL;
long err;
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
- memory = vhost_dev_reset_owner_prepare();
- if (!memory) {
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_test_stop(n, &priv);
vhost_test_flush(n);
- vhost_dev_reset_owner(&n->dev, memory);
+ vhost_dev_reset_owner(&n->dev, umem);
done:
mutex_unlock(&n->dev.mutex);
return err;
vhost_disable_notify(&vsock->dev, vq);
for (;;) {
+ u32 len;
+
if (!vhost_vsock_more_replies(vsock)) {
/* Stop tx until the device processes already
* pending replies. Leave tx virtqueue
continue;
}
+ len = pkt->len;
+
/* Only accept correctly addressed packets */
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
virtio_transport_recv_pkt(pkt);
else
virtio_transport_free_pkt(pkt);
- vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
added = true;
}
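virtio_transport_recv_pkt() (and virtio_transport_free_pkt()) may free the packet, so its length is captured into a local before the packet is handed off and only the local is used afterwards. A minimal sketch of the capture-before-consume pattern, with a hypothetical deliver_or_free() helper:

    u32 len = pkt->len;             /* capture before the packet may be freed */

    deliver_or_free(pkt);           /* hypothetical: may free pkt */
    vhost_add_used(vq, head, sizeof(struct virtio_vsock_hdr) + len);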
* host should service the ring ASAP. */
if (out_sgs)
vq->notify(&vq->vq);
+ if (indirect)
+ kfree(desc);
END_USE(vq);
return -ENOSPC;
}
if (indirect)
kfree(desc);
+ END_USE(vq);
return -EIO;
}
rc = -ENOMEM;
goto out;
}
- } else {
+ } else if (msg_type == XS_TRANSACTION_END) {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;
case 1:
_debug("extract FID count");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
_debug("extract FID array");
ret = afs_extract_data(call, skb, last, call->buffer,
call->count * 3 * 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
_debug("unmarshall FID array");
call->request = kcalloc(call->count,
case 3:
_debug("extract CB count");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
_debug("extract CB array");
ret = afs_extract_data(call, skb, last, call->request,
call->count * 3 * 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
_debug("unmarshall CB array");
cb = call->request;
call->unmarshall++;
case 5:
- _debug("trailer");
- if (skb->len != 0)
- return -EBADMSG;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
/* Record that the message was unmarshalled successfully so
* that the call destructor can know do the callback breaking
break;
}
- if (!last)
- return 0;
call->state = AFS_CALL_REPLYING;
{
struct afs_server *server;
struct in_addr addr;
+ int ret;
_enter(",{%u},%d", skb->len, last);
- if (skb->len > 0)
- return -EBADMSG;
- if (!last)
- return 0;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
_enter(",{%u},%d", skb->len, last);
+ /* There are some arguments that we ignore */
+ afs_data_consumed(call, skb);
if (!last)
- return 0;
+ return -EAGAIN;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
bool last)
{
+ int ret;
+
_enter(",{%u},%d", skb->len, last);
- if (skb->len > 0)
- return -EBADMSG;
- if (!last)
- return 0;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
- if (skb->len > 0)
- return -EBADMSG;
- if (!last)
- return 0;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
switch (call->unmarshall) {
case 0:
break;
}
- if (!last)
- return 0;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
call->state = AFS_CALL_REPLYING;
static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
struct sk_buff *skb, bool last)
{
+ int ret;
+
_enter(",{%u},%d", skb->len, last);
- if (skb->len > 0)
- return -EBADMSG;
- if (!last)
- return 0;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
+ int ret;
_enter(",,%u", last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
case 1:
_debug("extract data length (MSW)");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->count = ntohl(call->tmp);
_debug("DATA length MSW: %u", call->count);
case 2:
_debug("extract data length");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->count = ntohl(call->tmp);
_debug("DATA length: %u", call->count);
ret = afs_extract_data(call, skb, last, buffer,
call->count);
kunmap_atomic(buffer);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
}
call->offset = 0;
case 4:
ret = afs_extract_data(call, skb, last, call->buffer,
(21 + 3 + 6) * 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
bp = call->buffer;
xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
call->unmarshall++;
case 5:
- _debug("trailer");
- if (skb->len != 0)
- return -EBADMSG;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
break;
}
- if (!last)
- return 0;
-
if (call->count < PAGE_SIZE) {
_debug("clear");
page = call->reply3;
{
_enter(",{%u},%d", skb->len, last);
- if (skb->len > 0)
- return -EBADMSG; /* shouldn't be any reply data */
- return 0;
+ /* shouldn't be any reply data */
+ return afs_data_complete(call, skb, last);
}
/*
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
+ int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
+ int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
{
struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
const __be32 *bp;
+ int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
+ int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
{
struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
const __be32 *bp;
+ int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
+ int ret;
_enter(",,%u", last);
- afs_transfer_reply(call, skb);
- if (!last) {
- _leave(" = 0 [more]");
- return 0;
- }
-
- if (call->reply_size != call->reply_max) {
- _leave(" = -EBADMSG [%u != %u]",
- call->reply_size, call->reply_max);
- return -EBADMSG;
- }
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
afs_dataversion_t *store_version;
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
+ int ret;
_enter(",,%u", last);
- afs_transfer_reply(call, skb);
- if (!last) {
- _leave(" = 0 [more]");
- return 0;
- }
-
- if (call->reply_size != call->reply_max) {
- _leave(" = -EBADMSG [%u != %u]",
- call->reply_size, call->reply_max);
- return -EBADMSG;
- }
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
store_version = NULL;
_debug("extract status");
ret = afs_extract_data(call, skb, last, call->buffer,
12 * 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
bp = call->buffer;
xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2);
/* extract the volume name length */
case 2:
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
if (call->count > 0) {
ret = afs_extract_data(call, skb, last, call->reply3,
call->count);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
}
p = call->reply3;
case 4:
ret = afs_extract_data(call, skb, last, call->buffer,
call->count);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->offset = 0;
call->unmarshall++;
/* extract the offline message length */
case 5:
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
if (call->count > 0) {
ret = afs_extract_data(call, skb, last, call->reply3,
call->count);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
}
p = call->reply3;
case 7:
ret = afs_extract_data(call, skb, last, call->buffer,
call->count);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->offset = 0;
call->unmarshall++;
/* extract the message of the day length */
case 8:
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
if (call->count > 0) {
ret = afs_extract_data(call, skb, last, call->reply3,
call->count);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
}
p = call->reply3;
case 10:
ret = afs_extract_data(call, skb, last, call->buffer,
call->count);
- switch (ret) {
- case 0: break;
- case -EAGAIN: return 0;
- default: return ret;
- }
+ if (ret < 0)
+ return ret;
call->offset = 0;
call->unmarshall++;
no_motd_padding:
case 11:
- _debug("trailer %d", skb->len);
- if (skb->len != 0)
- return -EBADMSG;
+ ret = afs_data_complete(call, skb, last);
+ if (ret < 0)
+ return ret;
break;
}
- if (!last)
- return 0;
-
_leave(" = 0 [done]");
return 0;
}
struct sk_buff *skb, bool last)
{
const __be32 *bp;
+ int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
*/
extern int afs_open_socket(void);
extern void afs_close_socket(void);
+extern void afs_data_consumed(struct afs_call *, struct sk_buff *);
extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
const struct afs_wait_mode *);
extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
size_t, size_t);
extern void afs_flat_call_destructor(struct afs_call *);
-extern void afs_transfer_reply(struct afs_call *, struct sk_buff *);
+extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool);
extern void afs_send_empty_reply(struct afs_call *);
extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
size_t);
+static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb,
+ bool last)
+{
+ if (skb->len > 0)
+ return -EBADMSG;
+ afs_data_consumed(call, skb);
+ if (!last)
+ return -EAGAIN;
+ return 0;
+}
+
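With these helpers every delivery routine follows one convention: return 0 only once the reply is complete and consumed, -EAGAIN when more socket buffers are needed (the helper has already consumed the current one), and another negative error such as -EBADMSG on junk. A hedged sketch of a trivial deliverer that expects no reply payload, mirroring the converted callers above:

    static int example_deliver_empty_reply(struct afs_call *call,
                                           struct sk_buff *skb, bool last)
    {
            int ret;

            ret = afs_data_complete(call, skb, last);
            if (ret < 0)
                    return ret;     /* -EAGAIN: need more data; -EBADMSG: junk */

            call->state = AFS_CALL_REPLYING;
            return 0;
    }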
/*
* security.c
*/
}
/*
- * note that the data in a socket buffer is now delivered and that the buffer
- * should be freed
+ * Note that the data in a socket buffer is now consumed.
*/
-static void afs_data_delivered(struct sk_buff *skb)
+void afs_data_consumed(struct afs_call *call, struct sk_buff *skb)
{
if (!skb) {
_debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
} else {
_debug("DLVR %p{%u} [%d]",
skb, skb->mark, atomic_read(&afs_outstanding_skbs));
- if (atomic_dec_return(&afs_outstanding_skbs) == -1)
- BUG();
- rxrpc_kernel_data_delivered(skb);
+ rxrpc_kernel_data_consumed(call->rxcall, skb);
}
}
last = rxrpc_kernel_is_data_last(skb);
ret = call->type->deliver(call, skb, last);
switch (ret) {
+ case -EAGAIN:
+ if (last) {
+ _debug("short data");
+ goto unmarshal_error;
+ }
+ break;
case 0:
- if (last &&
- call->state == AFS_CALL_AWAIT_REPLY)
+ ASSERT(last);
+ if (call->state == AFS_CALL_AWAIT_REPLY)
call->state = AFS_CALL_COMPLETE;
break;
case -ENOTCONN:
abort_code = RX_INVALID_OPERATION;
goto do_abort;
default:
+ unmarshal_error:
abort_code = RXGEN_CC_UNMARSHAL;
if (call->state != AFS_CALL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
call->state = AFS_CALL_ERROR;
break;
}
- afs_data_delivered(skb);
- skb = NULL;
- continue;
+ break;
case RXRPC_SKB_MARK_FINAL_ACK:
_debug("Rcv ACK");
call->state = AFS_CALL_COMPLETE;
}
/*
- * empty a socket buffer into a flat reply buffer
+ * Empty a socket buffer into a flat reply buffer.
*/
-void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
+int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last)
{
size_t len = skb->len;
- if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0)
- BUG();
- call->reply_size += len;
+ if (len > call->reply_max - call->reply_size) {
+ _leave(" = -EBADMSG [%zu > %u]",
+ len, call->reply_max - call->reply_size);
+ return -EBADMSG;
+ }
+
+ if (len > 0) {
+ if (skb_copy_bits(skb, 0, call->buffer + call->reply_size,
+ len) < 0)
+ BUG();
+ call->reply_size += len;
+ }
+
+ afs_data_consumed(call, skb);
+ if (!last)
+ return -EAGAIN;
+
+ if (call->reply_size != call->reply_max) {
+ _leave(" = -EBADMSG [%u != %u]",
+ call->reply_size, call->reply_max);
+ return -EBADMSG;
+ }
+ return 0;
}
/*
}
/*
- * grab the operation ID from an incoming cache manager call
+ * Grab the operation ID from an incoming cache manager call. The socket
+ * buffer is discarded on error or if we don't yet have sufficient data.
*/
static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
bool last)
call->offset += len;
if (call->offset < 4) {
- if (last) {
- _leave(" = -EBADMSG [op ID short]");
- return -EBADMSG;
- }
- _leave(" = 0 [incomplete]");
- return 0;
+ afs_data_consumed(call, skb);
+ _leave(" = -EAGAIN");
+ return -EAGAIN;
}
call->state = AFS_CALL_AWAIT_REQUEST;
}
/*
- * extract a piece of data from the received data socket buffers
+ * Extract a piece of data from the received data socket buffers.
*/
int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
bool last, void *buf, size_t count)
call->offset += len;
if (call->offset < count) {
- if (last) {
- _leave(" = -EBADMSG [%d < %zu]", call->offset, count);
- return -EBADMSG;
- }
+ afs_data_consumed(call, skb);
_leave(" = -EAGAIN");
return -EAGAIN;
}
struct afs_cache_vlocation *entry;
__be32 *bp;
u32 tmp;
- int loop;
+ int loop, ret;
_enter(",,%u", last);
- afs_transfer_reply(call, skb);
- if (!last)
- return 0;
-
- if (call->reply_size != call->reply_max)
- return -EBADMSG;
+ ret = afs_transfer_reply(call, skb, last);
+ if (ret < 0)
+ return ret;
/* unmarshall the reply once we've received all of it */
entry = call->reply;
* thaw_bdev drops it.
*/
sb = get_super(bdev);
- drop_super(sb);
+ if (sb)
+ drop_super(sb);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return sb;
}
{
struct dentry *dent;
dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
- if (dent)
+ if (!IS_ERR(dent))
dent->d_sb->s_iflags |= SB_I_CGROUPWB;
return dent;
}
list_del(&ref2->list);
kmem_cache_free(btrfs_prelim_ref_cache, ref2);
+ cond_resched();
}
}
struct btrfs_workqueue *qgroup_rescan_workers;
struct completion qgroup_rescan_completion;
struct btrfs_work qgroup_rescan_work;
+ bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */
/* filesystem state */
unsigned long fs_state;
struct list_head pinned_chunks;
int creating_free_space_tree;
+ /* Used to record internally whether fs has been frozen */
+ int fs_frozen;
};
struct btrfs_subvolume_writers {
struct btrfs_root *root,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins);
-int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
+int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
u64 min_alloc_size, u64 empty_size, u64 hint_byte,
struct btrfs_key *ins, int is_data, int delalloc);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_head *head_ref = NULL;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_qgroup_extent_record *qexisting;
int count_mod = 1;
int must_insert_reserved = 0;
qrecord->num_bytes = num_bytes;
qrecord->old_roots = NULL;
- qexisting = btrfs_qgroup_insert_dirty_extent(fs_info,
- delayed_refs,
- qrecord);
- if (qexisting)
+		if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
+ delayed_refs, qrecord))
kfree(qrecord);
}
return 0;
}
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
- u64 ref_root, u64 bytenr, u64 num_bytes)
-{
- struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_head *ref_head;
- int ret = 0;
-
- if (!fs_info->quota_enabled || !is_fstree(ref_root))
- return 0;
-
- delayed_refs = &trans->transaction->delayed_refs;
-
- spin_lock(&delayed_refs->lock);
- ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
- if (!ref_head) {
- ret = -ENOENT;
- goto out;
- }
- WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
- ref_head->qgroup_ref_root = ref_root;
- ref_head->qgroup_reserved = num_bytes;
-out:
- spin_unlock(&delayed_refs->lock);
- return ret;
-}
-
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, u64 reserved, int action,
struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
- u64 ref_root, u64 bytenr, u64 num_bytes);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u32 nritems = btrfs_header_nritems(leaf);
int slot;
- if (nritems == 0)
+ if (nritems == 0) {
+ struct btrfs_root *check_root;
+
+ key.objectid = btrfs_header_owner(leaf);
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ check_root = btrfs_get_fs_root(root->fs_info, &key, false);
+ /*
+ * The only reason we also check NULL here is that during
+		 * open_ctree() some roots have not yet been set up.
+ */
+ if (!IS_ERR_OR_NULL(check_root)) {
+ /* if leaf is the root, then it's fine */
+ if (leaf->start !=
+ btrfs_root_bytenr(&check_root->root_item)) {
+ CORRUPT("non-root leaf's nritems is 0",
+ leaf, root, 0);
+ return -EIO;
+ }
+ }
return 0;
+ }
/* Check the 0 item */
if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
return 0;
}
+static int check_node(struct btrfs_root *root, struct extent_buffer *node)
+{
+ unsigned long nr = btrfs_header_nritems(node);
+
+ if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+ btrfs_crit(root->fs_info,
+ "corrupt node: block %llu root %llu nritems %lu",
+ node->start, root->objectid, nr);
+ return -EIO;
+ }
+ return 0;
+}
+
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
u64 phy_offset, struct page *page,
u64 start, u64 end, int mirror)
ret = -EIO;
}
+ if (found_level > 0 && check_node(root, eb))
+ ret = -EIO;
+
if (!ret)
set_extent_buffer_uptodate(eb);
err:
return ret;
}
-static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_id)
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+ u64 root_id)
{
struct btrfs_root *root;
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
fs_info->qgroup_ulist = NULL;
+ fs_info->qgroup_rescan_running = false;
mutex_init(&fs_info->qgroup_rescan_lock);
}
atomic_set(&fs_info->qgroup_op_seq, 0);
atomic_set(&fs_info->reada_works_cnt, 0);
atomic64_set(&fs_info->tree_mod_seq, 0);
+ fs_info->fs_frozen = 0;
fs_info->sb = sb;
fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
fs_info->metadata_ratio = 0;
if (btrfs_root_refs(&root->root_item) == 0)
synchronize_srcu(&fs_info->subvol_srcu);
- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_free_log(NULL, root);
+ if (root->reloc_root) {
+ free_extent_buffer(root->reloc_root->node);
+ free_extent_buffer(root->reloc_root->commit_root);
+ btrfs_put_fs_root(root->reloc_root);
+ root->reloc_root = NULL;
+ }
+ }
if (root->free_ino_pinned)
__btrfs_remove_free_space_cache(root->free_ino_pinned);
smp_mb();
/* wait for the qgroup rescan worker to stop */
- btrfs_qgroup_wait_for_completion(fs_info);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
/* wait for the uuid_scan task to finish */
down(&fs_info->uuid_tree_rescan_sem);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
struct btrfs_key *location);
int btrfs_init_fs_root(struct btrfs_root *root);
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+ u64 root_id);
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
CHUNK_ALLOC_FORCE = 2,
};
-/*
- * Control how reservations are dealt with.
- *
- * RESERVE_FREE - freeing a reservation.
- * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
- * ENOSPC accounting
- * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
- * bytes_may_use as the ENOSPC accounting is done elsewhere
- */
-enum {
- RESERVE_FREE = 0,
- RESERVE_ALLOC = 1,
- RESERVE_ALLOC_NO_ACCOUNT = 2,
-};
-
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
u64 num_bytes, int alloc);
struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
-static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int reserve,
- int delalloc);
+static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 ram_bytes, u64 num_bytes, int delalloc);
+static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
dcs = BTRFS_DC_SETUP;
else if (ret == -ENOSPC)
set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
- btrfs_free_reserved_data_space(inode, 0, num_pages);
out_put:
iput(inode);
}
}
+/*
+ * If force is CHUNK_ALLOC_FORCE:
+ * - return 1 if it successfully allocates a chunk,
+ * - return errors including -ENOSPC otherwise.
+ * If force is NOT CHUNK_ALLOC_FORCE:
+ * - return 0 if it doesn't need to allocate a new chunk,
+ * - return 1 if it successfully allocates a chunk,
+ * - return errors including -ENOSPC otherwise.
+ */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 flags, int force)
{
btrfs_get_alloc_profile(root, 0),
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
- if (ret == -ENOSPC)
+ if (ret > 0 || ret == -ENOSPC)
ret = 0;
break;
case COMMIT_TRANS:
}
/**
- * btrfs_update_reserved_bytes - update the block_group and space info counters
+ * btrfs_add_reserved_bytes - update the block_group and space info counters
* @cache: The cache we are manipulating
+ * @ram_bytes:	The number of bytes of file content; this is the same as
+ *		@num_bytes except on the compression path.
* @num_bytes: The number of bytes in question
- * @reserve: One of the reservation enums
* @delalloc: The blocks are allocated for the delalloc write
*
- * This is called by the allocator when it reserves space, or by somebody who is
- * freeing space that was never actually used on disk. For example if you
- * reserve some space for a new leaf in transaction A and before transaction A
- * commits you free that leaf, you call this with reserve set to 0 in order to
- * clear the reservation.
- *
- * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
+ * This is called by the allocator when it reserves space. Metadata
+ * reservations should be called with RESERVE_ALLOC so we do the proper
* ENOSPC accounting. For data we handle the reservation through clearing the
* delalloc bits in the io_tree. We have to do this since we could end up
* allocating less disk space for the amount of data we have reserved in the
* make the reservation and return -EAGAIN, otherwise this function always
* succeeds.
*/
-static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int reserve, int delalloc)
+static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 ram_bytes, u64 num_bytes, int delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
int ret = 0;
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
- if (reserve != RESERVE_FREE) {
- if (cache->ro) {
- ret = -EAGAIN;
- } else {
- cache->reserved += num_bytes;
- space_info->bytes_reserved += num_bytes;
- if (reserve == RESERVE_ALLOC) {
- trace_btrfs_space_reservation(cache->fs_info,
- "space_info", space_info->flags,
- num_bytes, 0);
- space_info->bytes_may_use -= num_bytes;
- }
-
- if (delalloc)
- cache->delalloc_bytes += num_bytes;
- }
+ if (cache->ro) {
+ ret = -EAGAIN;
} else {
- if (cache->ro)
- space_info->bytes_readonly += num_bytes;
- cache->reserved -= num_bytes;
- space_info->bytes_reserved -= num_bytes;
+ cache->reserved += num_bytes;
+ space_info->bytes_reserved += num_bytes;
+ trace_btrfs_space_reservation(cache->fs_info,
+ "space_info", space_info->flags,
+ ram_bytes, 0);
+ space_info->bytes_may_use -= ram_bytes;
if (delalloc)
- cache->delalloc_bytes -= num_bytes;
+ cache->delalloc_bytes += num_bytes;
}
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
return ret;
}
+/**
+ * btrfs_free_reserved_bytes - update the block_group and space info counters
+ * @cache: The cache we are manipulating
+ * @num_bytes: The number of bytes in question
+ * @delalloc: The blocks are allocated for the delalloc write
+ *
+ * This is called by somebody who is freeing space that was never actually used
+ * on disk. For example if you reserve some space for a new leaf in transaction
+ * A and before transaction A commits you free that leaf, you call this
+ * in order to clear the reservation.
+ */
+static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int delalloc)
+{
+ struct btrfs_space_info *space_info = cache->space_info;
+ int ret = 0;
+
+ spin_lock(&space_info->lock);
+ spin_lock(&cache->lock);
+ if (cache->ro)
+ space_info->bytes_readonly += num_bytes;
+ cache->reserved -= num_bytes;
+ space_info->bytes_reserved -= num_bytes;
+
+ if (delalloc)
+ cache->delalloc_bytes -= num_bytes;
+ spin_unlock(&cache->lock);
+ spin_unlock(&space_info->lock);
+ return ret;
+}
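
The two helpers above replace btrfs_update_reserved_bytes() and its RESERVE_*
enum. A schematic fragment of how they are meant to pair up, pieced together
only from the signatures and call sites in this patch (cache, offset, the byte
counts and the delalloc flag stand in for the caller's real values; this is
not compilable on its own):

	/* reserve: bumps cache->reserved/bytes_reserved by num_bytes and
	 * drops space_info->bytes_may_use by ram_bytes */
	ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes, delalloc);
	if (ret == -EAGAIN) {
		/* the block group went read-only; return the space and
		 * try another block group */
		btrfs_add_free_space(cache, offset, num_bytes);
	}

	/* release on free/error paths: only the on-disk bytes come back */
	btrfs_free_reserved_bytes(cache, num_bytes, delalloc);
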
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
btrfs_add_free_space(cache, buf->start, buf->len);
- btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
+ btrfs_free_reserved_bytes(cache, buf->len, 0);
btrfs_put_block_group(cache);
trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
pin = 0;
* the free space extent currently.
*/
static noinline int find_free_extent(struct btrfs_root *orig_root,
- u64 num_bytes, u64 empty_size,
- u64 hint_byte, struct btrfs_key *ins,
- u64 flags, int delalloc)
+ u64 ram_bytes, u64 num_bytes, u64 empty_size,
+ u64 hint_byte, struct btrfs_key *ins,
+ u64 flags, int delalloc)
{
int ret = 0;
struct btrfs_root *root = orig_root->fs_info->extent_root;
struct btrfs_space_info *space_info;
int loop = 0;
int index = __get_raid_index(flags);
- int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
- RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
bool failed_cluster_refill = false;
bool failed_alloc = false;
bool use_cluster = true;
search_start - offset);
BUG_ON(offset > search_start);
- ret = btrfs_update_reserved_bytes(block_group, num_bytes,
- alloc_type, delalloc);
+ ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
+ num_bytes, delalloc);
if (ret == -EAGAIN) {
btrfs_add_free_space(block_group, offset, num_bytes);
goto loop;
up_read(&info->groups_sem);
}
-int btrfs_reserve_extent(struct btrfs_root *root,
+int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
struct btrfs_key *ins, int is_data, int delalloc)
flags = btrfs_get_alloc_profile(root, is_data);
again:
WARN_ON(num_bytes < root->sectorsize);
- ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
- flags, delalloc);
+ ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
+ hint_byte, ins, flags, delalloc);
if (!ret && !is_data) {
btrfs_dec_block_group_reservations(root->fs_info,
ins->objectid);
num_bytes = min(num_bytes >> 1, ins->offset);
num_bytes = round_down(num_bytes, root->sectorsize);
num_bytes = max(num_bytes, min_alloc_size);
+ ram_bytes = num_bytes;
if (num_bytes == min_alloc_size)
final_tried = true;
goto again;
if (btrfs_test_opt(root->fs_info, DISCARD))
ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
- btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+ btrfs_free_reserved_bytes(cache, len, delalloc);
trace_btrfs_reserved_extent_free(root, start, len);
}
if (!block_group)
return -EINVAL;
- ret = btrfs_update_reserved_bytes(block_group, ins->offset,
- RESERVE_ALLOC_NO_ACCOUNT, 0);
+ ret = btrfs_add_reserved_bytes(block_group, ins->offset,
+ ins->offset, 0);
BUG_ON(ret); /* logic error */
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
if (IS_ERR(block_rsv))
return ERR_CAST(block_rsv);
- ret = btrfs_reserve_extent(root, blocksize, blocksize,
+ ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
empty_size, hint, &ins, 0, 0);
if (ret)
goto out_unuse;
wc->reada_slot = slot;
}
-/*
- * These may not be seen by the usual inc/dec ref code so we have to
- * add them here.
- */
-static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr,
- u64 num_bytes)
-{
- struct btrfs_qgroup_extent_record *qrecord;
- struct btrfs_delayed_ref_root *delayed_refs;
-
- qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
- if (!qrecord)
- return -ENOMEM;
-
- qrecord->bytenr = bytenr;
- qrecord->num_bytes = num_bytes;
- qrecord->old_roots = NULL;
-
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- if (btrfs_qgroup_insert_dirty_extent(trans->fs_info,
- delayed_refs, qrecord))
- kfree(qrecord);
- spin_unlock(&delayed_refs->lock);
-
- return 0;
-}
-
static int account_leaf_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *eb)
num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
- ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
+ ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+ bytenr, num_bytes, GFP_NOFS);
if (ret)
return ret;
}
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
- ret = record_one_subtree_extent(trans, root, child_bytenr,
- root->nodesize);
+ ret = btrfs_qgroup_insert_dirty_extent(trans,
+ root->fs_info, child_bytenr,
+ root->nodesize, GFP_NOFS);
if (ret)
goto out;
}
} else {
ret = 0;
}
+ free_extent_map(em);
goto out;
}
path->slots[0]++;
block_group->iref = 0;
block_group->inode = NULL;
spin_unlock(&block_group->lock);
+ ASSERT(block_group->io_ctl.inode == NULL);
iput(inode);
last = block_group->key.objectid + block_group->key.offset;
btrfs_put_block_group(block_group);
free_excluded_extents(info->extent_root, block_group);
btrfs_remove_free_space_cache(block_group);
+ ASSERT(list_empty(&block_group->dirty_list));
+ ASSERT(list_empty(&block_group->io_list));
+ ASSERT(list_empty(&block_group->bg_list));
+ ASSERT(atomic_read(&block_group->count) == 1);
btrfs_put_block_group(block_group);
spin_lock(&info->block_group_cache_lock);
#define EXTENT_DAMAGED (1U << 14)
#define EXTENT_NORESERVE (1U << 15)
#define EXTENT_QGROUP_RESERVED (1U << 16)
+#define EXTENT_CLEAR_DATA_RESV (1U << 17)
#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
*/
clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
+ /*
+ * An ordered extent might have started before and completed
+ * already with io errors, in which case the inode was not
+ * updated and we end up here. So check the inode's mapping
+ * flags for any errors that might have happened while doing
+ * writeback of file data.
+ */
+ ret = btrfs_inode_check_errors(inode);
inode_unlock(inode);
goto out;
}
}
trans->sync = true;
- btrfs_init_log_ctx(&ctx);
+ btrfs_init_log_ctx(&ctx, inode);
ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
if (ret < 0) {
alloc_start = round_down(offset, blocksize);
alloc_end = round_up(offset + len, blocksize);
+ cur_offset = alloc_start;
/* Make sure we aren't being give some crap mode */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
/* First, check if we exceed the qgroup limit */
INIT_LIST_HEAD(&reserve_list);
- cur_offset = alloc_start;
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
last_byte - cur_offset);
if (ret < 0)
break;
+ } else {
+ /*
+			 * We do not need to reserve an unwritten extent for
+			 * this range; free the reserved data space first,
+			 * otherwise it will result in a false ENOSPC error.
+ */
+ btrfs_free_reserved_data_space(inode, cur_offset,
+ last_byte - cur_offset);
}
free_extent_map(em);
cur_offset = last_byte;
range->start,
range->len, 1 << inode->i_blkbits,
offset + len, &alloc_hint);
+ else
+ btrfs_free_reserved_data_space(inode, range->start,
+ range->len);
list_del(&range->list);
kfree(range);
}
unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
&cached_state, GFP_KERNEL);
out:
- /*
- * As we waited the extent range, the data_rsv_map must be empty
- * in the range, as written data range will be released from it.
- * And for prealloacted extent, it will also be released when
- * its metadata is written.
- * So this is completely used as cleanup.
- */
- btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
inode_unlock(inode);
/* Let go of our reservation. */
- btrfs_free_reserved_data_space(inode, alloc_start,
- alloc_end - alloc_start);
+ if (ret != 0)
+ btrfs_free_reserved_data_space(inode, alloc_start,
+ alloc_end - cur_offset);
return ret;
}
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
prealloc, prealloc, &alloc_hint);
if (ret) {
- btrfs_delalloc_release_space(inode, 0, prealloc);
+ btrfs_delalloc_release_metadata(inode, prealloc);
goto out_put;
}
- btrfs_free_reserved_data_space(inode, 0, prealloc);
ret = btrfs_write_out_ino_cache(root, trans, path, inode);
out_put:
PAGE_SET_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
+ btrfs_free_reserved_data_space_noquota(inode, start,
+ end - start + 1);
goto free_pages_out;
}
}
lock_extent(io_tree, async_extent->start,
async_extent->start + async_extent->ram_size - 1);
- ret = btrfs_reserve_extent(root,
+ ret = btrfs_reserve_extent(root, async_extent->ram_size,
async_extent->compressed_size,
async_extent->compressed_size,
0, alloc_hint, &ins, 1, 1);
EXTENT_DEFRAG, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
PAGE_END_WRITEBACK);
-
+ btrfs_free_reserved_data_space_noquota(inode, start,
+ end - start + 1);
*nr_written = *nr_written +
(end - start + PAGE_SIZE) / PAGE_SIZE;
*page_started = 1;
unsigned long op;
cur_alloc_size = disk_num_bytes;
- ret = btrfs_reserve_extent(root, cur_alloc_size,
+ ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
root->sectorsize, 0, alloc_hint,
&ins, 1, 1);
if (ret < 0)
extent_clear_unlock_delalloc(inode, cur_offset,
cur_offset + num_bytes - 1,
locked_page, EXTENT_LOCKED |
- EXTENT_DELALLOC, PAGE_UNLOCK |
- PAGE_SET_PRIVATE2);
+ EXTENT_DELALLOC |
+ EXTENT_CLEAR_DATA_RESV,
+ PAGE_UNLOCK | PAGE_SET_PRIVATE2);
+
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
cur_offset = extent_end;
return;
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
- && do_list && !(state->state & EXTENT_NORESERVE))
+ && do_list && !(state->state & EXTENT_NORESERVE)
+ && (*bits & (EXTENT_DO_ACCOUNTING |
+ EXTENT_CLEAR_DATA_RESV)))
btrfs_free_reserved_data_space_noquota(inode,
state->start, len);
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
ret = PTR_ERR_OR_ZERO(inode);
- if (ret && ret != -ESTALE)
+ if (ret && ret != -ENOENT)
goto out;
- if (ret == -ESTALE && root == root->fs_info->tree_root) {
+ if (ret == -ENOENT && root == root->fs_info->tree_root) {
struct btrfs_root *dead_root;
struct btrfs_fs_info *fs_info = root->fs_info;
int is_dead_root = 0;
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
*/
- if (ret == -ESTALE) {
+ if (ret == -ENOENT) {
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
/*
* read an inode from the btree into the in-memory inode
*/
-static void btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
filled = true;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ ret = -ENOMEM;
goto make_bad;
+ }
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
- if (ret)
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
goto make_bad;
+ }
leaf = path->nodes[0];
}
btrfs_update_iflags(inode);
- return;
+ return 0;
make_bad:
btrfs_free_path(path);
make_bad_inode(inode);
+ return ret;
}
/*
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
+ u64 last_unlink_trans;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
if (err)
goto out;
+ last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
dentry->d_name.name, dentry->d_name.len);
- if (!err)
+ if (!err) {
btrfs_i_size_write(inode, 0);
+ /*
+ * Propagate the last_unlink_trans value of the deleted dir to
+		 * its parent directory. This is to prevent creating an
+		 * unrecoverable log tree if we do something like this:
+ * 1) create dir foo
+ * 2) create snapshot under dir foo
+ * 3) delete the snapshot
+ * 4) rmdir foo
+ * 5) mkdir foo
+ * 6) fsync foo or some file inside foo
+ */
+ if (last_unlink_trans >= trans->transid)
+ BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
+ }
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
- btrfs_read_locked_inode(inode);
+ int ret;
+
+ ret = btrfs_read_locked_inode(inode);
if (!is_bad_inode(inode)) {
inode_tree_add(inode);
unlock_new_inode(inode);
} else {
unlock_new_inode(inode);
iput(inode);
- inode = ERR_PTR(-ESTALE);
+ ASSERT(ret < 0);
+ inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
}
}
int ret;
alloc_hint = get_extent_allocation_hint(inode, start, len);
- ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
+ ret = btrfs_reserve_extent(root, len, len, root->sectorsize, 0,
alloc_hint, &ins, 1, 1);
if (ret)
return ERR_PTR(ret);
ret = PTR_ERR(em2);
goto unlock_err;
}
+ /*
+			 * For an inode marked NODATACOW or an extent marked
+			 * PREALLOC, use the existing or preallocated extent,
+			 * so there is no need to adjust btrfs_space_info's
+			 * bytes_may_use.
+ */
+ btrfs_free_reserved_data_space_noquota(inode,
+ start, len);
goto unlock;
}
}
i_size_write(inode, start + len);
adjust_dio_outstanding_extents(inode, dio_data, len);
- btrfs_free_reserved_data_space(inode, start, len);
WARN_ON(dio_data->reserve < len);
dio_data->reserve -= len;
dio_data->unsubmitted_oe_range_end = start + len;
u64 last_alloc = (u64)-1;
int ret = 0;
bool own_trans = true;
+ u64 end = start + num_bytes - 1;
if (trans)
own_trans = false;
* sized chunks.
*/
cur_bytes = min(cur_bytes, last_alloc);
- ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
- *alloc_hint, &ins, 1, 0);
+ ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
+ min_size, 0, *alloc_hint, &ins, 1, 0);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
if (own_trans)
btrfs_end_transaction(trans, root);
}
+ if (cur_offset < end)
+ btrfs_free_reserved_data_space(inode, cur_offset,
+ end - cur_offset + 1);
return ret;
}
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return btrfs_qgroup_wait_for_completion(root->fs_info);
+ return btrfs_qgroup_wait_for_completion(root->fs_info, true);
}
static long _btrfs_ioctl_set_received_subvol(struct file *file,
goto out;
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
- btrfs_qgroup_wait_for_completion(fs_info);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
return ret;
}
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record)
+int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record)
{
struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
struct rb_node *parent_node = NULL;
else if (bytenr > entry->bytenr)
p = &(*p)->rb_right;
else
- return entry;
+ return 1;
}
rb_link_node(&record->node, parent_node, p);
rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
- return NULL;
+ return 0;
+}
+
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+ gfp_t gfp_flag)
+{
+ struct btrfs_qgroup_extent_record *record;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ int ret;
+
+ if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
+ return 0;
+ if (WARN_ON(trans == NULL))
+ return -EINVAL;
+ record = kmalloc(sizeof(*record), gfp_flag);
+ if (!record)
+ return -ENOMEM;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+ record->bytenr = bytenr;
+ record->num_bytes = num_bytes;
+ record->old_roots = NULL;
+
+ spin_lock(&delayed_refs->lock);
+ ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
+ record);
+ spin_unlock(&delayed_refs->lock);
+ if (ret > 0)
+ kfree(record);
+ return 0;
}
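
The wrapped variant above allocates the record and takes the delayed ref
spinlock itself, so the call sites converted elsewhere in this patch reduce to
a single call. A minimal caller sketch, with bytenr and num_bytes as
placeholders for whatever extent is being recorded:

	ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
					       bytenr, num_bytes, GFP_NOFS);
	if (ret < 0)	/* only -ENOMEM or -EINVAL (NULL trans) are possible */
		return ret;
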
#define UPDATE_NEW 0
int err = -ENOMEM;
int ret = 0;
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = true;
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
path = btrfs_alloc_path();
if (!path)
goto out;
}
done:
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = false;
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
complete_all(&fs_info->qgroup_rescan_completion);
}
return 0;
}
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+ bool interruptible)
{
int running;
int ret = 0;
mutex_lock(&fs_info->qgroup_rescan_lock);
spin_lock(&fs_info->qgroup_lock);
- running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+ running = fs_info->qgroup_rescan_running;
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
- if (running)
+ if (!running)
+ return 0;
+
+ if (interruptible)
ret = wait_for_completion_interruptible(
&fs_info->qgroup_rescan_completion);
+ else
+ wait_for_completion(&fs_info->qgroup_rescan_completion);
return ret;
}
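
With the new "interruptible" argument, the two kinds of waiters shown in the
other hunks of this patch look roughly like this (illustrative only):

	/* ioctl path: the wait can be aborted by a signal, via
	 * wait_for_completion_interruptible() */
	return btrfs_qgroup_wait_for_completion(root->fs_info, true);

	/* quota-disable and unmount paths: must not return until the
	 * rescan worker has really stopped */
	btrfs_qgroup_wait_for_completion(fs_info, false);
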
struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+ bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_delayed_extent_op;
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record);
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account that extent at commit trans time.
+ *
+ * No lock version, caller must acquire delayed ref lock and allocate memory.
+ *
+ * Return 0 for success insert
+ * Return >0 for existing record, caller can free @record safely.
+ * Error is not possible
+ */
+int btrfs_qgroup_insert_dirty_extent_nolock(
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record);
+
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account for that extent at transaction commit time.
+ *
+ * Better encapsulated version: it allocates the record and takes the
+ * delayed ref lock itself.
+ *
+ * Return 0 if the operation is done.
+ * Return <0 on error, such as a memory allocation failure or an invalid
+ * parameter (NULL @trans).
+ */
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+ gfp_t gfp_flag);
+
int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
+#include "qgroup.h"
/*
* backref_node, mapping_node and tree_block start with this
u64 num_bytes;
int nr = 0;
int ret = 0;
+ u64 prealloc_start = cluster->start - offset;
+ u64 prealloc_end = cluster->end - offset;
+ u64 cur_offset;
BUG_ON(cluster->start != cluster->boundary[0]);
inode_lock(inode);
- ret = btrfs_check_data_free_space(inode, cluster->start,
- cluster->end + 1 - cluster->start);
+ ret = btrfs_check_data_free_space(inode, prealloc_start,
+ prealloc_end + 1 - prealloc_start);
if (ret)
goto out;
+ cur_offset = prealloc_start;
while (nr < cluster->nr) {
start = cluster->boundary[nr] - offset;
if (nr + 1 < cluster->nr)
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
num_bytes = end + 1 - start;
+ if (cur_offset < start)
+ btrfs_free_reserved_data_space(inode, cur_offset,
+ start - cur_offset);
ret = btrfs_prealloc_file_range(inode, 0, start,
num_bytes, num_bytes,
end + 1, &alloc_hint);
+ cur_offset = end + 1;
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
if (ret)
break;
nr++;
}
- btrfs_free_reserved_data_space(inode, cluster->start,
- cluster->end + 1 - cluster->start);
+ if (cur_offset < prealloc_end)
+ btrfs_free_reserved_data_space(inode, cur_offset,
+ prealloc_end + 1 - cur_offset);
out:
inode_unlock(inode);
return ret;
return 0;
}
+/*
+ * Qgroup fixer for data chunk relocation.
+ * The data relocation is done in the following steps:
+ * 1) Copy data extents into the data reloc tree
+ * 2) Create a tree reloc tree (a special snapshot) for the related subvolumes
+ * 3) Modify the file extents in the tree reloc tree
+ * 4) Merge the tree reloc tree with the original fs tree by swapping tree
+ *    blocks
+ *
+ * The problem is that the data and tree reloc trees are not accounted to
+ * qgroup, and step 4) only informs qgroup to track tree block changes, not
+ * the file extents inside those tree blocks.
+ *
+ * The good news is that the related data extents are all in the data reloc
+ * tree, so we only need to inform qgroup to track all file extents in the
+ * data reloc tree before committing the transaction.
+ */
+static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc)
+{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
+ struct inode *inode = rc->data_inode;
+ struct btrfs_root *data_reloc_root = BTRFS_I(inode)->root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ int ret = 0;
+
+ if (!fs_info->quota_enabled)
+ return 0;
+
+ /*
+	 * The qgroup fix is only valid for the stage where we update the
+	 * data pointers (UPDATE_DATA_PTRS).
+	 * For the MOVING_DATA stage we would miss the timing of the tree
+	 * block swap and cannot fix it.
+ */
+ if (!(rc->stage == UPDATE_DATA_PTRS && rc->extents_found))
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, data_reloc_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ lock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
+ while (1) {
+ struct btrfs_file_extent_item *fi;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid > btrfs_ino(inode))
+ break;
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ goto next;
+ fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ if (btrfs_file_extent_type(path->nodes[0], fi) !=
+ BTRFS_FILE_EXTENT_REG)
+ goto next;
+ ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info,
+ btrfs_file_extent_disk_bytenr(path->nodes[0], fi),
+ btrfs_file_extent_disk_num_bytes(path->nodes[0], fi),
+ GFP_NOFS);
+ if (ret < 0)
+ break;
+next:
+ ret = btrfs_next_item(data_reloc_root, path);
+ if (ret < 0)
+ break;
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ }
+	unlock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
struct rb_root blocks = RB_ROOT;
/* get rid of pinned extents */
trans = btrfs_join_transaction(rc->extent_root);
- if (IS_ERR(trans))
+ if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- else
- btrfs_commit_transaction(trans, rc->extent_root);
+ goto out_free;
+ }
+ err = qgroup_fix_relocated_data_extents(trans, rc);
+ if (err < 0) {
+ btrfs_abort_transaction(trans, err);
+ goto out_free;
+ }
+ btrfs_commit_transaction(trans, rc->extent_root);
out_free:
btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
btrfs_free_path(path);
unset_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root);
- if (IS_ERR(trans))
+ if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- else
- err = btrfs_commit_transaction(trans, rc->extent_root);
+ goto out_free;
+ }
+ err = qgroup_fix_relocated_data_extents(trans, rc);
+ if (err < 0) {
+ btrfs_abort_transaction(trans, err);
+ goto out_free;
+ }
+ err = btrfs_commit_transaction(trans, rc->extent_root);
out_free:
kfree(rc);
out:
root_key.objectid = key.offset;
key.offset++;
+ /*
+ * The root might have been inserted already, as before we look
+ * for orphan roots, log replay might have happened, which
+ * triggers a transaction commit and qgroup accounting, which
+ * in turn reads and inserts fs roots while doing backref
+ * walking.
+ */
+ root = btrfs_lookup_fs_root(tree_root->fs_info,
+ root_key.objectid);
+ if (root) {
+ WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
+ &root->state));
+ if (btrfs_root_refs(&root->root_item) == 0)
+ btrfs_add_dead_root(root);
+ continue;
+ }
+
root = btrfs_read_fs_root(tree_root, &root_key);
err = PTR_ERR_OR_ZERO(root);
if (err && err != -ENOENT) {
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
err = btrfs_insert_fs_root(root->fs_info, root);
- /*
- * The root might have been inserted already, as before we look
- * for orphan roots, log replay might have happened, which
- * triggers a transaction commit and qgroup accounting, which
- * in turn reads and inserts fs roots while doing backref
- * walking.
- */
- if (err == -EEXIST)
- err = 0;
if (err) {
+ BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root);
break;
}
u64 parent_ino;
u64 ino;
u64 gen;
- bool is_orphan;
struct list_head update_refs;
};
char name[];
};
+static void inconsistent_snapshot_error(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result,
+ const char *what)
+{
+ const char *result_string;
+
+ switch (result) {
+ case BTRFS_COMPARE_TREE_NEW:
+ result_string = "new";
+ break;
+ case BTRFS_COMPARE_TREE_DELETED:
+ result_string = "deleted";
+ break;
+ case BTRFS_COMPARE_TREE_CHANGED:
+ result_string = "updated";
+ break;
+ case BTRFS_COMPARE_TREE_SAME:
+ ASSERT(0);
+ result_string = "unchanged";
+ break;
+ default:
+ ASSERT(0);
+ result_string = "unexpected";
+ }
+
+ btrfs_err(sctx->send_root->fs_info,
+ "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
+ result_string, what, sctx->cmp_key->objectid,
+ sctx->send_root->root_key.objectid,
+ (sctx->parent_root ?
+ sctx->parent_root->root_key.objectid : 0));
+}
+
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
static struct waiting_dir_move *
* was already unlinked/moved, so we can safely assume that we will not
* overwrite anything at this point in time.
*/
- if (other_inode > sctx->send_progress) {
+ if (other_inode > sctx->send_progress ||
+ is_waiting_for_move(sctx, other_inode)) {
ret = get_inode_info(sctx->parent_root, other_inode, NULL,
who_gen, NULL, NULL, NULL, NULL);
if (ret < 0)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
+ if (ret > 0)
+ ret = -ENOENT;
if (ret < 0)
goto out;
}
if (loc.objectid > send_progress) {
+ struct orphan_dir_info *odi;
+
+ odi = get_orphan_dir_info(sctx, dir);
+ free_orphan_dir_info(sctx, odi);
ret = 0;
goto out;
}
pm->parent_ino = parent_ino;
pm->ino = ino;
pm->gen = ino_gen;
- pm->is_orphan = is_orphan;
INIT_LIST_HEAD(&pm->list);
INIT_LIST_HEAD(&pm->update_refs);
RB_CLEAR_NODE(&pm->node);
return NULL;
}
+static int path_loop(struct send_ctx *sctx, struct fs_path *name,
+ u64 ino, u64 gen, u64 *ancestor_ino)
+{
+ int ret = 0;
+ u64 parent_inode = 0;
+ u64 parent_gen = 0;
+ u64 start_ino = ino;
+
+ *ancestor_ino = 0;
+ while (ino != BTRFS_FIRST_FREE_OBJECTID) {
+ fs_path_reset(name);
+
+ if (is_waiting_for_rm(sctx, ino))
+ break;
+ if (is_waiting_for_move(sctx, ino)) {
+ if (*ancestor_ino == 0)
+ *ancestor_ino = ino;
+ ret = get_first_ref(sctx->parent_root, ino,
+ &parent_inode, &parent_gen, name);
+ } else {
+ ret = __get_cur_name_and_parent(sctx, ino, gen,
+ &parent_inode,
+ &parent_gen, name);
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ if (parent_inode == start_ino) {
+ ret = 1;
+ if (*ancestor_ino == 0)
+ *ancestor_ino = ino;
+ break;
+ }
+ ino = parent_inode;
+ gen = parent_gen;
+ }
+ return ret;
+}
+
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
struct fs_path *from_path = NULL;
u64 parent_ino, parent_gen;
struct waiting_dir_move *dm = NULL;
u64 rmdir_ino = 0;
+ u64 ancestor;
+ bool is_orphan;
int ret;
name = fs_path_alloc();
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
rmdir_ino = dm->rmdir_ino;
+ is_orphan = dm->orphanized;
free_waiting_dir_move(sctx, dm);
- if (pm->is_orphan) {
+ if (is_orphan) {
ret = gen_unique_name(sctx, pm->ino,
pm->gen, from_path);
} else {
goto out;
sctx->send_progress = sctx->cur_ino + 1;
+ ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ LIST_HEAD(deleted_refs);
+ ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
+ ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
+ &pm->update_refs, &deleted_refs,
+ is_orphan);
+ if (ret < 0)
+ goto out;
+ if (rmdir_ino) {
+ dm = get_waiting_dir_move(sctx, pm->ino);
+ ASSERT(dm);
+ dm->rmdir_ino = rmdir_ino;
+ }
+ goto out;
+ }
fs_path_reset(name);
to_path = name;
name = NULL;
/* already deleted */
goto finish;
}
- ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
+ ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
if (ret < 0)
goto out;
if (!ret)
* and old parent(s).
*/
list_for_each_entry(cur, &pm->update_refs, list) {
- if (cur->dir == rmdir_ino)
+ /*
+ * The parent inode might have been deleted in the send snapshot
+ */
+ ret = get_inode_info(sctx->send_root, cur->dir, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+ if (ret == -ENOENT) {
+ ret = 0;
continue;
+ }
+ if (ret < 0)
+ goto out;
+
ret = send_utimes(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
u64 left_gen;
u64 right_gen;
int ret = 0;
+ struct waiting_dir_move *wdm;
if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
return 0;
goto out;
}
- if (is_waiting_for_move(sctx, di_key.objectid)) {
+ wdm = get_waiting_dir_move(sctx, di_key.objectid);
+ if (wdm && !wdm->orphanized) {
ret = add_pending_dir_move(sctx,
sctx->cur_ino,
sctx->cur_inode_gen,
ret = is_ancestor(sctx->parent_root,
sctx->cur_ino, sctx->cur_inode_gen,
ino, path_before);
- break;
+ if (ret)
+ break;
}
fs_path_reset(path_before);
goto out;
if (ret) {
struct name_cache_entry *nce;
+ struct waiting_dir_move *wdm;
ret = orphanize_inode(sctx, ow_inode, ow_gen,
cur->full_path);
if (ret < 0)
goto out;
+
+ /*
+				 * If ow_inode has its rename operation delayed,
+ * make sure that its orphanized name is used in
+ * the source path when performing its rename
+ * operation.
+ */
+ if (is_waiting_for_move(sctx, ow_inode)) {
+ wdm = get_waiting_dir_move(sctx,
+ ow_inode);
+ ASSERT(wdm);
+ wdm->orphanized = true;
+ }
+
/*
* Make sure we clear our orphanized inode's
* name from the name cache. This is because the
name_cache_delete(sctx, nce);
kfree(nce);
}
+
+ /*
+ * ow_inode might currently be an ancestor of
+ * cur_ino, therefore compute valid_path (the
+ * current path of cur_ino) again because it
+ * might contain the pre-orphanization name of
+ * ow_inode, which is no longer valid.
+ */
+ fs_path_reset(valid_path);
+ ret = get_cur_path(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen, valid_path);
+ if (ret < 0)
+ goto out;
} else {
ret = send_unlink(sctx, cur->full_path);
if (ret < 0)
{
int ret = 0;
- BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+ if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ inconsistent_snapshot_error(sctx, result, "reference");
+ return -EIO;
+ }
if (!sctx->cur_inode_new_gen &&
sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
{
int ret = 0;
- BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+ if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ inconsistent_snapshot_error(sctx, result, "xattr");
+ return -EIO;
+ }
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result == BTRFS_COMPARE_TREE_NEW)
{
int ret = 0;
- BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+ if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ inconsistent_snapshot_error(sctx, result, "extent");
+ return -EIO;
+ }
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED)
struct btrfs_trans_handle *trans;
struct btrfs_root *root = btrfs_sb(sb)->tree_root;
+ root->fs_info->fs_frozen = 1;
+ /*
+ * We don't need a barrier here, we'll wait for any transaction that
+ * could be in progress on other threads (and do delayed iputs that
+ * we want to avoid on a frozen filesystem), or do the commit
+ * ourselves.
+ */
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
/* no transaction, don't bother */
return btrfs_commit_transaction(trans, root);
}
+static int btrfs_unfreeze(struct super_block *sb)
+{
+ struct btrfs_root *root = btrfs_sb(sb)->tree_root;
+
+ root->fs_info->fs_frozen = 0;
+ return 0;
+}
+
static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
{
struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
.statfs = btrfs_statfs,
.remount_fs = btrfs_remount,
.freeze_fs = btrfs_freeze,
+ .unfreeze_fs = btrfs_unfreeze,
};
static const struct file_operations btrfs_ctl_fops = {
kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ /*
+	 * If the fs has been frozen, we cannot handle delayed iputs here,
+	 * otherwise it will result in a deadlock on SB_FREEZE_FS.
+ */
if (current != root->fs_info->transaction_kthread &&
- current != root->fs_info->cleaner_kthread)
+ current != root->fs_info->cleaner_kthread &&
+ !root->fs_info->fs_frozen)
btrfs_run_delayed_iputs(root);
return ret;
#include "backref.h"
#include "hash.h"
#include "compression.h"
+#include "qgroup.h"
/* magic values for the inode_only field in btrfs_log_inode:
*
ins.type = BTRFS_EXTENT_ITEM_KEY;
offset = key->offset - btrfs_file_extent_offset(eb, item);
+ /*
+	 * Manually record the dirty extent: here we did a shallow copy of
+	 * the file extent item and skipped the normal backref update,
+	 * modifying the extent tree all by ourselves.
+	 * So we need to record the dirty extent for qgroup manually, as
+	 * the owner of the file extent changed from the log tree (which
+	 * does not affect qgroup) to the fs/file tree (which does).
+ */
+ ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+ btrfs_file_extent_disk_bytenr(eb, item),
+ btrfs_file_extent_disk_num_bytes(eb, item),
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
+
if (ins.objectid > 0) {
u64 csum_start;
u64 csum_end;
*/
mutex_unlock(&root->log_mutex);
- btrfs_init_log_ctx(&root_log_ctx);
+ btrfs_init_log_ctx(&root_log_ctx, NULL);
mutex_lock(&log_root_tree->log_mutex);
atomic_inc(&log_root_tree->log_batch);
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
const int slot,
const struct btrfs_key *key,
- struct inode *inode)
+ struct inode *inode,
+ u64 *other_ino)
{
int ret;
struct btrfs_path *search_path;
search_path, parent,
name, this_name_len, 0);
if (di && !IS_ERR(di)) {
- ret = 1;
+ struct btrfs_key di_key;
+
+ btrfs_dir_item_key_to_cpu(search_path->nodes[0],
+ di, &di_key);
+ if (di_key.type == BTRFS_INODE_ITEM_KEY) {
+ ret = 1;
+ *other_ino = di_key.objectid;
+ } else {
+ ret = -EAGAIN;
+ }
goto out;
} else if (IS_ERR(di)) {
ret = PTR_ERR(di);
if ((min_key.type == BTRFS_INODE_REF_KEY ||
min_key.type == BTRFS_INODE_EXTREF_KEY) &&
BTRFS_I(inode)->generation == trans->transid) {
+ u64 other_ino = 0;
+
ret = btrfs_check_ref_name_override(path->nodes[0],
path->slots[0],
- &min_key, inode);
+ &min_key, inode,
+ &other_ino);
if (ret < 0) {
err = ret;
goto out_unlock;
- } else if (ret > 0) {
- err = 1;
- btrfs_set_log_full_commit(root->fs_info, trans);
- goto out_unlock;
+ } else if (ret > 0 && ctx &&
+ other_ino != btrfs_ino(ctx->inode)) {
+ struct btrfs_key inode_key;
+ struct inode *other_inode;
+
+ if (ins_nr > 0) {
+ ins_nr++;
+ } else {
+ ins_nr = 1;
+ ins_start_slot = path->slots[0];
+ }
+ ret = copy_items(trans, inode, dst_path, path,
+ &last_extent, ins_start_slot,
+ ins_nr, inode_only,
+ logged_isize);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+ }
+ ins_nr = 0;
+ btrfs_release_path(path);
+ inode_key.objectid = other_ino;
+ inode_key.type = BTRFS_INODE_ITEM_KEY;
+ inode_key.offset = 0;
+ other_inode = btrfs_iget(root->fs_info->sb,
+ &inode_key, root,
+ NULL);
+ /*
+ * If the other inode that had a conflicting dir
+ * entry was deleted in the current transaction,
+			 * we don't need to do more work nor fall back to
+ * a transaction commit.
+ */
+ if (IS_ERR(other_inode) &&
+ PTR_ERR(other_inode) == -ENOENT) {
+ goto next_key;
+ } else if (IS_ERR(other_inode)) {
+ err = PTR_ERR(other_inode);
+ goto out_unlock;
+ }
+ /*
+ * We are safe logging the other inode without
+ * acquiring its i_mutex as long as we log with
+ * the LOG_INODE_EXISTS mode. We're safe against
+ * concurrent renames of the other inode as well
+ * because during a rename we pin the log and
+ * update the log with the new name before we
+ * unpin it.
+ */
+ err = btrfs_log_inode(trans, root, other_inode,
+ LOG_INODE_EXISTS,
+ 0, LLONG_MAX, ctx);
+ iput(other_inode);
+ if (err)
+ goto out_unlock;
+ else
+ goto next_key;
}
}
ins_nr = 0;
}
btrfs_release_path(path);
-
+next_key:
if (min_key.offset < (u64)-1) {
min_key.offset++;
} else if (min_key.type < max_key.type) {
if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
break;
- if (IS_ROOT(parent))
+ if (IS_ROOT(parent)) {
+ inode = d_inode(parent);
+ if (btrfs_must_commit_transaction(trans, inode))
+ ret = 1;
break;
+ }
parent = dget_parent(parent);
dput(old_parent);
int log_transid;
int io_err;
bool log_new_dentries;
+ struct inode *inode;
struct list_head list;
};
-static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
+static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
+ struct inode *inode)
{
ctx->log_ret = 0;
ctx->log_transid = 0;
ctx->io_err = 0;
ctx->log_new_dentries = false;
+ ctx->inode = inode;
INIT_LIST_HEAD(&ctx->list);
}
struct btrfs_device *device;
device = container_of(work, struct btrfs_device, rcu_work);
-
- if (device->bdev)
- blkdev_put(device->bdev, device->mode);
-
rcu_string_free(device->name);
kfree(device);
}
schedule_work(&device->rcu_work);
}
+static void btrfs_close_bdev(struct btrfs_device *device)
+{
+ if (device->bdev && device->writeable) {
+ sync_blockdev(device->bdev);
+ invalidate_bdev(device->bdev);
+ }
+
+ if (device->bdev)
+ blkdev_put(device->bdev, device->mode);
+}
+
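
Since free_device() no longer drops the block device (see the free_device hunk
above), every teardown path converted in this patch is expected to follow the
same ordering; roughly:

	btrfs_close_bdev(device);		/* sync, invalidate, blkdev_put */
	call_rcu(&device->rcu, free_device);	/* frees only the name and the struct */
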
static void btrfs_close_one_device(struct btrfs_device *device)
{
struct btrfs_fs_devices *fs_devices = device->fs_devices;
if (device->missing)
fs_devices->missing_devices--;
- if (device->bdev && device->writeable) {
- sync_blockdev(device->bdev);
- invalidate_bdev(device->bdev);
- }
+ btrfs_close_bdev(device);
new_device = btrfs_alloc_device(NULL, &device->devid,
device->uuid);
btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
}
+ btrfs_close_bdev(device);
+
call_rcu(&device->rcu, free_device);
num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
/* zero out the old super if it is writable */
btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
}
+
+ btrfs_close_bdev(srcdev);
+
call_rcu(&srcdev->rcu, free_device);
/*
* the device_list_mutex lock.
*/
btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
+
+ btrfs_close_bdev(tgtdev);
call_rcu(&tgtdev->rcu, free_device);
}
{
struct inode *inode = &ci->vfs_inode;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
- struct ceph_mds_session *session = *psession;
+ struct ceph_mds_session *session = NULL;
int mds;
+
dout("ceph_flush_snaps %p\n", inode);
+ if (psession)
+ session = *psession;
retry:
spin_lock(&ci->i_ceph_lock);
if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
} else {
path = NULL;
pathlen = 0;
+ pathbase = 0;
}
spin_lock(&ci->i_ceph_lock);
static const struct file_operations format3_fops;
static const struct file_operations format4_fops;
-static int table_open(struct inode *inode, struct file *file)
+static int table_open1(struct inode *inode, struct file *file)
{
struct seq_file *seq;
- int ret = -1;
+ int ret;
- if (file->f_op == &format1_fops)
- ret = seq_open(file, &format1_seq_ops);
- else if (file->f_op == &format2_fops)
- ret = seq_open(file, &format2_seq_ops);
- else if (file->f_op == &format3_fops)
- ret = seq_open(file, &format3_seq_ops);
- else if (file->f_op == &format4_fops)
- ret = seq_open(file, &format4_seq_ops);
+ ret = seq_open(file, &format1_seq_ops);
+ if (ret)
+ return ret;
+
+ seq = file->private_data;
+ seq->private = inode->i_private; /* the dlm_ls */
+ return 0;
+}
+
+static int table_open2(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int ret;
+
+ ret = seq_open(file, &format2_seq_ops);
+ if (ret)
+ return ret;
+
+ seq = file->private_data;
+ seq->private = inode->i_private; /* the dlm_ls */
+ return 0;
+}
+
+static int table_open3(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int ret;
+
+ ret = seq_open(file, &format3_seq_ops);
+ if (ret)
+ return ret;
+
+ seq = file->private_data;
+ seq->private = inode->i_private; /* the dlm_ls */
+ return 0;
+}
+
+static int table_open4(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+	int ret;
+
+	ret = seq_open(file, &format4_seq_ops);
if (ret)
return ret;
static const struct file_operations format1_fops = {
.owner = THIS_MODULE,
- .open = table_open,
+ .open = table_open1,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
static const struct file_operations format2_fops = {
.owner = THIS_MODULE,
- .open = table_open,
+ .open = table_open2,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
static const struct file_operations format3_fops = {
.owner = THIS_MODULE,
- .open = table_open,
+ .open = table_open3,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
static const struct file_operations format4_fops = {
.owner = THIS_MODULE,
- .open = table_open,
+ .open = table_open4,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
trace_f2fs_write_end(inode, pos, len, copied);
set_page_dirty(page);
- f2fs_put_page(page, 1);
if (pos + copied > i_size_read(inode))
f2fs_i_size_write(inode, pos + copied);
+ f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
/* NAT cache management */
struct radix_tree_root nat_root;/* root of the nat entry cache */
struct radix_tree_root nat_set_root;/* root of the nat set cache */
- struct percpu_rw_semaphore nat_tree_lock; /* protect nat_tree_lock */
+	struct rw_semaphore nat_tree_lock;	/* protect the nat entry cache */
struct list_head nat_entries; /* cached nat entry list (clean) */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
- struct percpu_rw_semaphore cp_rwsem; /* blocking FS operations */
+ struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
wait_queue_head_t cp_wait;
unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
- percpu_down_read(&sbi->cp_rwsem);
+ down_read(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
- percpu_up_read(&sbi->cp_rwsem);
+ up_read(&sbi->cp_rwsem);
}
static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
- percpu_down_write(&sbi->cp_rwsem);
+ down_write(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
- percpu_up_write(&sbi->cp_rwsem);
+ up_write(&sbi->cp_rwsem);
}
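
A schematic of how the converted cp_rwsem wrappers pair up, assuming the usual
split implied by their names (regular operations take the read side, the
checkpoint writer takes the write side); not a complete function:

	f2fs_lock_op(sbi);	/* shared: an ordinary FS operation */
	/* ... modify metadata ... */
	f2fs_unlock_op(sbi);

	f2fs_lock_all(sbi);	/* exclusive: block operations for a checkpoint */
	/* ... write the checkpoint ... */
	f2fs_unlock_all(sbi);
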
static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
if (unlikely(f2fs_readonly(src->i_sb)))
return -EROFS;
- if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
- return -EISDIR;
+ if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
+ return -EINVAL;
if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
return -EOPNOTSUPP;
inode_lock(src);
- if (src != dst)
- inode_lock(dst);
+ if (src != dst) {
+ if (!inode_trylock(dst)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
ret = -EINVAL;
if (pos_in + len > src->i_size || pos_in + len < pos_in)
out_unlock:
if (src != dst)
inode_unlock(dst);
+out:
inode_unlock(src);
return ret;
}
struct nat_entry *e;
bool need = false;
- percpu_down_read(&nm_i->nat_tree_lock);
+ down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e) {
if (!get_nat_flag(e, IS_CHECKPOINTED) &&
!get_nat_flag(e, HAS_FSYNCED_INODE))
need = true;
}
- percpu_up_read(&nm_i->nat_tree_lock);
+ up_read(&nm_i->nat_tree_lock);
return need;
}
struct nat_entry *e;
bool is_cp = true;
- percpu_down_read(&nm_i->nat_tree_lock);
+ down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false;
- percpu_up_read(&nm_i->nat_tree_lock);
+ up_read(&nm_i->nat_tree_lock);
return is_cp;
}
struct nat_entry *e;
bool need_update = true;
- percpu_down_read(&nm_i->nat_tree_lock);
+ down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ino);
if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
(get_nat_flag(e, IS_CHECKPOINTED) ||
get_nat_flag(e, HAS_FSYNCED_INODE)))
need_update = false;
- percpu_up_read(&nm_i->nat_tree_lock);
+ up_read(&nm_i->nat_tree_lock);
return need_update;
}
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
- percpu_down_write(&nm_i->nat_tree_lock);
+ down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
e = grab_nat_entry(nm_i, ni->nid);
set_nat_flag(e, HAS_FSYNCED_INODE, true);
set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
}
- percpu_up_write(&nm_i->nat_tree_lock);
+ up_write(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
struct f2fs_nm_info *nm_i = NM_I(sbi);
int nr = nr_shrink;
- percpu_down_write(&nm_i->nat_tree_lock);
+ if (!down_write_trylock(&nm_i->nat_tree_lock))
+ return 0;
while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
struct nat_entry *ne;
__del_from_nat_cache(nm_i, ne);
nr_shrink--;
}
- percpu_up_write(&nm_i->nat_tree_lock);
+ up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink;
}
ni->nid = nid;
/* Check nat cache */
- percpu_down_read(&nm_i->nat_tree_lock);
+ down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
- percpu_up_read(&nm_i->nat_tree_lock);
+ up_read(&nm_i->nat_tree_lock);
return;
}
node_info_from_raw_nat(ni, &ne);
f2fs_put_page(page, 1);
cache:
- percpu_up_read(&nm_i->nat_tree_lock);
+ up_read(&nm_i->nat_tree_lock);
/* cache nat entry */
- percpu_down_write(&nm_i->nat_tree_lock);
+ down_write(&nm_i->nat_tree_lock);
cache_nat_entry(sbi, nid, &ne);
- percpu_up_write(&nm_i->nat_tree_lock);
+ up_write(&nm_i->nat_tree_lock);
}
/*
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
- percpu_down_read(&nm_i->nat_tree_lock);
+ down_read(&nm_i->nat_tree_lock);
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
remove_free_nid(nm_i, nid);
}
up_read(&curseg->journal_rwsem);
- percpu_up_read(&nm_i->nat_tree_lock);
+ up_read(&nm_i->nat_tree_lock);
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
if (!nm_i->dirty_nat_cnt)
return;
- percpu_down_write(&nm_i->nat_tree_lock);
+ down_write(&nm_i->nat_tree_lock);
/*
* if there are no enough space in journal to store dirty nat
list_for_each_entry_safe(set, tmp, &sets, set_list)
__flush_nat_entry_set(sbi, set);
- percpu_up_write(&nm_i->nat_tree_lock);
+ up_write(&nm_i->nat_tree_lock);
f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->free_nid_list_lock);
- if (percpu_init_rwsem(&nm_i->nat_tree_lock))
- return -ENOMEM;
+ init_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
spin_unlock(&nm_i->free_nid_list_lock);
/* destroy nat cache */
- percpu_down_write(&nm_i->nat_tree_lock);
+ down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_cache(nm_i,
nid, NATVEC_SIZE, natvec))) {
unsigned idx;
kmem_cache_free(nat_entry_set_slab, setvec[idx]);
}
}
- percpu_up_write(&nm_i->nat_tree_lock);
+ up_write(&nm_i->nat_tree_lock);
- percpu_free_rwsem(&nm_i->nat_tree_lock);
kfree(nm_i->nat_bitmap);
sbi->nm_info = NULL;
kfree(nm_i);
percpu_counter_destroy(&sbi->nr_pages[i]);
percpu_counter_destroy(&sbi->alloc_valid_block_count);
percpu_counter_destroy(&sbi->total_valid_inode_count);
-
- percpu_free_rwsem(&sbi->cp_rwsem);
}
static void f2fs_put_super(struct super_block *sb)
{
int i, err;
- if (percpu_init_rwsem(&sbi->cp_rwsem))
- return -ENOMEM;
-
for (i = 0; i < NR_COUNT_TYPE; i++) {
err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
if (err)
sbi->write_io[i].bio = NULL;
}
+ init_rwsem(&sbi->cp_rwsem);
init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
{
struct backing_dev_info *bdi;
+ /*
+ * If we are expecting writeback progress we must submit plugged IO.
+ */
+ if (blk_needs_flush_plug(current))
+ blk_schedule_flush_plug(current);
+
if (!nr_pages)
nr_pages = get_nr_dirty_pages();
* Now the data has been copied, commit the range we've copied. This
* should not fail unless the filesystem has had a fatal error.
*/
- ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
- flags, &iomap);
+ if (ops->iomap_end) {
+ ret = ops->iomap_end(inode, pos, length,
+ written > 0 ? written : 0,
+ flags, &iomap);
+ }
return written ? written : ret;
}
if (mapping_writably_mapped(inode->i_mapping))
flush_dcache_page(page);
- pagefault_disable();
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
- pagefault_enable();
flush_dcache_page(page);
- mark_page_accessed(page);
status = iomap_write_end(inode, pos, bytes, copied, page);
if (unlikely(status < 0))
if (ret)
return ret;
- ret = filemap_write_and_wait(inode->i_mapping);
- if (ret)
- return ret;
+ if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
+ ret = filemap_write_and_wait(inode->i_mapping);
+ if (ret)
+ return ret;
+ }
while (len > 0) {
ret = iomap_apply(inode, start, len, 0, ops, &ctx,
iomap_fiemap_actor);
+ /* inode with no (attribute) mapping will give ENOENT */
+ if (ret == -ENOENT)
+ break;
if (ret < 0)
return ret;
if (ret == 0)
case 0:
break;
case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);
+extern void nfs4_set_lease_period(struct nfs_client *clp,
+ unsigned long lease,
+ unsigned long lastrenewed);
+
/* nfs4state.c */
struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp);
err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
if (err == 0) {
- struct nfs_client *clp = server->nfs_client;
-
- spin_lock(&clp->cl_lock);
- clp->cl_lease_time = fsinfo->lease_time * HZ;
- clp->cl_last_renewal = now;
- spin_unlock(&clp->cl_lock);
+ nfs4_set_lease_period(server->nfs_client,
+ fsinfo->lease_time * HZ,
+ now);
break;
}
err = nfs4_handle_exception(server, err, &exception);
cancel_delayed_work_sync(&clp->cl_renewd);
}
+/**
+ * nfs4_set_lease_period - Sets the lease period on a nfs_client
+ *
+ * @clp: pointer to nfs_client
+ * @lease: new value for lease period
+ * @lastrenewed: time at which lease was last renewed
+ */
+void nfs4_set_lease_period(struct nfs_client *clp,
+ unsigned long lease,
+ unsigned long lastrenewed)
+{
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = lease;
+ clp->cl_last_renewal = lastrenewed;
+ spin_unlock(&clp->cl_lock);
+
+ /* Cap maximum reconnect timeout at 1/2 lease period */
+ rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1);
+}
+
/*
* Local variables:
* c-basic-offset: 8
{
int status;
struct nfs_fsinfo fsinfo;
+ unsigned long now;
if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
nfs4_schedule_state_renewal(clp);
return 0;
}
+ now = jiffies;
status = nfs4_proc_get_lease_time(clp, &fsinfo);
if (status == 0) {
- /* Update lease time and schedule renewal */
- spin_lock(&clp->cl_lock);
- clp->cl_lease_time = fsinfo.lease_time * HZ;
- clp->cl_last_renewal = jiffies;
- spin_unlock(&clp->cl_lock);
-
+ nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
nfs4_schedule_state_renewal(clp);
}
return nfs_ok;
}
+static __be32
+nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
+{
+ struct nfs4_ol_stateid *stp = openlockstateid(s);
+ __be32 ret;
+
+ mutex_lock(&stp->st_mutex);
+
+ ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+ if (ret)
+ goto out;
+
+ ret = nfserr_locks_held;
+ if (check_for_locks(stp->st_stid.sc_file,
+ lockowner(stp->st_stateowner)))
+ goto out;
+
+ release_lock_stateid(stp);
+ ret = nfs_ok;
+
+out:
+ mutex_unlock(&stp->st_mutex);
+ nfs4_put_stid(s);
+ return ret;
+}
+
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_free_stateid *free_stateid)
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
- struct nfs4_ol_stateid *stp;
struct nfs4_client *cl = cstate->session->se_client;
__be32 ret = nfserr_bad_stateid;
ret = nfserr_locks_held;
break;
case NFS4_LOCK_STID:
- ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
- if (ret)
- break;
- stp = openlockstateid(s);
- ret = nfserr_locks_held;
- if (check_for_locks(stp->st_stid.sc_file,
- lockowner(stp->st_stateowner)))
- break;
- WARN_ON(!unhash_lock_stateid(stp));
+ atomic_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
- nfs4_put_stid(s);
- ret = nfs_ok;
+ ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
case NFS4_REVOKED_DELEG_STID:
dp = delegstateid(s);
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *ost,
struct nfsd4_lock *lock,
- struct nfs4_ol_stateid **lst, bool *new)
+ struct nfs4_ol_stateid **plst, bool *new)
{
__be32 status;
struct nfs4_file *fi = ost->st_stid.sc_file;
struct nfs4_client *cl = oo->oo_owner.so_client;
struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
struct nfs4_lockowner *lo;
+ struct nfs4_ol_stateid *lst;
unsigned int strhashval;
+ bool hashed;
lo = find_lockowner_str(cl, &lock->lk_new_owner);
if (!lo) {
goto out;
}
- *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
- if (*lst == NULL) {
+retry:
+ lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
+ if (lst == NULL) {
status = nfserr_jukebox;
goto out;
}
+
+ mutex_lock(&lst->st_mutex);
+
+ /* See if it's still hashed to avoid race with FREE_STATEID */
+ spin_lock(&cl->cl_lock);
+ hashed = !list_empty(&lst->st_perfile);
+ spin_unlock(&cl->cl_lock);
+
+ if (!hashed) {
+ mutex_unlock(&lst->st_mutex);
+ nfs4_put_stid(&lst->st_stid);
+ goto retry;
+ }
status = nfs_ok;
+ *plst = lst;
out:
nfs4_put_stateowner(&lo->lo_owner);
return status;
goto out;
status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new);
- if (status == nfs_ok)
- mutex_lock(&lock_stp->st_mutex);
} else {
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
if (IS_ERR(dchild))
return nfserrno(host_err);
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
- if (err) {
- dput(dchild);
+ /*
+ * We unconditionally drop our ref to dchild as fh_compose will have
+ * already grabbed its own ref for it.
+ */
+ dput(dchild);
+ if (err)
return err;
- }
return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
rdev, resfhp);
}
struct page *page = buf->page;
if (page_count(page) == 1) {
- if (memcg_kmem_enabled()) {
+ if (memcg_kmem_enabled())
memcg_kmem_uncharge(page, 0);
- __ClearPageKmemcg(page);
- }
__SetPageLocked(page);
return 0;
}
cached = 0;
for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
- pages[lru] = global_page_state(NR_LRU_BASE + lru);
+ pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
available = si_mem_available();
size -= n;
buf += n;
copied += n;
- if (!m->count)
+ if (!m->count) {
+ m->from = 0;
m->index++;
+ }
if (!size)
goto Done;
}
p = c->gap_lebs;
do {
- ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
+ ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
inode->i_ino, dentry, size);
- return __ubifs_getxattr(inode, name, buffer, size);
+ name = xattr_full_name(handler, name);
+ return __ubifs_getxattr(inode, name, buffer, size);
}
static int ubifs_xattr_set(const struct xattr_handler *handler,
dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
name, inode->i_ino, dentry, size);
+ name = xattr_full_name(handler, name);
+
if (value)
return __ubifs_setxattr(inode, name, value, size, flags);
else
xfs_extlen_t *flenp, /* result length */
int *stat) /* status: 0-freelist, 1-normal/none */
{
+ struct xfs_owner_info oinfo;
int error;
xfs_agblock_t fbno;
xfs_extlen_t flen;
error0);
args->wasfromfl = 1;
trace_xfs_alloc_small_freelist(args);
+
+ /*
+ * If we're feeding an AGFL block to something that
+ * doesn't live in the free space, we need to clear
+ * out the OWN_AG rmap.
+ */
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
+ error = xfs_rmap_free(args->tp, args->agbp, args->agno,
+ fbno, 1, &oinfo);
+ if (error)
+ goto error0;
+
*stat = 0;
return 0;
}
offsetof(xfs_agf_t, agf_longest),
offsetof(xfs_agf_t, agf_btreeblks),
offsetof(xfs_agf_t, agf_uuid),
+ offsetof(xfs_agf_t, agf_rmap_blocks),
sizeof(xfs_agf_t)
};
__be32 agf_btreeblks; /* # of blocks held in AGF btrees */
uuid_t agf_uuid; /* uuid of filesystem */
+ __be32 agf_rmap_blocks; /* rmapbt blocks used */
+ __be32 agf_padding; /* padding */
+
/*
* reserve some contiguous space for future logged fields before we add
* the unlogged fields. This makes the range logging via flags and
* structure offsets much simpler.
*/
- __be64 agf_spare64[16];
+ __be64 agf_spare64[15];
/* unlogged fields, written during buffer writeback. */
__be64 agf_lsn; /* last write sequence */
#define XFS_AGF_LONGEST 0x00000400
#define XFS_AGF_BTREEBLKS 0x00000800
#define XFS_AGF_UUID 0x00001000
-#define XFS_AGF_NUM_BITS 13
+#define XFS_AGF_RMAP_BLOCKS 0x00002000
+#define XFS_AGF_NUM_BITS 14
#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
#define XFS_AGF_FLAGS \
{ XFS_AGF_FREEBLKS, "FREEBLKS" }, \
{ XFS_AGF_LONGEST, "LONGEST" }, \
{ XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
- { XFS_AGF_UUID, "UUID" }
+ { XFS_AGF_UUID, "UUID" }, \
+ { XFS_AGF_RMAP_BLOCKS, "RMAP_BLOCKS" }
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
union xfs_btree_ptr *new,
int *stat)
{
+ struct xfs_buf *agbp = cur->bc_private.a.agbp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
int error;
xfs_agblock_t bno;
xfs_trans_agbtree_delta(cur->bc_tp, 1);
new->s = cpu_to_be32(bno);
+ be32_add_cpu(&agf->agf_rmap_blocks, 1);
+ xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
bno, 1);
+ be32_add_cpu(&agf->agf_rmap_blocks, -1);
+ xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
if (error)
return error;
if (!(bp->b_flags & _XBF_IN_FLIGHT))
return;
- ASSERT(bp->b_flags & XBF_ASYNC);
bp->b_flags &= ~_XBF_IN_FLIGHT;
percpu_counter_dec(&bp->b_target->bt_io_count);
}
* page is inserted into the pagecache when we have to serve a write
* fault on a hole. It should never be dirtied and can simply be
* dropped from the pagecache once we get real data for the page.
+ *
+ * XXX: This is racy against mmap, and there's nothing we can do about
+ * it. dax_do_io() should really do this invalidation internally as
+	 * it will know if we've allocated over a hole for this specific IO and
+ * if so it needs to update the mapping tree and invalidate existing
+ * PTEs over the newly allocated range. Remove this invalidation when
+ * dax_do_io() is fixed up.
*/
if (mapping->nrpages) {
- ret = invalidate_inode_pages2(mapping);
+ loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
+
+ ret = invalidate_inode_pages2_range(mapping,
+ iocb->ki_pos >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
WARN_ON_ONCE(ret);
}
agf->agf_roots[XFS_BTNUM_RMAPi] =
cpu_to_be32(XFS_RMAP_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
+ agf->agf_rmap_blocks = cpu_to_be32(1);
}
agf->agf_flfirst = cpu_to_be32(1);
* is in the delayed allocation extent on which we sit
* but before our buffer starts.
*/
-
nimaps = 0;
while (nimaps == 0) {
nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres,
+ /*
+ * We have already reserved space for the extent and any
+ * indirect blocks when creating the delalloc extent,
+ * there is no need to reserve space in this transaction
+ * again.
+ */
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
return error;
trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
- xfs_bmbt_to_iomap(ip, iomap, &imap);
- } else if (nimaps) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- trace_xfs_iomap_found(ip, offset, length, 0, &imap);
- xfs_bmbt_to_iomap(ip, iomap, &imap);
} else {
+ ASSERT(nimaps);
+
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- trace_xfs_iomap_not_found(ip, offset, length, 0, &imap);
- iomap->blkno = IOMAP_NULL_BLOCK;
- iomap->type = IOMAP_HOLE;
- iomap->offset = offset;
- iomap->length = length;
+ trace_xfs_iomap_found(ip, offset, length, 0, &imap);
}
+ xfs_bmbt_to_iomap(ip, iomap, &imap);
return 0;
}
.iomap_begin = xfs_file_iomap_begin,
.iomap_end = xfs_file_iomap_end,
};
+
+static int
+xfs_xattr_iomap_begin(
+ struct inode *inode,
+ loff_t offset,
+ loff_t length,
+ unsigned flags,
+ struct iomap *iomap)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
+ struct xfs_bmbt_irec imap;
+ int nimaps = 1, error = 0;
+ unsigned lockmode;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ lockmode = xfs_ilock_data_map_shared(ip);
+
+ /* if there are no attribute fork or extents, return ENOENT */
+	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+ error = -ENOENT;
+ goto out_unlock;
+ }
+
+ ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
+ error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
+ &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
+out_unlock:
+ xfs_iunlock(ip, lockmode);
+
+ if (!error) {
+ ASSERT(nimaps);
+ xfs_bmbt_to_iomap(ip, iomap, &imap);
+ }
+
+ return error;
+}
+
+struct iomap_ops xfs_xattr_iomap_ops = {
+ .iomap_begin = xfs_xattr_iomap_begin,
+};
struct xfs_bmbt_irec *);
extern struct iomap_ops xfs_iomap_ops;
+extern struct iomap_ops xfs_xattr_iomap_ops;
#endif /* __XFS_IOMAP_H__*/
int error;
xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
- error = iomap_fiemap(inode, fieinfo, start, length, &xfs_iomap_ops);
+ if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
+ fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
+ error = iomap_fiemap(inode, fieinfo, start, length,
+ &xfs_xattr_iomap_ops);
+ } else {
+ error = iomap_fiemap(inode, fieinfo, start, length,
+ &xfs_iomap_ops);
+ }
xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
return error;
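
The XFS fiemap path above now routes FIEMAP_FLAG_XATTR requests to the new
xfs_xattr_iomap_ops. As a hedged illustration (not part of the patch), the
following stand-alone user-space sketch issues such a request through the
standard FS_IOC_FIEMAP ioctl; the 32-extent buffer size is an arbitrary choice.

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/fiemap.h>
	#include <linux/fs.h>

	int main(int argc, char **argv)
	{
		if (argc != 2) {
			fprintf(stderr, "usage: %s <file>\n", argv[0]);
			return 1;
		}

		int fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* room for up to 32 extents; any size works for a demo */
		struct fiemap *fm = calloc(1, sizeof(*fm) +
					   32 * sizeof(struct fiemap_extent));
		if (!fm)
			return 1;

		fm->fm_start = 0;
		fm->fm_length = ~0ULL;			/* the whole fork */
		fm->fm_flags = FIEMAP_FLAG_XATTR;	/* map the attribute fork, not file data */
		fm->fm_extent_count = 32;

		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
			perror("FS_IOC_FIEMAP");	/* e.g. fails when there is no attr fork */
		else
			printf("%u xattr extent(s) mapped\n", fm->fm_mapped_extents);

		free(fm);
		return 0;
	}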
DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_not_found);
DECLARE_EVENT_CLASS(xfs_simple_io_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
#include <asm-generic/qrwlock_types.h>
/*
- * Writer states & reader shift and bias
+ * Writer states & reader shift and bias.
+ *
+ * | +0 | +1 | +2 | +3 |
+ * ----+----+----+----+----+
+ * LE | 78 | 56 | 34 | 12 | 0x12345678
+ * ----+----+----+----+----+
+ * | wr | rd |
+ * +----+----+----+----+
+ *
+ * ----+----+----+----+----+
+ * BE | 12 | 34 | 56 | 78 | 0x12345678
+ * ----+----+----+----+----+
+ * | rd | wr |
+ * +----+----+----+----+
*/
#define _QW_WAITING 1 /* A writer is waiting */
#define _QW_LOCKED 0xff /* A writer holds the lock */
(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
+/**
+ * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: the write byte address of a queue rwlock
+ */
+static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
+{
+ return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
+}
+
/**
* queued_write_unlock - release write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
- smp_store_release((u8 *)&lock->cnts, 0);
+ smp_store_release(__qrwlock_write_byte(lock), 0);
}
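
The __qrwlock_write_byte() helper exists because the writer state lives in the
low-order byte of the lock word, whose byte index differs between little- and
big-endian layouts (0 vs 3, per the diagram above). For anyone who wants to
convince themselves of the offset arithmetic, here is a minimal user-space
sketch with hypothetical names; it is not the kernel implementation.

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical stand-in for the qrwlock counter word */
	struct fake_qrwlock { uint32_t cnts; };

	/* same idea as __qrwlock_write_byte(): byte 0 on little endian, byte 3 on big endian */
	static uint8_t *fake_write_byte(struct fake_qrwlock *lock)
	{
		union { uint32_t word; uint8_t bytes[4]; } probe = { .word = 1 };
		int big_endian = (probe.bytes[0] == 0);

		return (uint8_t *)lock + 3 * big_endian;
	}

	int main(void)
	{
		/* 0xff in the low-order byte plays the role of _QW_LOCKED */
		struct fake_qrwlock lock = { .cnts = 0xff };

		*fake_write_byte(&lock) = 0;	/* analogue of the smp_store_release() unlock */
		printf("cnts after write unlock: 0x%08x\n", lock.cnts);	/* 0 on either endianness */
		return 0;
	}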
/*
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptibly if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict, bool interruptible, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
{
if (bio &&
bio->bi_iter.bi_size &&
- bio_op(bio) != REQ_OP_DISCARD)
+ bio_op(bio) != REQ_OP_DISCARD &&
+ bio_op(bio) != REQ_OP_SECURE_ERASE)
return true;
return false;
static inline bool bio_no_advance_iter(struct bio *bio)
{
- return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME;
+ return bio_op(bio) == REQ_OP_DISCARD ||
+ bio_op(bio) == REQ_OP_SECURE_ERASE ||
+ bio_op(bio) == REQ_OP_WRITE_SAME;
}
static inline bool bio_is_rw(struct bio *bio)
if (bio_op(bio) == REQ_OP_DISCARD)
return 1;
+ if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+ return 1;
+
if (bio_op(bio) == REQ_OP_WRITE_SAME)
return 1;
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
- if (unlikely(op == REQ_OP_DISCARD))
+ if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
if (unlikely(op == REQ_OP_WRITE_SAME))
if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+ if (!q->limits.chunk_sectors ||
+ req_op(rq) == REQ_OP_DISCARD ||
+ req_op(rq) == REQ_OP_SECURE_ERASE)
return blk_queue_get_max_sectors(q, req_op(rq));
return min(blk_max_size_offset(q, offset),
"Attempted to advance past end of bvec iter\n");
while (bytes) {
- unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+ unsigned iter_len = bvec_iter_len(bv, *iter);
+ unsigned len = min(bytes, iter_len);
bytes -= len;
iter->bi_size -= len;
*/
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
#if GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*
- * The seemingly unused void * variable is to validate @p is indeed a pointer
- * type. All pointer types silently cast to void *.
+ * The seemingly unused size_t variable is to validate @p is indeed a pointer
+ * type by making sure it can be dereferenced.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
- __maybe_unused const void * const _________p2 = _________p1; \
+ size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
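
The sizeof(*(p)) expression in the hunk above turns "p must be a pointer" into
a compile-time check. A minimal user-space illustration follows, using a
hypothetical LDEREF() macro in place of lockless_dereference() and omitting the
barrier, since only the type check is being demonstrated (GNU C statement
expressions are required, as in the original).

	#include <stddef.h>
	#include <stdio.h>

	/* only the pointer-type check is modelled here; no memory barriers */
	#define LDEREF(p) \
	({ \
		__typeof__(p) _p1 = (p); \
		size_t __attribute__((unused)) _sz = sizeof(*(p)); /* non-pointers fail to compile */ \
		_p1; \
	})

	int main(void)
	{
		int x = 42;
		int *px = &x;

		printf("%d\n", *LDEREF(px));	/* fine: px is a pointer */
		/* LDEREF(x); */		/* would not compile: an int cannot be dereferenced */
		return 0;
	}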
struct tegra_mipi_device *tegra_mipi_request(struct device *device);
void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_enable(struct tegra_mipi_device *device);
+int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_calibrate(struct tegra_mipi_device *device);
#endif
*/
#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307
#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
#define E_ITS_MAPD_DEVICE_OOR 0x010801
#define E_ITS_MAPC_PROCNUM_OOR 0x010902
/* create, destroy, and name are mandatory */
struct kvm_device_ops {
const char *name;
+
+ /*
+ * create is called holding kvm->lock and any operations not suitable
+ * to do while holding the lock should be deferred to init (see
+ * below).
+ */
int (*create)(struct kvm_device *dev, u32 type);
+ /*
+ * init is called after create if create is successful and is called
+ * outside of holding kvm->lock.
+ */
+ void (*init)(struct kvm_device *dev);
+
/*
* Destroy is responsible for freeing dev.
*
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
+# define is_migrate_cma_page(_page) false
#endif
#define for_each_migratetype_order(order, type) \
MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
/* Support PCI MSIX interrupts */
MSI_FLAG_PCI_MSIX = (1 << 3),
+ /* Needs early activate, required for PCI */
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
-int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(const struct net_device *dev));
+int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
-#define PCI_IRQ_NOLEGACY (1 << 0) /* don't use legacy interrupts */
-#define PCI_IRQ_NOMSI (1 << 1) /* don't use MSI interrupts */
-#define PCI_IRQ_NOMSIX (1 << 2) /* don't use MSI-X interrupts */
-#define PCI_IRQ_NOAFFINITY (1 << 3) /* don't auto-assign affinity */
+#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */
+#define PCI_IRQ_ALL_TYPES \
+ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
/* kmem_cache style wrapper around pci_alloc_consistent() */
u64 parent_gen;
u64 generation;
int pin_count;
+#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
+#endif
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
};
unsigned int hrtimer_active;
struct pmu *unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
+#endif
};
struct perf_output_handle {
* and other debug macros are compiled out unless either DEBUG is defined
* or CONFIG_DYNAMIC_DEBUG is set.
*/
-
-#ifdef CONFIG_PRINTK
-
-asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...);
-
-#define pr_emerg(fmt, ...) __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...) __pr_alert(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...) __pr_crit(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...) __pr_err(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...) __pr_warn(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...) __pr_notice(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...) __pr_info(pr_fmt(fmt), ##__VA_ARGS__)
-
-#else
-
-#define pr_emerg(fmt, ...) printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...) printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...) printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...) printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-#endif
-
-#define pr_warning pr_warn
-
+#define pr_emerg(fmt, ...) \
+ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_notice(fmt, ...) \
+ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
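
With the definitions above, the pr_*() helpers are plain printk() wrappers
again, and pr_warn is once more an alias for pr_warning. A hedged sketch of the
usual call-site pattern in a module; the "mydrv" prefix and the function names
are made up for illustration.

	#define pr_fmt(fmt) "mydrv: " fmt	/* must precede the printk.h include chain */

	#include <linux/module.h>
	#include <linux/printk.h>

	static int __init mydrv_init(void)
	{
		pr_info("loaded\n");		/* printk(KERN_INFO "mydrv: loaded\n") */
		pr_warn("demo mode only\n");	/* identical to pr_warning() after this change */
		return 0;
	}

	static void __exit mydrv_exit(void)
	{
		pr_info("unloaded\n");
	}

	module_init(mydrv_init);
	module_exit(mydrv_exit);
	MODULE_LICENSE("GPL");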
/*
* Like KERN_CONT, pr_cont() should only be used when continuing
* a line with no newline ('\n') enclosed. Otherwise it defaults
u8 max_tc;
};
+enum qed_dcbx_sf_ieee_type {
+ QED_DCBX_SF_IEEE_ETHTYPE,
+ QED_DCBX_SF_IEEE_TCP_PORT,
+ QED_DCBX_SF_IEEE_UDP_PORT,
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
struct qed_app_entry {
bool ethtype;
+ enum qed_dcbx_sf_ieee_type sf_ieee;
bool enabled;
u8 prio;
u16 proto_id;
sctp_authhdr_t auth_hdr;
} __packed sctp_auth_chunk_t;
-struct sctp_info {
- __u32 sctpi_tag;
- __u32 sctpi_state;
- __u32 sctpi_rwnd;
- __u16 sctpi_unackdata;
- __u16 sctpi_penddata;
- __u16 sctpi_instrms;
- __u16 sctpi_outstrms;
- __u32 sctpi_fragmentation_point;
- __u32 sctpi_inqueue;
- __u32 sctpi_outqueue;
- __u32 sctpi_overall_error;
- __u32 sctpi_max_burst;
- __u32 sctpi_maxseg;
- __u32 sctpi_peer_rwnd;
- __u32 sctpi_peer_tag;
- __u8 sctpi_peer_capable;
- __u8 sctpi_peer_sack;
- __u16 __reserved1;
-
- /* assoc status info */
- __u64 sctpi_isacks;
- __u64 sctpi_osacks;
- __u64 sctpi_opackets;
- __u64 sctpi_ipackets;
- __u64 sctpi_rtxchunks;
- __u64 sctpi_outofseqtsns;
- __u64 sctpi_idupchunks;
- __u64 sctpi_gapcnt;
- __u64 sctpi_ouodchunks;
- __u64 sctpi_iuodchunks;
- __u64 sctpi_oodchunks;
- __u64 sctpi_iodchunks;
- __u64 sctpi_octrlchunks;
- __u64 sctpi_ictrlchunks;
-
- /* primary transport info */
- struct sockaddr_storage sctpi_p_address;
- __s32 sctpi_p_state;
- __u32 sctpi_p_cwnd;
- __u32 sctpi_p_srtt;
- __u32 sctpi_p_rto;
- __u32 sctpi_p_hbinterval;
- __u32 sctpi_p_pathmaxrxt;
- __u32 sctpi_p_sackdelay;
- __u32 sctpi_p_sackfreq;
- __u32 sctpi_p_ssthresh;
- __u32 sctpi_p_partial_bytes_acked;
- __u32 sctpi_p_flight_size;
- __u16 sctpi_p_error;
- __u16 __reserved2;
-
- /* sctp sock info */
- __u32 sctpi_s_autoclose;
- __u32 sctpi_s_adaptation_ind;
- __u32 sctpi_s_pd_point;
- __u8 sctpi_s_nodelay;
- __u8 sctpi_s_disable_fragments;
- __u8 sctpi_s_v4mapped;
- __u8 sctpi_s_frag_interleave;
- __u32 sctpi_s_type;
- __u32 __reserved3;
-};
-
struct sctp_infox {
struct sctp_info *sctpinfo;
struct sctp_association *asoc;
__skb_linearize(skb) : 0;
}
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+ unsigned int off)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_block_sub(skb->csum,
+ csum_partial(start, len, 0), off);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
/**
* skb_postpull_rcsum - update checksum for received skb after pull
* @skb: buffer to update
* update the CHECKSUM_COMPLETE checksum, or set ip_summed to
* CHECKSUM_NONE so that it can be recomputed from scratch.
*/
-
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
- else if (skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_start_offset(skb) < 0)
- skb->ip_summed = CHECKSUM_NONE;
+ __skb_postpull_rcsum(skb, start, len, 0);
}
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+ unsigned int off)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_block_add(skb->csum,
+ csum_partial(start, len, 0), off);
+}
+/**
+ * skb_postpush_rcsum - update checksum for received skb after push
+ * @skb: buffer to update
+ * @start: start of data after push
+ * @len: length of data pushed
+ *
+ * After doing a push on a received packet, you need to call this to
+ * update the CHECKSUM_COMPLETE checksum.
+ */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- /* For performing the reverse operation to skb_postpull_rcsum(),
- * we can instead of ...
- *
- * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
- *
- * ... just use this equivalent version here to save a few
- * instructions. Feeding csum of 0 in csum_partial() and later
- * on adding skb->csum is equivalent to feed skb->csum in the
- * first place.
- */
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_partial(start, len, skb->csum);
+ __skb_postpush_rcsum(skb, start, len, 0);
}
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
/**
* skb_push_rcsum - push skb and update receive checksum
* @skb: buffer to update
void kzfree(const void *);
size_t ksize(const void *);
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+ unsigned long n,
+ struct page *page)
+{
+ return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
struct rpc_xprt *,
void *),
void *data);
+void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+ unsigned long timeo);
const char *rpc_proc_name(const struct rpc_task *task);
#endif /* __KERNEL__ */
struct work_struct task_cleanup;
struct timer_list timer;
unsigned long last_used,
- idle_timeout;
+ idle_timeout,
+ max_reconnect_timeout;
/*
* Send stuff
void __user *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int proc_douintvec(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ __check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
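
check_object_size() above is the hook the hardened-usercopy work adds on the
user-copy paths; it compiles away unless CONFIG_HARDENED_USERCOPY is set. The
following kernel-context sketch only shows the shape of a caller: the wrapper
and the raw copy routine are made-up names, and the real call sites are the
arch copy_to_user()/copy_from_user() implementations.

	#include <linux/thread_info.h>
	#include <linux/uaccess.h>

	/* hypothetical, simplified shape of a copy-out path with the hook wired in */
	static inline unsigned long
	example_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		check_object_size(from, n, true);	/* to_user == true: data leaves the kernel */
		return __arch_raw_copy_to_user(to, from, n);	/* made-up name for the raw copy */
	}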
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
#endif /* __LINUX_UACCESS_H__ */
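
The hunk above changes unsafe_get_user()/unsafe_put_user() to take an error
label instead of returning a value, so a run of accesses can share one exit
path. A hedged kernel-context sketch of the new calling convention; the
function and variable names are invented.

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* hypothetical example: read two u32s with a single fault label */
	static int read_pair_from_user(const u32 __user *uptr, u32 *a, u32 *b)
	{
		user_access_begin();
		unsafe_get_user(*a, uptr, efault);
		unsafe_get_user(*b, uptr + 1, efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}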
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
int tcf_action_destroy(struct list_head *actions, int bind);
-int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
- struct tcf_result *res);
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+ int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct nlattr *nla,
struct nlattr *est, char *n, int ovr,
int bind, struct list_head *);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
-#define tc_no_actions(_exts) \
- (list_empty(&(_exts)->actions))
-
-#define tc_for_each_action(_a, _exts) \
- list_for_each_entry(a, &(_exts)->actions, list)
-
-#define tc_single_action(_exts) \
- (list_is_singular(&(_exts)->actions))
+#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
u64 packets, u64 lastuse)
{
+#ifdef CONFIG_NET_CLS_ACT
if (!a->ops->stats_update)
return;
a->ops->stats_update(a, bytes, packets, lastuse);
+#endif
}
-#else /* CONFIG_NET_CLS_ACT */
-
-#define tc_no_actions(_exts) true
-#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
-#define tc_single_action(_exts) false
-#define tcf_action_stats_update(a, bytes, packets, lastuse)
-
-#endif /* CONFIG_NET_CLS_ACT */
#endif
unsigned long,
gfp_t);
int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
void rxrpc_kernel_end_call(struct rxrpc_call *);
bool rxrpc_kernel_is_data_last(struct sk_buff *);
u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
int rxrpc_kernel_get_error_number(struct sk_buff *);
-void rxrpc_kernel_data_delivered(struct sk_buff *);
void rxrpc_kernel_free_skb(struct sk_buff *);
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
int rxrpc_kernel_reject_call(struct socket *);
skb_push(skb, hdr_len);
+ skb_set_inner_protocol(skb, proto);
skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)skb->data;
greh->flags = gre_tnl_flags_to_gre_flags(flags);
to = from | htonl(INET_ECN_CE << 20);
*(__be32 *)iph = to;
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(csum_sub(skb->csum, from), to);
+ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+ (__force __wsum)to);
return 1;
}
int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
- u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
+ u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta);
int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
- struct list_head actions;
+ int nr_actions;
+ struct tc_action **actions;
#endif
/* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0.
{
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
- INIT_LIST_HEAD(&exts->actions);
+ exts->nr_actions = 0;
+ exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
+ GFP_KERNEL);
+ WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
#endif
exts->action = action;
exts->police = police;
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- return !list_empty(&exts->actions);
+ return exts->nr_actions;
#else
return 0;
#endif
return tcf_exts_is_predicative(exts);
}
+static inline void tcf_exts_to_list(const struct tcf_exts *exts,
+ struct list_head *actions)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ int i;
+
+ for (i = 0; i < exts->nr_actions; i++) {
+ struct tc_action *a = exts->actions[i];
+
+ list_add(&a->list, actions);
+ }
+#endif
+}
+
/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
- if (!list_empty(&exts->actions))
- return tcf_action_exec(skb, &exts->actions, res);
+ if (exts->nr_actions)
+ return tcf_action_exec(skb, exts->actions, exts->nr_actions,
+ res);
#endif
return 0;
}
+#ifdef CONFIG_NET_CLS_ACT
+
+#define tc_no_actions(_exts) ((_exts)->nr_actions == 0)
+#define tc_single_action(_exts) ((_exts)->nr_actions == 1)
+
+#else /* CONFIG_NET_CLS_ACT */
+
+#define tc_no_actions(_exts) true
+#define tc_single_action(_exts) false
+
+#endif /* CONFIG_NET_CLS_ACT */
+
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, bool ovr);
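
Since tcf_exts now stores a flat array rather than a list, drivers that still
want list-style iteration can go through the tcf_exts_to_list() helper added
above. A hedged sketch of driver-side usage; handle_action() is a made-up
placeholder for whatever the driver does with each action.

	#include <linux/list.h>
	#include <net/pkt_cls.h>

	static void handle_action(struct tc_action *a)
	{
		/* hypothetical: react to one action, e.g. offload it to hardware */
	}

	static void walk_exts_actions(const struct tcf_exts *exts)
	{
		LIST_HEAD(actions);		/* temporary list head on the stack */
		struct tc_action *a;

		tcf_exts_to_list(exts, &actions);
		list_for_each_entry(a, &actions, list)
			handle_action(a);
	}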
size_t len)
{
const void __user *p = udata->inbuf + offset;
- bool ret = false;
+ bool ret;
u8 *buf;
if (len > USHRT_MAX)
return false;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
+ buf = memdup_user(p, len);
+ if (IS_ERR(buf))
return false;
- if (copy_from_user(buf, p, len))
- goto free;
-
ret = !memchr_inv(buf, 0, len);
-
-free:
kfree(buf);
return ret;
}
#ifdef CONFIG_NO_HZ_COMMON
#define TICK_DEP_NAMES \
- tick_dep_name(NONE) \
+ tick_dep_mask_name(NONE) \
tick_dep_name(POSIX_TIMER) \
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
tick_dep_name_end(CLOCK_UNSTABLE)
#undef tick_dep_name
+#undef tick_dep_mask_name
#undef tick_dep_name_end
-#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
-#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* The MASK will convert to their bits and they need to be processed too */
+#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* NONE only has a mask defined for it */
+#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
TICK_DEP_NAMES
#undef tick_dep_name
+#undef tick_dep_mask_name
#undef tick_dep_name_end
#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
+#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
#define show_tick_dep_name(val) \
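
The tick_dep_name()/tick_dep_mask_name() churn above is the usual X-macro
pattern: one list of names, expanded several times with different macro
definitions. The stand-alone user-space illustration below uses invented names
to show how the same list can build an enum and a matching string table, which
is what the TRACE_DEFINE_ENUM() expansions rely on.

	#include <stdio.h>

	/* one list of names, expanded several times below (all names are invented) */
	#define DEP_NAMES \
		dep_name(POSIX_TIMER) \
		dep_name(PERF_EVENTS) \
		dep_name_end(SCHED)

	/* first expansion: build the enum */
	#define dep_name(x)	DEP_##x,
	#define dep_name_end(x)	DEP_##x
	enum deps { DEP_NAMES };
	#undef dep_name
	#undef dep_name_end

	/* second expansion: build a string table in the same order as the enum */
	#define dep_name(x)	#x,
	#define dep_name_end(x)	#x
	static const char * const dep_strings[] = { DEP_NAMES };
	#undef dep_name
	#undef dep_name_end

	int main(void)
	{
		printf("%d -> %s\n", DEP_PERF_EVENTS, dep_strings[DEP_PERF_EVENTS]);
		return 0;
	}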
BPF_FUNC_skb_change_type,
/**
- * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+ * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
* @skb: pointer to skb
* @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
* @index: index of the cgroup in the bpf_map
* == 1 skb succeeded the cgroup2 descendant test
* < 0 error
*/
- BPF_FUNC_skb_in_cgroup,
+ BPF_FUNC_skb_under_cgroup,
/**
* bpf_get_hash_recalc(skb)
__NFT_REG_MAX,
NFT_REG32_00 = 8,
- MFT_REG32_01,
+ NFT_REG32_01,
NFT_REG32_02,
NFT_REG32_03,
NFT_REG32_04,
__u16 pr_policy;
};
+struct sctp_info {
+ __u32 sctpi_tag;
+ __u32 sctpi_state;
+ __u32 sctpi_rwnd;
+ __u16 sctpi_unackdata;
+ __u16 sctpi_penddata;
+ __u16 sctpi_instrms;
+ __u16 sctpi_outstrms;
+ __u32 sctpi_fragmentation_point;
+ __u32 sctpi_inqueue;
+ __u32 sctpi_outqueue;
+ __u32 sctpi_overall_error;
+ __u32 sctpi_max_burst;
+ __u32 sctpi_maxseg;
+ __u32 sctpi_peer_rwnd;
+ __u32 sctpi_peer_tag;
+ __u8 sctpi_peer_capable;
+ __u8 sctpi_peer_sack;
+ __u16 __reserved1;
+
+ /* assoc status info */
+ __u64 sctpi_isacks;
+ __u64 sctpi_osacks;
+ __u64 sctpi_opackets;
+ __u64 sctpi_ipackets;
+ __u64 sctpi_rtxchunks;
+ __u64 sctpi_outofseqtsns;
+ __u64 sctpi_idupchunks;
+ __u64 sctpi_gapcnt;
+ __u64 sctpi_ouodchunks;
+ __u64 sctpi_iuodchunks;
+ __u64 sctpi_oodchunks;
+ __u64 sctpi_iodchunks;
+ __u64 sctpi_octrlchunks;
+ __u64 sctpi_ictrlchunks;
+
+ /* primary transport info */
+ struct sockaddr_storage sctpi_p_address;
+ __s32 sctpi_p_state;
+ __u32 sctpi_p_cwnd;
+ __u32 sctpi_p_srtt;
+ __u32 sctpi_p_rto;
+ __u32 sctpi_p_hbinterval;
+ __u32 sctpi_p_pathmaxrxt;
+ __u32 sctpi_p_sackdelay;
+ __u32 sctpi_p_sackfreq;
+ __u32 sctpi_p_ssthresh;
+ __u32 sctpi_p_partial_bytes_acked;
+ __u32 sctpi_p_flight_size;
+ __u16 sctpi_p_error;
+ __u16 __reserved2;
+
+ /* sctp sock info */
+ __u32 sctpi_s_autoclose;
+ __u32 sctpi_s_adaptation_ind;
+ __u32 sctpi_s_pd_point;
+ __u8 sctpi_s_nodelay;
+ __u8 sctpi_s_disable_fragments;
+ __u8 sctpi_s_v4mapped;
+ __u8 sctpi_s_frag_interleave;
+ __u32 sctpi_s_type;
+ __u32 __reserved3;
+};
+
#endif /* _UAPI_SCTP_H */
*/
#ifndef _UAPI_LINUX_VIRTIO_VSOCK_H
-#define _UAPI_LINUX_VIRTIO_VOSCK_H
+#define _UAPI_LINUX_VIRTIO_VSOCK_H
#include <linux/types.h>
#include <linux/virtio_ids.h>
*
 * Of course the contents will be ABI, but that's up to the AFU driver.
*/
- size_t data_size;
- u8 data[];
+ __u32 data_size;
+ __u8 data[];
};
struct cxl_event {
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
-DECLARE_PER_CPU(int, xen_vcpu_id);
-static inline int xen_vcpu_nr(int cpu)
+DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
+static inline uint32_t xen_vcpu_nr(int cpu)
{
return per_cpu(xen_vcpu_id, cpu);
}
config SLAB
bool "SLAB"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
config SLUB
bool "SLUB (Unqueued Allocator)"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
SLUB is a slab allocator that minimizes cache line usage
instead of managing queues of cached objects (SLAB approach).
struct bucket *buckets;
void *elems;
struct pcpu_freelist freelist;
+ void __percpu *extra_elems;
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
};
+enum extra_elem_state {
+ HTAB_NOT_AN_EXTRA_ELEM = 0,
+ HTAB_EXTRA_ELEM_FREE,
+ HTAB_EXTRA_ELEM_USED
+};
+
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
struct bpf_htab *htab;
struct pcpu_freelist_node fnode;
};
- struct rcu_head rcu;
+ union {
+ struct rcu_head rcu;
+ enum extra_elem_state state;
+ };
u32 hash;
char key[0] __aligned(8);
};
return err;
}
+static int alloc_extra_elems(struct bpf_htab *htab)
+{
+ void __percpu *pptr;
+ int cpu;
+
+ pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
+ if (!pptr)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
+ HTAB_EXTRA_ELEM_FREE;
+ }
+ htab->extra_elems = pptr;
+ return 0;
+}
+
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
if (percpu)
cost += (u64) round_up(htab->map.value_size, 8) *
num_possible_cpus() * htab->map.max_entries;
+ else
+ cost += (u64) htab->elem_size * num_possible_cpus();
if (cost >= U32_MAX - PAGE_SIZE)
/* make sure page count doesn't overflow */
raw_spin_lock_init(&htab->buckets[i].lock);
}
+ if (!percpu) {
+ err = alloc_extra_elems(htab);
+ if (err)
+ goto free_buckets;
+ }
+
if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
err = prealloc_elems_and_freelist(htab);
if (err)
- goto free_buckets;
+ goto free_extra_elems;
}
return &htab->map;
+free_extra_elems:
+ free_percpu(htab->extra_elems);
free_buckets:
kvfree(htab->buckets);
free_htab:
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
kfree(l);
-
}
static void htab_elem_free_rcu(struct rcu_head *head)
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
+ if (l->state == HTAB_EXTRA_ELEM_USED) {
+ l->state = HTAB_EXTRA_ELEM_FREE;
+ return;
+ }
+
if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash,
- bool percpu, bool onallcpus)
+ bool percpu, bool onallcpus,
+ bool old_elem_exists)
{
u32 size = htab->map.value_size;
bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
struct htab_elem *l_new;
void __percpu *pptr;
+ int err = 0;
if (prealloc) {
l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
if (!l_new)
- return ERR_PTR(-E2BIG);
+ err = -E2BIG;
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
atomic_dec(&htab->count);
- return ERR_PTR(-E2BIG);
+ err = -E2BIG;
+ } else {
+ l_new = kmalloc(htab->elem_size,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!l_new)
+ return ERR_PTR(-ENOMEM);
}
- l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
- if (!l_new)
- return ERR_PTR(-ENOMEM);
+ }
+
+ if (err) {
+ if (!old_elem_exists)
+ return ERR_PTR(err);
+
+ /* if we're updating the existing element and the hash table
+ * is full, use per-cpu extra elems
+ */
+ l_new = this_cpu_ptr(htab->extra_elems);
+ if (l_new->state != HTAB_EXTRA_ELEM_FREE)
+ return ERR_PTR(-E2BIG);
+ l_new->state = HTAB_EXTRA_ELEM_USED;
+ } else {
+ l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
}
memcpy(l_new->key, key, key_size);
if (ret)
goto err;
- l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
+ l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
+ !!l_old);
if (IS_ERR(l_new)) {
/* all pre-allocated elements are in use or memory exhausted */
ret = PTR_ERR(l_new);
}
} else {
l_new = alloc_htab_elem(htab, key, value, key_size,
- hash, true, onallcpus);
+ hash, true, onallcpus, false);
if (IS_ERR(l_new)) {
ret = PTR_ERR(l_new);
goto err;
htab_free_elems(htab);
pcpu_freelist_destroy(&htab->freelist);
}
+ free_percpu(htab->extra_elems);
kvfree(htab->buckets);
kfree(htab);
}
struct verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
};
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
- if (func_id != BPF_FUNC_skb_in_cgroup)
+ if (func_id != BPF_FUNC_skb_under_cgroup)
goto error;
break;
default:
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
- case BPF_FUNC_skb_in_cgroup:
+ case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
/* dst_reg stays as pkt_ptr type and since some positive
* integer value was added to the pointer, increment its 'id'
*/
- dst_reg->id++;
+ dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range and off to zero */
dst_reg->off = 0;
return ret;
}
-static void event_function_local(struct perf_event *event, event_f func, void *data)
-{
- struct event_function_struct efs = {
- .event = event,
- .func = func,
- .data = data,
- };
-
- int ret = event_function(&efs);
- WARN_ON_ONCE(ret);
-}
-
static void event_function_call(struct perf_event *event, event_f func, void *data)
{
struct perf_event_context *ctx = event->ctx;
raw_spin_unlock_irq(&ctx->lock);
}
+/*
+ * Similar to event_function_call() + event_function(), but hard assumes IRQs
+ * are already disabled and we're on the right CPU.
+ */
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+ struct perf_event_context *ctx = event->ctx;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ struct task_struct *task = READ_ONCE(ctx->task);
+ struct perf_event_context *task_ctx = NULL;
+
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (task) {
+ if (task == TASK_TOMBSTONE)
+ return;
+
+ task_ctx = ctx;
+ }
+
+ perf_ctx_lock(cpuctx, task_ctx);
+
+ task = ctx->task;
+ if (task == TASK_TOMBSTONE)
+ goto unlock;
+
+ if (task) {
+ /*
+ * We must be either inactive or active and the right task,
+ * otherwise we're screwed, since we cannot IPI to somewhere
+ * else.
+ */
+ if (ctx->is_active) {
+ if (WARN_ON_ONCE(task != current))
+ goto unlock;
+
+ if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+ goto unlock;
+ }
+ } else {
+ WARN_ON_ONCE(&cpuctx->ctx != ctx);
+ }
+
+ func(event, cpuctx, ctx, data);
+unlock:
+ perf_ctx_unlock(cpuctx, task_ctx);
+}
+
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP |\
}
}
}
+
+/*
+ * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+ * cleared when last cgroup event is removed.
+ */
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+ struct perf_event_context *ctx, bool add)
+{
+ struct perf_cpu_context *cpuctx;
+
+ if (!is_cgroup_event(event))
+ return;
+
+ if (add && ctx->nr_cgroups++)
+ return;
+ else if (!add && --ctx->nr_cgroups)
+ return;
+ /*
+ * Because cgroup events are always per-cpu events,
+ * this will always be called from the right CPU.
+ */
+ cpuctx = __get_cpu_context(ctx);
+ cpuctx->cgrp = add ? event->cgrp : NULL;
+}
+
#else /* !CONFIG_CGROUP_PERF */
static inline bool
struct perf_event_context *ctx)
{
}
+
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+ struct perf_event_context *ctx, bool add)
+{
+}
+
#endif
/*
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
+
lockdep_assert_held(&ctx->lock);
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
list_add_tail(&event->group_entry, list);
}
- if (is_cgroup_event(event))
- ctx->nr_cgroups++;
+ list_update_cgroup_event(event, ctx, true);
list_add_rcu(&event->event_entry, &ctx->event_list);
ctx->nr_events++;
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
- struct perf_cpu_context *cpuctx;
-
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
event->attach_state &= ~PERF_ATTACH_CONTEXT;
- if (is_cgroup_event(event)) {
- ctx->nr_cgroups--;
- /*
- * Because cgroup events are always per-cpu events, this will
- * always be called from the right CPU.
- */
- cpuctx = __get_cpu_context(ctx);
- /*
- * If there are no more cgroup events then clear cgrp to avoid
- * stale pointer in update_cgrp_time_from_cpuctx().
- */
- if (!ctx->nr_cgroups)
- cpuctx->cgrp = NULL;
- }
+ list_update_cgroup_event(event, ctx, false);
ctx->nr_events--;
if (event->attr.inherit_stat)
static inline int
event_filter_match(struct perf_event *event)
{
- return (event->cpu == -1 || event->cpu == smp_processor_id())
- && perf_cgroup_match(event) && pmu_filter_match(event);
+ return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
+ perf_cgroup_match(event) && pmu_filter_match(event);
}
static void
 * maintained, otherwise bogus information is returned
* via read() for time_enabled, time_running:
*/
- if (event->state == PERF_EVENT_STATE_INACTIVE
- && !event_filter_match(event)) {
+ if (event->state == PERF_EVENT_STATE_INACTIVE &&
+ !event_filter_match(event)) {
delta = tstamp - event->tstamp_stopped;
event->tstamp_running += delta;
event->tstamp_stopped = tstamp;
lockdep_assert_held(&ctx->mutex);
- event->ctx = ctx;
if (event->cpu != -1)
event->cpu = cpu;
+ /*
+ * Ensures that if we can observe event->ctx, both the event and ctx
+ * will be 'complete'. See perf_iterate_sb_cpu().
+ */
+ smp_store_release(&event->ctx, ctx);
+
if (!task) {
cpu_function_call(cpu, __perf_install_in_context, event);
return;
.group = group,
.ret = 0,
};
- smp_call_function_single(event->oncpu,
- __perf_event_read, &data, 1);
- ret = data.ret;
+ ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+ /* The event must have been read from an online CPU: */
+ WARN_ON_ONCE(ret);
+ ret = ret ? : data.ret;
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
struct perf_event *event;
list_for_each_entry_rcu(event, &pel->list, sb_list) {
+ /*
+ * Skip events that are not fully formed yet; ensure that
+ * if we observe event->ctx, both event and ctx will be
+ * complete enough. See perf_install_in_context().
+ */
+ if (!smp_load_acquire(&event->ctx))
+ continue;
+
if (event->state < PERF_EVENT_STATE_INACTIVE)
continue;
if (!event_filter_match(event))
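
The smp_store_release()/smp_load_acquire() pairing above is a publication
pattern: the event is fully initialized first, then event->ctx is published
with release semantics, and a reader that observes a non-NULL ctx through the
acquire load is guaranteed to see the completed initialization. Below is a
user-space C11 sketch of the same pattern with invented names; it is only an
analogy for the kernel primitives, built with a plain busy-wait consumer.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct ctx { int nr_events; };

	struct event {
		int payload;
		struct ctx *_Atomic ctx;	/* published last, read with acquire */
	};

	static struct event ev;
	static struct ctx the_ctx;

	static void *producer(void *arg)
	{
		ev.payload = 42;		/* finish initializing the event first */
		the_ctx.nr_events = 1;
		/* release store pairs with the acquire load in the consumer */
		atomic_store_explicit(&ev.ctx, &the_ctx, memory_order_release);
		return NULL;
	}

	static void *consumer(void *arg)
	{
		struct ctx *c;

		/* skip the event until it is fully formed, as perf_iterate_sb_cpu() does */
		while (!(c = atomic_load_explicit(&ev.ctx, memory_order_acquire)))
			;			/* spin; a real consumer would just move on */
		printf("payload=%d nr_events=%d\n", ev.payload, c->nr_events);
		return NULL;
	}

	int main(void)
	{
		pthread_t p, q;

		pthread_create(&q, NULL, consumer, NULL);
		pthread_create(&p, NULL, producer, NULL);
		pthread_join(p, NULL);
		pthread_join(q, NULL);
		return 0;
	}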
{
struct perf_event *event = info;
struct pmu *pmu = event->pmu;
- struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct remote_output ro = {
.rb = event->rb,
};
kfree(buf);
}
-/*
- * Whether this @filter depends on a dynamic object which is not loaded
- * yet or its load addresses are not known.
- */
-static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
-{
- return filter->filter && filter->inode;
-}
-
/*
* Check whether inode and address range match filter criteria.
*/
struct perf_event_context *ctx;
int ctxn;
+ /*
+ * Data tracing isn't supported yet and as such there is no need
+ * to keep track of anything that isn't related to executable code:
+ */
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
rcu_read_lock();
for_each_task_context_nr(ctxn) {
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
list_for_each_entry(filter, &ifh->list, entry) {
event->addr_filters_offs[count] = 0;
- if (perf_addr_filter_needs_mmap(filter))
+ /*
+ * Adjust base offset if the filter is associated to a binary
+ * that needs to be mapped:
+ */
+ if (filter->inode)
event->addr_filters_offs[count] =
perf_addr_filter_apply(filter, mm);
goto fail;
}
- if (token == IF_SRC_FILE) {
- filename = match_strdup(&args[2]);
+ if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+ int fpos = filter->range ? 2 : 1;
+
+ filename = match_strdup(&args[fpos]);
if (!filename) {
ret = -ENOMEM;
goto fail;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
ptep = page_check_address(page, mm, addr, &ptl, 0);
- if (!ptep)
+ if (!ptep) {
+ mem_cgroup_cancel_charge(kpage, memcg, false);
goto unlock;
+ }
get_page(kpage);
page_add_new_anon_rmap(kpage, vma, addr, false);
err = 0;
unlock:
- mem_cgroup_cancel_charge(kpage, memcg, false);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(page);
return err;
* Futex flags used to encode options to functions and preserve them across
* restarts.
*/
-#define FLAGS_SHARED 0x01
+#ifdef CONFIG_MMU
+# define FLAGS_SHARED 0x01
+#else
+/*
+ * NOMMU does not have a per-process address space. Let the compiler optimize
+ * code away.
+ */
+# define FLAGS_SHARED 0x00
+#endif
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04
if (!key->both.ptr)
return;
+ /*
+	 * On MMU-less systems futexes are always "private" as there is no
+	 * per-process address space. We need the smp wmb nevertheless - yes,
+	 * arch/blackfin has MMU-less SMP ...
+ */
+ if (!IS_ENABLED(CONFIG_MMU)) {
+ smp_mb(); /* explicit smp_mb(); (B) */
+ return;
+ }
+
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
ihold(key->shared.inode); /* implies smp_mb(); (B) */
return;
}
+ if (!IS_ENABLED(CONFIG_MMU))
+ return;
+
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
iput(key->shared.inode);
return NULL;
}
+ get_online_cpus();
if (max_vecs >= num_online_cpus()) {
cpumask_copy(affinity_mask, cpu_online_mask);
*nr_vecs = num_online_cpus();
}
*nr_vecs = vecs;
}
+ put_online_cpus();
return affinity_mask;
}
desc->name = name;
if (handle != handle_bad_irq && is_chained) {
+ /*
+ * We're about to start this interrupt immediately,
+ * hence the need to set the trigger configuration.
+ * But the .set_type callback may have overridden the
+ * flow handler, ignoring that we're dealing with a
+ * chained interrupt. Reset it immediately because we
+ * do know better.
+ */
+ __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
+ desc->handle_irq = handle;
+
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
action->dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
- if (retval < 0)
+ if (retval < 0) {
+ kfree(action);
return retval;
+ }
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, action);
action->percpu_dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
- if (retval < 0)
+ if (retval < 0) {
+ kfree(action);
return retval;
+ }
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, action);
else
dev_dbg(dev, "irq [%d-%d] for MSI\n",
virq, virq + desc->nvec_used - 1);
+ /*
+ * This flag is set by the PCI layer as we need to activate
+ * the MSI entries before the PCI layer enables MSI in the
+ * card. Otherwise the card latches a random msi message.
+ */
+ if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
+ struct irq_data *irq_data;
+
+ irq_data = irq_domain_get_irq_data(domain, desc->irq);
+ irq_domain_activate_irq(irq_data);
+ }
}
return 0;
goto gotlock;
}
}
- WRITE_ONCE(pn->state, vcpu_halted);
+ WRITE_ONCE(pn->state, vcpu_hashed);
qstat_inc(qstat_pv_wait_head, true);
qstat_inc(qstat_pv_wait_again, waitcnt);
pv_wait(&l->locked, _Q_SLOW_VAL);
*/
if ((counter == qstat_pv_latency_kick) ||
(counter == qstat_pv_latency_wake)) {
- stat = 0;
if (kicks)
stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
}
save_processor_state();
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
error = swsusp_arch_suspend();
+ /* Restore control flow magically appears here */
+ restore_processor_state();
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
if (error)
printk(KERN_ERR "PM: Error %d creating hibernation image\n",
error);
- /* Restore control flow magically appears here */
- restore_processor_state();
if (!in_suspend)
events_check_enabled = false;
*/
static bool rtree_next_node(struct memory_bitmap *bm)
{
- bm->cur.node = list_entry(bm->cur.node->list.next,
- struct rtree_node, list);
- if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+ if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
+ bm->cur.node = list_entry(bm->cur.node->list.next,
+ struct rtree_node, list);
bm->cur.node_pfn += BM_BITS_PER_BLOCK;
bm->cur.node_bit = 0;
touch_softlockup_watchdog();
}
/* No more nodes, goto next zone */
- bm->cur.zone = list_entry(bm->cur.zone->list.next,
+ if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
+ bm->cur.zone = list_entry(bm->cur.zone->list.next,
struct mem_zone_bm_rtree, list);
- if (&bm->cur.zone->list != &bm->zones) {
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
struct rtree_node, list);
bm->cur.node_pfn = 0;
char *_braille_console_setup(char **str, char **brl_options)
{
- if (!memcmp(*str, "brl,", 4)) {
+ if (!strncmp(*str, "brl,", 4)) {
*brl_options = "";
*str += 4;
- } else if (!memcmp(str, "brl=", 4)) {
+ } else if (!strncmp(*str, "brl=", 4)) {
*brl_options = *str + 4;
*str = strchr(*brl_options, ',');
if (!*str)
*/
#include <linux/percpu.h>
-typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt,
- va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
-__printf(2, 0)
-int vprintk_default(int level, const char *fmt, va_list args);
+int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
#ifdef CONFIG_PRINTK_NMI
* via per-CPU variable.
*/
DECLARE_PER_CPU(printk_func_t, printk_func);
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
- return this_cpu_read(printk_func)(level, fmt, args);
+ return this_cpu_read(printk_func)(fmt, args);
}
extern atomic_t nmi_message_lost;
#else /* CONFIG_PRINTK_NMI */
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
- return vprintk_default(level, fmt, args);
+ return vprintk_default(fmt, args);
}
static inline int get_nmi_message_lost(void)
* one writer running. But the buffer might get flushed from another
* CPU, so we need to be careful.
*/
-static int vprintk_nmi(int level, const char *fmt, va_list args)
+static int vprintk_nmi(const char *fmt, va_list args)
{
struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
int add = 0;
if (!len)
smp_rmb();
- if (level != LOGLEVEL_DEFAULT) {
- add = snprintf(s->buffer + len, sizeof(s->buffer) - len,
- KERN_SOH "%c", '0' + level);
- add += vsnprintf(s->buffer + len + add,
- sizeof(s->buffer) - len - add,
- fmt, args);
- } else {
- add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len,
- fmt, args);
- }
+ add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
/*
* Do it once again if the buffer has been flushed in the meantime.
}
EXPORT_SYMBOL(printk_emit);
-#ifdef CONFIG_PRINTK
-#define define_pr_level(func, loglevel) \
-asmlinkage __visible void func(const char *fmt, ...) \
-{ \
- va_list args; \
- \
- va_start(args, fmt); \
- vprintk_default(loglevel, fmt, args); \
- va_end(args); \
-} \
-EXPORT_SYMBOL(func)
-
-define_pr_level(__pr_emerg, LOGLEVEL_EMERG);
-define_pr_level(__pr_alert, LOGLEVEL_ALERT);
-define_pr_level(__pr_crit, LOGLEVEL_CRIT);
-define_pr_level(__pr_err, LOGLEVEL_ERR);
-define_pr_level(__pr_warn, LOGLEVEL_WARNING);
-define_pr_level(__pr_notice, LOGLEVEL_NOTICE);
-define_pr_level(__pr_info, LOGLEVEL_INFO);
-#endif
-
-int vprintk_default(int level, const char *fmt, va_list args)
+int vprintk_default(const char *fmt, va_list args)
{
int r;
return r;
}
#endif
- r = vprintk_emit(0, level, NULL, 0, fmt, args);
+ r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
return r;
}
int r;
va_start(args, fmt);
- r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args);
+ r = vprintk_func(fmt, args);
va_end(args);
return r;
#include <linux/context_tracking.h>
#include <linux/compiler.h>
#include <linux/frame.h>
+#include <linux/prefetch.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
+/*
+ * The function fair_sched_class.update_curr accesses the struct curr
+ * and its field curr->exec_start; when called from task_sched_runtime(),
+ * we observe a high rate of cache misses in practice.
+ * Prefetching this data results in improved performance.
+ */
+static inline void prefetch_curr_exec_start(struct task_struct *p)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity *curr = (&p->se)->cfs_rq->curr;
+#else
+ struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
+#endif
+ prefetch(curr);
+ prefetch(&curr->exec_start);
+}
+
/*
* Return accounted runtime for the task.
* In case the task is currently running, return the runtime plus current's
* thread, breaking clock_gettime().
*/
if (task_current(rq, p) && task_on_rq_queued(p)) {
+ prefetch_curr_exec_start(p);
update_rq_clock(rq);
p->sched_class->update_curr(rq);
}
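As a rough illustration of the prefetch idea above (issue the prefetch early so the cache miss overlaps with other work before the field is actually read), the sketch below uses GCC's __builtin_prefetch, which the kernel's prefetch() helper typically maps to. The structure and field names are invented for the example:

	#include <stdio.h>

	struct entity {
		unsigned long long exec_start;	/* field we are about to read */
		char pad[512];
	};

	static unsigned long long read_runtime(struct entity *e)
	{
		/* Hint the CPU to start fetching the line now (read access). */
		__builtin_prefetch(&e->exec_start, 0, 1);

		/* ... unrelated work would overlap with the fetch here ... */

		return e->exec_start;
	}

	int main(void)
	{
		struct entity e = { .exec_start = 42 };

		printf("%llu\n", read_runtime(&e));
		return 0;
	}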
if (old_idx == IDX_INVALID) {
cp->size++;
- cp->elements[cp->size - 1].dl = 0;
+ cp->elements[cp->size - 1].dl = dl;
cp->elements[cp->size - 1].cpu = cpu;
cp->elements[cpu].idx = cp->size - 1;
cpudl_change_key(cp, cp->size - 1, dl);
cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
+/*
+ * When a guest is interrupted for a longer amount of time, missed clock
+ * ticks are not redelivered later. Due to that, this function may on
+ * occasion account more time than the calling functions think has elapsed.
+ */
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
* idle, or potentially user or system time. Due to rounding,
* other time can exceed ticks occasionally.
*/
- other = account_other_time(cputime);
+ other = account_other_time(ULONG_MAX);
if (other >= cputime)
return;
cputime -= other;
}
cputime = cputime_one_jiffy;
- steal = steal_account_process_time(cputime);
+ steal = steal_account_process_time(ULONG_MAX);
if (steal >= cputime)
return;
*/
void account_idle_ticks(unsigned long ticks)
{
+ cputime_t cputime, steal;
if (sched_clock_irqtime) {
irqtime_account_idle_ticks(ticks);
return;
}
- account_idle_time(jiffies_to_cputime(ticks));
+ cputime = jiffies_to_cputime(ticks);
+ steal = steal_account_process_time(ULONG_MAX);
+
+ if (steal >= cputime)
+ return;
+
+ cputime -= steal;
+ account_idle_time(cputime);
}
/*
stime = curr->stime;
utime = curr->utime;
- if (utime == 0) {
- stime = rtime;
+ /*
+	 * If stime is 0 (including when both are 0), assume all runtime is
+	 * userspace; if only utime is 0, assume it is all system time. Once a
+	 * task gets some ticks, the monotonicity code at 'update' will ensure
+	 * things converge to the observed ratio.
+ */
+ if (stime == 0) {
+ utime = rtime;
goto update;
}
- if (stime == 0) {
- utime = rtime;
+ if (utime == 0) {
+ stime = rtime;
goto update;
}
stime = scale_stime((__force u64)stime, (__force u64)rtime,
(__force u64)(stime + utime));
+update:
/*
* Make sure stime doesn't go backwards; this preserves monotonicity
* for utime because rtime is monotonic.
stime = rtime - utime;
}
-update:
prev->stime = stime;
prev->utime = utime;
out:
unsigned long now = READ_ONCE(jiffies);
cputime_t delta, other;
+ /*
+	 * Unlike tick-based timing, vtime-based timing never has lost
+	 * ticks, so there is no need for steal time accounting to make
+	 * up for lost ticks. Vtime accounts a rounded version of actual
+ * elapsed time. Limit account_other_time to prevent rounding
+ * errors from causing elapsed vtime to go negative.
+ */
delta = jiffies_to_cputime(now - tsk->vtime_snap);
other = account_other_time(delta);
WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
*
* XXX figure out if select_task_rq_dl() deals with offline cpus.
*/
- if (unlikely(!rq->online))
+ if (unlikely(!rq->online)) {
+ lockdep_unpin_lock(&rq->lock, rf.cookie);
rq = dl_task_offline_migration(rq, p);
+ rf.cookie = lockdep_pin_lock(&rq->lock);
+ }
/*
* Queueing this task back might have overloaded rq, check if we need
pcfs_rq = tg->parent->cfs_rq[cpu];
cfs_rq->throttle_count = pcfs_rq->throttle_count;
- pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+ cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
}
/* conditionally throttle active cfs_rq's from put_prev_entity() */
return 0;
}
+static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
+ int *valp,
+ int write, void *data)
+{
+ if (write) {
+ if (*negp)
+ return -EINVAL;
+ *valp = *lvalp;
+ } else {
+ unsigned int val = *valp;
+ *lvalp = (unsigned long)val;
+ }
+ return 0;
+}
+
static const char proc_wspace_sep[] = { ' ', '\t', '\n' };
static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
int proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,buffer,lenp,ppos,
- NULL,NULL);
+ return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
+}
+
+/**
+ * proc_douintvec - read a vector of unsigned integers
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer
+ * values from/to the user buffer, treated as an ASCII string.
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
+ do_proc_douintvec_conv, NULL);
}
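A standalone sketch of the conversion rule the new handler enforces on writes: a value parsed with a leading '-' is rejected with -EINVAL instead of being wrapped into a large unsigned number, while the full unsigned int range is accepted. The helper name below is illustrative, not the kernel's:

	#include <stdbool.h>
	#include <stdio.h>

	#define EINVAL 22

	/* Mirrors the write side of do_proc_douintvec_conv(). */
	static int store_uint(bool negative, unsigned long parsed, unsigned int *dest)
	{
		if (negative)
			return -EINVAL;	/* negative input is not a valid unsigned value */
		*dest = (unsigned int)parsed;
		return 0;
	}

	int main(void)
	{
		unsigned int val = 0;
		int ret;

		ret = store_uint(false, 3000000000UL, &val);
		printf("write 3000000000 -> %d (val=%u)\n", ret, val);

		ret = store_uint(true, 1, &val);
		printf("write -1 -> %d (val=%u)\n", ret, val);
		return 0;
	}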
/*
return -ENOSYS;
}
+int proc_douintvec(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return -ENOSYS;
+}
+
int proc_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
* exception granted :-)
*/
EXPORT_SYMBOL(proc_dointvec);
+EXPORT_SYMBOL(proc_douintvec);
EXPORT_SYMBOL(proc_dointvec_jiffies);
EXPORT_SYMBOL(proc_dointvec_minmax);
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
- now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+ now = ktime_to_ns(tkr->base);
+
+ now += clocksource_delta(tkr->read(tkr->clock),
+ tkr->cycle_last, tkr->mask);
} while (read_seqcount_retry(&tkf->seq, seq));
return now;
#include "timekeeping_internal.h"
-static unsigned int sleep_time_bin[32] = {0};
+#define NUM_BINS 32
+
+static unsigned int sleep_time_bin[NUM_BINS] = {0};
static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
{
void tk_debug_account_sleep_time(struct timespec64 *t)
{
- sleep_time_bin[fls(t->tv_sec)]++;
+ /* Cap bin index so we don't overflow the array */
+ int bin = min(fls(t->tv_sec), NUM_BINS-1);
+
+ sleep_time_bin[bin]++;
}
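A standalone sketch of the capped-bin pattern used above: the bin index is taken from the highest set bit of the sleep time and clamped so that very large values land in the last bin instead of indexing past the array. fls() is emulated here so the example builds in userspace:

	#include <stdio.h>

	#define NUM_BINS 32

	static unsigned int bins[NUM_BINS];

	/* Highest set bit, 1-based; returns 0 for an input of 0 (like fls()). */
	static int fls_emul(unsigned long long v)
	{
		int r = 0;

		while (v) {
			v >>= 1;
			r++;
		}
		return r;
	}

	static void account_sleep(unsigned long long seconds)
	{
		int bin = fls_emul(seconds);

		/* Cap the index so huge values cannot run past the array. */
		if (bin > NUM_BINS - 1)
			bin = NUM_BINS - 1;
		bins[bin]++;
	}

	int main(void)
	{
		account_sleep(5);		/* fls(5) == 3 */
		account_sleep(1ULL << 40);	/* 41 is capped to bin 31 */
		printf("bin3=%u bin31=%u\n", bins[3], bins[NUM_BINS - 1]);
		return 0;
	}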
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
u64 expires = KTIME_MAX;
unsigned long nextevt;
+ bool is_max_delta;
/*
* Pretend that there is no timer pending if the cpu is offline.
spin_lock(&base->lock);
nextevt = __next_timer_interrupt(base);
+ is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
/*
* We have a fresh next event. Check whether we can forward the base:
expires = basem;
base->is_idle = false;
} else {
- expires = basem + (nextevt - basej) * TICK_NSEC;
+ if (!is_max_delta)
+ expires = basem + (nextevt - basej) * TICK_NSEC;
/*
* If we expect to sleep more than a tick, mark the base idle:
*/
what |= MASK_TC_BIT(op_flags, META);
what |= MASK_TC_BIT(op_flags, PREFLUSH);
what |= MASK_TC_BIT(op_flags, FUA);
- if (op == REQ_OP_DISCARD)
+ if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
what |= BLK_TC_ACT(BLK_TC_DISCARD);
if (op == REQ_OP_FLUSH)
what |= BLK_TC_ACT(BLK_TC_FLUSH);
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4U
-#define BUCKET_LOCKS_PER_CPU 128UL
+#define BUCKET_LOCKS_PER_CPU 32UL
static u32 head_hashfn(struct rhashtable *ht,
const struct bucket_table *tbl,
unsigned int nr_pcpus = num_possible_cpus();
#endif
- nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+ nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
/* Never allocate more than 0.5 locks per bucket */
tbl->locks = vmalloc(size * sizeof(spinlock_t));
else
#endif
+ if (gfp != GFP_KERNEL)
+ gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
gfp);
if (!tbl->locks)
static int rhashtable_shrink(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
- unsigned int size;
+ unsigned int nelems = atomic_read(&ht->nelems);
+ unsigned int size = 0;
int err;
ASSERT_RHT_MUTEX(ht);
- size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+ if (nelems)
+ size = roundup_pow_of_two(nelems * 3 / 2);
if (size < ht->p.min_size)
size = ht->p.min_size;
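The shrink fix above also avoids feeding 0 to roundup_pow_of_two(), whose result is undefined for a zero argument. A userspace sketch of the same guard (next_pow2() is a stand-in for the kernel macro, not the real implementation):

	#include <stdio.h>

	/* Smallest power of two >= v, valid for v >= 1 (undefined for 0). */
	static unsigned int next_pow2(unsigned int v)
	{
		unsigned int p = 1;

		while (p < v)
			p <<= 1;
		return p;
	}

	/* Target table size: 3/2 of the element count, but never below min_size. */
	static unsigned int shrink_size(unsigned int nelems, unsigned int min_size)
	{
		unsigned int size = 0;

		if (nelems)		/* guard: never round up a zero count */
			size = next_pow2(nelems * 3 / 2);
		if (size < min_size)
			size = min_size;
		return size;
	}

	int main(void)
	{
		printf("%u %u\n", shrink_size(100, 4), shrink_size(0, 4));	/* 256 4 */
		return 0;
	}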
unsigned long c, data;
/* Fall back to byte-at-a-time if we get a page fault */
- if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
- break;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
*(unsigned long *)(dst+res) = c;
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
while (max) {
char c;
- if (unlikely(unsafe_get_user(c,src+res)))
- return -EFAULT;
+ unsafe_get_user(c,src+res, efault);
dst[res] = c;
if (!c)
return res;
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's an EFAULT.
*/
+efault:
return -EFAULT;
}
src -= align;
max += align;
- if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)src, efault);
c |= aligned_byte_mask(align);
for (;;) {
if (unlikely(max <= sizeof(unsigned long)))
break;
max -= sizeof(unsigned long);
- if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
}
res -= align;
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's 0.
*/
+efault:
return 0;
}
static int max_size = 0;
module_param(max_size, int, 0);
-MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)");
+MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");
static bool shrinking = false;
module_param(shrinking, bool, 0);
select MIGRATION
depends on MMU
help
- Allows the compaction of memory for the allocation of huge pages.
+ Compaction is the only memory management component to form
+ high order (larger physically contiguous) memory blocks
+ reliably. The page allocator relies on compaction heavily and
+ the lack of the feature can lead to unexpected OOM killer
+ invocations for high order memory requests. You shouldn't
+ disable this option unless there really is a strong reason for
+ it and then we would be really interested to hear about that at
+ linux-mm@kvack.org.
#
# support for page migration
KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
struct page *page;
pgtable_t pgtable;
pmd_t _pmd;
- bool young, write, dirty;
+ bool young, write, dirty, soft_dirty;
unsigned long addr;
int i;
write = pmd_write(*pmd);
young = pmd_young(*pmd);
dirty = pmd_dirty(*pmd);
+ soft_dirty = pmd_soft_dirty(*pmd);
pmdp_huge_split_prepare(vma, haddr, pmd);
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
swp_entry_t swp_entry;
swp_entry = make_migration_entry(page + i, write);
entry = swp_entry_to_pte(swp_entry);
+ if (soft_dirty)
+ entry = pte_swp_mksoft_dirty(entry);
} else {
entry = mk_pte(page + i, vma->vm_page_prot);
entry = maybe_mkwrite(entry, vma);
entry = pte_wrprotect(entry);
if (!young)
entry = pte_mkold(entry);
+ if (soft_dirty)
+ entry = pte_mksoft_dirty(entry);
}
if (dirty)
SetPageDirty(page + i);
list_del(&page->lru);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
+ h->max_huge_pages--;
update_and_free_page(h, page);
}
spin_unlock(&hugetlb_lock);
new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
QUARANTINE_FRACTION;
percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
- if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
- "Too little memory, disabling global KASAN quarantine.\n"))
- new_quarantine_size = 0;
- else
- new_quarantine_size -= percpu_quarantines;
+ new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
+ 0 : new_quarantine_size - percpu_quarantines;
WRITE_ONCE(quarantine_size, new_quarantine_size);
last = global_quarantine.head;
return 0;
memcg = get_mem_cgroup_from_mm(current->mm);
- if (!mem_cgroup_is_root(memcg))
+ if (!mem_cgroup_is_root(memcg)) {
ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ if (!ret)
+ __SetPageKmemcg(page);
+ }
css_put(&memcg->css);
return ret;
}
page_counter_uncharge(&memcg->memsw, nr_pages);
page->mem_cgroup = NULL;
+
+ /* slab pages do not have PageKmemcg flag set */
+ if (PageKmemcg(page))
+ __ClearPageKmemcg(page);
+
css_put_many(&memcg->css, nr_pages);
}
#endif /* !CONFIG_SLOB */
static DEFINE_IDR(mem_cgroup_idr);
-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
{
- atomic_inc(&memcg->id.ref);
+ atomic_add(n, &memcg->id.ref);
}
-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
- if (atomic_dec_and_test(&memcg->id.ref)) {
+ if (atomic_sub_and_test(n, &memcg->id.ref)) {
idr_remove(&mem_cgroup_idr, memcg->id.id);
memcg->id.id = 0;
}
}
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+ mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+ mem_cgroup_id_put_many(memcg, 1);
+}
+
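A hedged standalone sketch of the batched reference pattern introduced above, using C11 atomics in place of the kernel's atomic_t: n references are taken or dropped in a single atomic operation, and the single-reference helpers become thin wrappers on top. The struct and function names are invented for the example:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct obj {
		atomic_int refs;
	};

	static void obj_get_many(struct obj *o, int n)
	{
		atomic_fetch_add(&o->refs, n);
	}

	/* Returns true when the last reference has just been dropped. */
	static bool obj_put_many(struct obj *o, int n)
	{
		return atomic_fetch_sub(&o->refs, n) == n;
	}

	static void obj_get(struct obj *o) { obj_get_many(o, 1); }
	static bool obj_put(struct obj *o) { return obj_put_many(o, 1); }

	int main(void)
	{
		struct obj o = { .refs = 1 };	/* initial reference */

		obj_get_many(&o, 3);	/* e.g. one extra ref per moved swap entry */
		obj_get(&o);
		printf("%d\n", obj_put_many(&o, 3));	/* 0: references remain */
		printf("%d\n", obj_put(&o));		/* 0 */
		printf("%d\n", obj_put(&o));		/* 1: last reference dropped */
		return 0;
	}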
/**
* mem_cgroup_from_id - look up a memcg from a memcg id
* @id: the memcg id to look up
if (!mem_cgroup_is_root(mc.from))
page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
+ mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
/*
* we charged both to->memory and to->memsw, so we
* should uncharge to->memory.
if (!mem_cgroup_is_root(mc.to))
page_counter_uncharge(&mc.to->memory, mc.moved_swap);
- css_put_many(&mc.from->css, mc.moved_swap);
+ mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+ css_put_many(&mc.to->css, mc.moved_swap);
- /* we've already done css_get(mc.to) */
mc.moved_swap = 0;
}
memcg_oom_recover(from);
else
nr_file += nr_pages;
pgpgout++;
- } else
+ } else {
nr_kmem += 1 << compound_order(page);
+ __ClearPageKmemcg(page);
+ }
page->mem_cgroup = NULL;
} while (next != page_list);
subsys_initcall(mem_cgroup_init);
#ifdef CONFIG_MEMCG_SWAP
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+{
+ while (!atomic_inc_not_zero(&memcg->id.ref)) {
+ /*
+		 * The root cgroup cannot be destroyed, so its refcount must
+		 * always be >= 1.
+ */
+ if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+ VM_BUG_ON(1);
+ break;
+ }
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ }
+ return memcg;
+}
+
/**
* mem_cgroup_swapout - transfer a memsw charge to swap
* @page: page whose memsw charge to transfer
*/
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;
VM_BUG_ON_PAGE(PageLRU(page), page);
if (!memcg)
return;
- mem_cgroup_id_get(memcg);
- oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+ /*
+ * In case the memcg owning these pages has been offlined and doesn't
+ * have an ID allocated to it anymore, charge the closest online
+ * ancestor for the swap instead and transfer the memory+swap charge.
+ */
+ swap_memcg = mem_cgroup_id_get_online(memcg);
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(memcg, true);
+ mem_cgroup_swap_statistics(swap_memcg, true);
page->mem_cgroup = NULL;
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, 1);
+ if (memcg != swap_memcg) {
+ if (!mem_cgroup_is_root(swap_memcg))
+ page_counter_charge(&swap_memcg->memsw, 1);
+ page_counter_uncharge(&memcg->memsw, 1);
+ }
+
/*
* Interrupts should be disabled here because the caller holds the
* mapping->tree_lock lock which is taken with interrupts-off. It is
if (!memcg)
return 0;
+ memcg = mem_cgroup_id_get_online(memcg);
+
if (!mem_cgroup_is_root(memcg) &&
- !page_counter_try_charge(&memcg->swap, 1, &counter))
+ !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+ mem_cgroup_id_put(memcg);
return -ENOMEM;
+ }
- mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
/* init node's zones as empty zones, we don't have any present pages.*/
free_area_init_node(nid, zones_size, start_pfn, zholes_size);
+ pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
/*
* The node we allocated has no zone fallback lists. For avoiding
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
arch_refresh_nodedata(nid, NULL);
+ free_percpu(pgdat->per_cpu_nodestats);
arch_free_nodedata(pgdat);
return;
}
{
struct mm_struct *mm = task->mm;
struct task_struct *p;
- bool ret;
+ bool ret = true;
/*
* Skip tasks without mm because it might have passed its exit_mm and
}
if (PageMappingFlags(page))
page->mapping = NULL;
- if (memcg_kmem_enabled() && PageKmemcg(page)) {
+ if (memcg_kmem_enabled() && PageKmemcg(page))
memcg_kmem_uncharge(page, order);
- __ClearPageKmemcg(page);
- }
if (check_free)
bad += free_pages_check(page);
if (bad)
}
out:
- if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
- if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
- __free_pages(page, order);
- page = NULL;
- } else
- __SetPageKmemcg(page);
+ if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
+ unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+ __free_pages(page, order);
+ page = NULL;
}
if (kmemcheck_enabled && page)
int lru;
for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
- pages[lru] = global_page_state(NR_LRU_BASE + lru);
+ pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
for_each_zone(zone)
wmark_low += zone->watermark[WMARK_LOW];
}
#endif
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
#else /* CONFIG_NUMA */
static void set_zonelist_order(void)
zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
#ifdef CONFIG_NUMA
zone->node = nid;
- pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
- / 100;
- pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
#endif
zone->name = zone_names[j];
zone->zone_pgdat = pgdat;
setup_per_zone_wmarks();
refresh_zone_stat_thresholds();
setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+ setup_min_unmapped_ratio();
+ setup_min_slab_ratio();
+#endif
+
return 0;
}
core_initcall(init_per_zone_wmark_min)
}
#ifdef CONFIG_NUMA
+static void setup_min_unmapped_ratio(void)
+{
+ pg_data_t *pgdat;
+ struct zone *zone;
+
+ for_each_online_pgdat(pgdat)
+ pgdat->min_unmapped_pages = 0;
+
+ for_each_zone(zone)
+ zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
+ sysctl_min_unmapped_ratio) / 100;
+}
+
+
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
- struct pglist_data *pgdat;
- struct zone *zone;
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
+ setup_min_unmapped_ratio();
+
+ return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+ pg_data_t *pgdat;
+ struct zone *zone;
+
for_each_online_pgdat(pgdat)
pgdat->min_slab_pages = 0;
for_each_zone(zone)
- zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
- sysctl_min_unmapped_ratio) / 100;
- return 0;
+ zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
+ sysctl_min_slab_ratio) / 100;
}
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
- struct pglist_data *pgdat;
- struct zone *zone;
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
- for_each_online_pgdat(pgdat)
- pgdat->min_slab_pages = 0;
+ setup_min_slab_ratio();
- for_each_zone(zone)
- zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
- sysctl_min_slab_ratio) / 100;
return 0;
}
#endif
*/
#include <linux/kernel.h>
+#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
if (!mapping || !mapping->a_ops)
return -EINVAL;
+ /*
+ * Readahead doesn't make sense for DAX inodes, but we don't want it
+ * to report a failure either. Instead, we just return success and
+ * don't do any work.
+ */
+ if (dax_mapping(mapping))
+ return 0;
+
return force_page_cache_readahead(mapping, filp, index, nr);
}
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
} else {
- if (PageTransCompound(page)) {
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ if (PageTransCompound(page) && page_mapping(page)) {
+ VM_WARN_ON_ONCE(!PageLocked(page));
+
SetPageDoubleMap(compound_head(page));
if (PageMlocked(page))
clear_page_mlock(compound_head(page));
{
int i, nr = 1;
- VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+ VM_BUG_ON_PAGE(compound && !PageHead(page), page);
lock_page_memcg(page);
/* Hugepages are not counted in NR_FILE_MAPPED for now. */
struct kobj_attribute shmem_enabled_attr =
__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
return false;
}
}
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
#else /* !CONFIG_SHMEM */
module_init(slab_proc_init);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+ unsigned long offset;
+
+ /* Find and validate object. */
+ cachep = page->slab_cache;
+ objnr = obj_to_index(cachep, page, (void *)ptr);
+ BUG_ON(objnr >= cachep->num);
+
+ /* Find offset within object. */
+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+ return NULL;
+
+ return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
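The bounds test in __check_heap_object() above (and in the SLUB counterpart later in this series) is written to be overflow-safe: instead of checking "offset + n <= object_size", where offset + n can wrap around, it checks the offset first and then compares n against the remaining room. A small standalone sketch of that idiom:

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * True if [offset, offset + n) fits inside an object of object_size
	 * bytes. Comparing n against the remaining room avoids the wrap-around
	 * a naive "offset + n <= object_size" test could hit.
	 */
	static bool range_fits(unsigned long offset, unsigned long n,
			       unsigned long object_size)
	{
		return offset <= object_size && n <= object_size - offset;
	}

	int main(void)
	{
		printf("%d\n", range_fits(8, 8, 16));	/* 1: exactly fits */
		printf("%d\n", range_fits(8, 16, 16));	/* 0: runs past the end */
		printf("%d\n", range_fits(8, ~0UL, 16));	/* 0: would wrap a naive check */
		return 0;
	}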
/**
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
*/
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
+ LIST_HEAD(discard);
struct page *page, *h;
BUG_ON(irqs_disabled());
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
- discard_slab(s, page);
+ list_add(&page->lru, &discard);
} else {
list_slab_objects(s, page,
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
spin_unlock_irq(&n->list_lock);
+
+ list_for_each_entry_safe(page, h, &discard, lru)
+ discard_slab(s, page);
}
/*
EXPORT_SYMBOL(__kmalloc_node);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *s;
+ unsigned long offset;
+ size_t object_size;
+
+ /* Find object and usable object size. */
+ s = page->slab_cache;
+ object_size = slab_ksize(s);
+
+ /* Reject impossible pointers. */
+ if (ptr < page_address(page))
+ return s->name;
+
+ /* Find offset within object. */
+ offset = (ptr - page_address(page)) % s->size;
+
+ /* Adjust for redzone and reject if within the redzone. */
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+ if (offset < s->red_left_pad)
+ return s->name;
+ offset -= s->red_left_pad;
+ }
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= object_size && n <= object_size - offset)
+ return NULL;
+
+ return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
static size_t __ksize(const void *object)
{
struct page *page;
--- /dev/null
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+ BAD_STACK = -1,
+ NOT_STACK = 0,
+ GOOD_FRAME,
+ GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ * NOT_STACK: not at all on the stack
+ * GOOD_FRAME: fully within a valid stack frame
+ *	GOOD_STACK: fully on the stack (when frame checking is not possible)
+ * BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+ const void * const stack = task_stack_page(current);
+ const void * const stackend = stack + THREAD_SIZE;
+ int ret;
+
+ /* Object is not on the stack at all. */
+ if (obj + len <= stack || stackend <= obj)
+ return NOT_STACK;
+
+ /*
+	 * Reject: object partially overlaps the stack (passing the
+	 * check above means at least one end is within the stack,
+	 * so if this check fails, the other end is outside the stack).
+ */
+ if (obj < stack || stackend < obj + len)
+ return BAD_STACK;
+
+ /* Check if object is safely within a valid frame. */
+ ret = arch_within_stack_frames(stack, stackend, obj, len);
+ if (ret)
+ return ret;
+
+ return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+ bool to_user, const char *type)
+{
+ pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+ to_user ? "exposure" : "overwrite",
+ to_user ? "from" : "to", ptr, type ? : "unknown", len);
+ /*
+ * For greater effect, it would be nice to do do_group_exit(),
+ * but BUG() actually hooks all the lock-breaking and per-arch
+ * Oops code, so that is used here instead.
+ */
+ BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+ unsigned long high)
+{
+ unsigned long check_low = (uintptr_t)ptr;
+ unsigned long check_high = check_low + n;
+
+ /* Does not overlap if entirely above or entirely below. */
+ if (check_low >= high || check_high <= low)
+ return false;
+
+ return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+ unsigned long n)
+{
+ unsigned long textlow = (unsigned long)_stext;
+ unsigned long texthigh = (unsigned long)_etext;
+ unsigned long textlow_linear, texthigh_linear;
+
+ if (overlaps(ptr, n, textlow, texthigh))
+ return "<kernel text>";
+
+ /*
+ * Some architectures have virtual memory mappings with a secondary
+ * mapping of the kernel text, i.e. there is more than one virtual
+	 * kernel address that points to the kernel image. This is usually
+	 * the case when there is a separate linear physical memory mapping,
+	 * in which case __pa() is not just the reverse of __va(). This can
+	 * be detected and checked:
+ */
+ textlow_linear = (unsigned long)__va(__pa(textlow));
+ /* No different mapping: we're done. */
+ if (textlow_linear == textlow)
+ return NULL;
+
+ /* Check the secondary mapping... */
+ texthigh_linear = (unsigned long)__va(__pa(texthigh));
+ if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+ return "<linear kernel text>";
+
+ return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+ /* Reject if object wraps past end of memory. */
+ if ((unsigned long)ptr + n < (unsigned long)ptr)
+ return "<wrapped address>";
+
+ /* Reject if NULL or ZERO-allocation. */
+ if (ZERO_OR_NULL_PTR(ptr))
+ return "<null>";
+
+ return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ struct page *page, *endpage;
+ const void *end = ptr + n - 1;
+ bool is_reserved, is_cma;
+
+ /*
+ * Some architectures (arm64) return true for virt_addr_valid() on
+ * vmalloced addresses. Work around this by checking for vmalloc
+ * first.
+ */
+ if (is_vmalloc_addr(ptr))
+ return NULL;
+
+ if (!virt_addr_valid(ptr))
+ return NULL;
+
+ page = virt_to_head_page(ptr);
+
+ /* Check slab allocator for flags and size. */
+ if (PageSlab(page))
+ return __check_heap_object(ptr, n, page);
+
+ /*
+ * Sometimes the kernel data regions are not marked Reserved (see
+ * check below). And sometimes [_sdata,_edata) does not cover
+ * rodata and/or bss, so check each range explicitly.
+ */
+
+ /* Allow reads of kernel rodata region (if not marked as Reserved). */
+ if (ptr >= (const void *)__start_rodata &&
+ end <= (const void *)__end_rodata) {
+ if (!to_user)
+ return "<rodata>";
+ return NULL;
+ }
+
+ /* Allow kernel data region (if not marked as Reserved). */
+ if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+ return NULL;
+
+ /* Allow kernel bss region (if not marked as Reserved). */
+ if (ptr >= (const void *)__bss_start &&
+ end <= (const void *)__bss_stop)
+ return NULL;
+
+ /* Is the object wholly within one base page? */
+ if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+ ((unsigned long)end & (unsigned long)PAGE_MASK)))
+ return NULL;
+
+ /* Allow if start and end are inside the same compound page. */
+ endpage = virt_to_head_page(end);
+ if (likely(endpage == page))
+ return NULL;
+
+ /*
+	 * Accept only if the range is entirely either Reserved (i.e. special
+	 * or device memory) or CMA; otherwise, reject since the object spans
+	 * several independently allocated pages.
+ */
+ is_reserved = PageReserved(page);
+ is_cma = is_migrate_cma_page(page);
+ if (!is_reserved && !is_cma)
+ goto reject;
+
+ for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+ page = virt_to_head_page(ptr);
+ if (is_reserved && !PageReserved(page))
+ goto reject;
+ if (is_cma && !is_migrate_cma_page(page))
+ goto reject;
+ }
+
+ return NULL;
+
+reject:
+ return "<spans multiple pages>";
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+ const char *err;
+
+ /* Skip all tests if size is zero. */
+ if (!n)
+ return;
+
+ /* Check for invalid addresses. */
+ err = check_bogus_address(ptr, n);
+ if (err)
+ goto report;
+
+ /* Check for bad heap object. */
+ err = check_heap_object(ptr, n, to_user);
+ if (err)
+ goto report;
+
+ /* Check for bad stack object. */
+ switch (check_stack_object(ptr, n)) {
+ case NOT_STACK:
+ /* Object is not touching the current process stack. */
+ break;
+ case GOOD_FRAME:
+ case GOOD_STACK:
+ /*
+ * Object is either in the correct frame (when it
+ * is possible to check) or just generally on the
+ * process stack (when frame checking not available).
+ */
+ return;
+ default:
+ err = "<process stack>";
+ goto report;
+ }
+
+ /* Check for object in kernel to avoid text exposure. */
+ err = check_kernel_text_object(ptr, n);
+ if (!err)
+ return;
+
+report:
+ report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
if (err < 0)
goto out_uninit_mvrp;
- vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
+ vlan->nest_level = dev_get_nest_level(real_dev) + 1;
err = register_netdevice(dev);
if (err < 0)
goto out_uninit_mvrp;
/* wakeup anybody waiting for slots to pin pages */
wake_up(&vp_wq);
}
- kfree(in_pages);
- kfree(out_pages);
+ kvfree(in_pages);
+ kvfree(out_pages);
return err;
}
/* If old entry was unassociated with any port, then delete it. */
f = __br_fdb_get(br, br->dev->dev_addr, 0);
- if (f && f->is_local && !f->dst)
+ if (f && f->is_local && !f->dst && !f->added_by_user)
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, 0);
if (!br_vlan_should_use(v))
continue;
f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
- if (f && f->is_local && !f->dst)
+ if (f && f->is_local && !f->dst && !f->added_by_user)
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, v->vid);
}
}
/* Update (create or replace) forwarding database entry */
-static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
- __u16 state, __u16 flags, __u16 vid)
+static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
+ const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
{
- struct net_bridge *br = source->br;
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
bool modified = false;
/* If the port cannot learn allow only local and static entries */
- if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+ if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
!(source->state == BR_STATE_LEARNING ||
source->state == BR_STATE_FORWARDING))
return -EPERM;
+ if (!source && !(state & NUD_PERMANENT)) {
+ pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
+ br->dev->name);
+ return -EINVAL;
+ }
+
fdb = fdb_find(head, addr, vid);
if (fdb == NULL) {
if (!(flags & NLM_F_CREATE))
return 0;
}
-static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
- const unsigned char *addr, u16 nlh_flags, u16 vid)
+static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
+ struct net_bridge_port *p, const unsigned char *addr,
+ u16 nlh_flags, u16 vid)
{
int err = 0;
if (ndm->ndm_flags & NTF_USE) {
+ if (!p) {
+ pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
+ br->dev->name);
+ return -EINVAL;
+ }
local_bh_disable();
rcu_read_lock();
- br_fdb_update(p->br, p, addr, vid, true);
+ br_fdb_update(br, p, addr, vid, true);
rcu_read_unlock();
local_bh_enable();
} else {
- spin_lock_bh(&p->br->hash_lock);
- err = fdb_add_entry(p, addr, ndm->ndm_state,
+ spin_lock_bh(&br->hash_lock);
+ err = fdb_add_entry(br, p, addr, ndm->ndm_state,
nlh_flags, vid);
- spin_unlock_bh(&p->br->hash_lock);
+ spin_unlock_bh(&br->hash_lock);
}
return err;
dev->name);
return -EINVAL;
}
+ br = p->br;
vg = nbp_vlan_group(p);
}
}
/* VID was specified, so use it. */
- if (dev->priv_flags & IFF_EBRIDGE)
- err = br_fdb_insert(br, NULL, addr, vid);
- else
- err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
} else {
- if (dev->priv_flags & IFF_EBRIDGE)
- err = br_fdb_insert(br, NULL, addr, 0);
- else
- err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
if (err || !vg || !vg->num_vlans)
goto out;
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
- if (dev->priv_flags & IFF_EBRIDGE)
- err = br_fdb_insert(br, NULL, addr, v->vid);
- else
- err = __br_fdb_add(ndm, p, addr, nlh_flags,
- v->vid);
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
if (err)
goto out;
}
put_generic_request(req);
}
-void cancel_generic_request(struct ceph_mon_generic_request *req)
+static void cancel_generic_request(struct ceph_mon_generic_request *req)
{
struct ceph_mon_client *monc = req->monc;
struct ceph_mon_generic_request *lookup_req;
pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
GFP_NOIO);
- if (!pages) {
+ if (IS_ERR(pages)) {
ceph_msg_put(m);
return NULL;
}
}
EXPORT_SYMBOL(ceph_find_or_create_string);
-static void ceph_free_string(struct rcu_head *head)
-{
- struct ceph_string *cs = container_of(head, struct ceph_string, rcu);
- kfree(cs);
-}
-
void ceph_release_string(struct kref *ref)
{
struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
}
spin_unlock(&string_tree_lock);
- call_rcu(&cs->rcu, ceph_free_string);
+ kfree_rcu(cs, rcu);
}
EXPORT_SYMBOL(ceph_release_string);
EXPORT_SYMBOL(netdev_lower_dev_get_private);
-int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(const struct net_device *dev))
+int dev_get_nest_level(struct net_device *dev)
{
struct net_device *lower = NULL;
struct list_head *iter;
ASSERT_RTNL();
netdev_for_each_lower_dev(dev, lower, iter) {
- nest = dev_get_nest_level(lower, type_check);
+ nest = dev_get_nest_level(lower);
if (max_nest < nest)
max_nest = nest;
}
- if (type_check(dev))
- max_nest++;
-
- return max_nest;
+ return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);
{
int err;
- if (!skb_cloned(skb))
- return 0;
- if (skb_clone_writable(skb, write_len))
- return 0;
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (!err)
- bpf_compute_data_end(skb);
+ err = skb_ensure_writable(skb, write_len);
+ bpf_compute_data_end(skb);
+
return err;
}
+static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
+{
+ if (skb_at_tc_ingress(skb))
+ skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
+static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
+{
+ if (skb_at_tc_ingress(skb))
+ skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
{
- struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
struct sk_buff *skb = (struct sk_buff *) (long) r1;
- int offset = (int) r2;
+ unsigned int offset = (unsigned int) r2;
void *from = (void *) (long) r3;
unsigned int len = (unsigned int) r4;
void *ptr;
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
return -EINVAL;
-
- /* bpf verifier guarantees that:
- * 'from' pointer points to bpf program stack
- * 'len' bytes of it were initialized
- * 'len' > 0
- * 'skb' is a valid pointer to 'struct sk_buff'
- *
- * so check for invalid 'offset' and too large 'len'
- */
- if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
+ if (unlikely(offset > 0xffff))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + len)))
return -EFAULT;
- ptr = skb_header_pointer(skb, offset, len, sp->buff);
- if (unlikely(!ptr))
- return -EFAULT;
-
+ ptr = skb->data + offset;
if (flags & BPF_F_RECOMPUTE_CSUM)
- skb_postpull_rcsum(skb, ptr, len);
+ __skb_postpull_rcsum(skb, ptr, len, offset);
memcpy(ptr, from, len);
- if (ptr == sp->buff)
- /* skb_store_bits cannot return -EFAULT here */
- skb_store_bits(skb, offset, ptr, len);
-
if (flags & BPF_F_RECOMPUTE_CSUM)
- skb_postpush_rcsum(skb, ptr, len);
+ __skb_postpush_rcsum(skb, ptr, len, offset);
if (flags & BPF_F_INVALIDATE_HASH)
skb_clear_hash(skb);
static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
- int offset = (int) r2;
+ unsigned int offset = (unsigned int) r2;
void *to = (void *)(unsigned long) r3;
unsigned int len = (unsigned int) r4;
void *ptr;
- if (unlikely((u32) offset > 0xffff))
+ if (unlikely(offset > 0xffff))
goto err_clear;
ptr = skb_header_pointer(skb, offset, len, to);
static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
- int offset = (int) r2;
- __sum16 sum, *ptr;
+ unsigned int offset = (unsigned int) r2;
+ __sum16 *ptr;
if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
return -EINVAL;
- if (unlikely((u32) offset > 0xffff))
+ if (unlikely(offset > 0xffff || offset & 1))
return -EFAULT;
- if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
- return -EFAULT;
-
- ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
- if (unlikely(!ptr))
+ if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
return -EFAULT;
+ ptr = (__sum16 *)(skb->data + offset);
switch (flags & BPF_F_HDR_FIELD_MASK) {
case 0:
if (unlikely(from != 0))
return -EINVAL;
}
- if (ptr == &sum)
- /* skb_store_bits guaranteed to not return -EFAULT here */
- skb_store_bits(skb, offset, ptr, sizeof(sum));
-
return 0;
}
struct sk_buff *skb = (struct sk_buff *) (long) r1;
bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
- int offset = (int) r2;
- __sum16 sum, *ptr;
+ unsigned int offset = (unsigned int) r2;
+ __sum16 *ptr;
if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
BPF_F_HDR_FIELD_MASK)))
return -EINVAL;
- if (unlikely((u32) offset > 0xffff))
+ if (unlikely(offset > 0xffff || offset & 1))
return -EFAULT;
- if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
+ if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
return -EFAULT;
- ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
- if (unlikely(!ptr))
- return -EFAULT;
+ ptr = (__sum16 *)(skb->data + offset);
if (is_mmzero && !*ptr)
return 0;
if (is_mmzero && !*ptr)
*ptr = CSUM_MANGLED_0;
- if (ptr == &sum)
- /* skb_store_bits guaranteed to not return -EFAULT here */
- skb_store_bits(skb, offset, ptr, sizeof(sum));
-
return 0;
}
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
- if (skb_at_tc_ingress(skb))
- skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
-
return dev_forward_skb(dev, skb);
}
if (unlikely(!skb))
return -ENOMEM;
+ bpf_push_mac_rcsum(skb);
+
return flags & BPF_F_INGRESS ?
__bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
return -EINVAL;
}
+ bpf_push_mac_rcsum(skb);
+
return ri->flags & BPF_F_INGRESS ?
__bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
vlan_proto != htons(ETH_P_8021AD)))
vlan_proto = htons(ETH_P_8021Q);
+ bpf_push_mac_rcsum(skb);
ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+ bpf_pull_mac_rcsum(skb);
+
bpf_compute_data_end(skb);
return ret;
}
struct sk_buff *skb = (struct sk_buff *) (long) r1;
int ret;
+ bpf_push_mac_rcsum(skb);
ret = skb_vlan_pop(skb);
+ bpf_pull_mac_rcsum(skb);
+
bpf_compute_data_end(skb);
return ret;
}
}
#ifdef CONFIG_SOCK_CGROUP_DATA
-static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long)r1;
struct bpf_map *map = (struct bpf_map *)(long)r2;
return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
}
-static const struct bpf_func_proto bpf_skb_in_cgroup_proto = {
- .func = bpf_skb_in_cgroup,
+static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
+ .func = bpf_skb_under_cgroup,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
#ifdef CONFIG_SOCK_CGROUP_DATA
- case BPF_FUNC_skb_in_cgroup:
- return &bpf_skb_in_cgroup_proto;
+ case BPF_FUNC_skb_under_cgroup:
+ return &bpf_skb_under_cgroup_proto;
#endif
default:
return sk_filter_func_proto(func_id);
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
loff_t pos)
{
- struct fib_table *tb = iter->main_tb;
struct key_vector *l, **tp = &iter->tnode;
- struct trie *t;
t_key key;
/* use cache location of next-to-find key */
pos -= iter->pos;
key = iter->key;
} else {
- t = (struct trie *)tb->tb_data;
- iter->tnode = t->kv;
iter->pos = 0;
key = 0;
}
return NULL;
iter->main_tb = tb;
+ t = (struct trie *)tb->tb_data;
+ iter->tnode = t->kv;
if (*pos != 0)
return fib_route_get_idx(iter, *pos);
- t = (struct trie *)tb->tb_data;
- iter->tnode = t->kv;
iter->pos = 0;
iter->key = 0;
tunnel->parms.o_flags, proto, tunnel->parms.o_key,
htonl(tunnel->o_seqno));
- skb_set_inner_protocol(skb, proto);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
.get_link_net = ip_tunnel_get_link_net,
};
+static bool is_vti_tunnel(const struct net_device *dev)
+{
+ return dev->netdev_ops == &vti_netdev_ops;
+}
+
+static int vti_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ if (!is_vti_tunnel(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ if (!net_eq(tunnel->net, dev_net(dev)))
+ xfrm_garbage_collect(tunnel->net);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vti_notifier_block __read_mostly = {
+ .notifier_call = vti_device_event,
+};
+
static int __init vti_init(void)
{
const char *msg;
pr_info("IPv4 over IPsec tunneling driver\n");
+ register_netdevice_notifier(&vti_notifier_block);
+
msg = "tunnel device";
err = register_pernet_device(&vti_net_ops);
if (err < 0)
xfrm_proto_esp_failed:
unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
+ unregister_netdevice_notifier(&vti_notifier_block);
pr_err("vti init: failed to register %s\n", msg);
return err;
}
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
unregister_pernet_device(&vti_net_ops);
+ unregister_netdevice_notifier(&vti_notifier_block);
}
module_init(vti_init);
/* combine the user config with event to determine if permanent
* addresses are to be removed from address hash table
*/
- keep_addr = !(how || _keep_addr <= 0);
+ keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
/* Step 2: clear hash table */
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
/* re-combine the user config with event to determine if permanent
* addresses are to be removed from the interface list
*/
- keep_addr = (!how && _keep_addr > 0);
+ keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
INIT_LIST_HEAD(&del_list);
list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
memcpy(new, hop, start);
ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def,
secattr);
- if (ret_val < 0)
+ if (ret_val < 0) {
+ kfree(new);
return ERR_PTR(ret_val);
+ }
buf_len = start + ret_val;
/* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */
gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
- skb_set_inner_protocol(skb, protocol);
-
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);
}
struct icmp6hdr user_icmph;
int addr_type;
struct in6_addr *daddr;
- int iif = 0;
+ int oif = 0;
struct flowi6 fl6;
int err;
struct dst_entry *dst;
if (u->sin6_family != AF_INET6) {
return -EAFNOSUPPORT;
}
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != u->sin6_scope_id) {
- return -EINVAL;
- }
daddr = &(u->sin6_addr);
- iif = u->sin6_scope_id;
+ if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
+ oif = u->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
}
- if (!iif)
- iif = sk->sk_bound_dev_if;
+ if (!oif)
+ oif = sk->sk_bound_dev_if;
+
+ if (!oif)
+ oif = np->sticky_pktinfo.ipi6_ifindex;
+
+ if (!oif && ipv6_addr_is_multicast(daddr))
+ oif = np->mcast_oif;
+ else if (!oif)
+ oif = np->ucast_oif;
addr_type = ipv6_addr_type(daddr);
- if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
- return -EINVAL;
- if (addr_type & IPV6_ADDR_MAPPED)
+ if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+ (addr_type & IPV6_ADDR_MAPPED) ||
+ (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
return -EINVAL;
/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
+ fl6.flowi6_oif = oif;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
- fl6.flowi6_oif = np->mcast_oif;
- else if (!fl6.flowi6_oif)
- fl6.flowi6_oif = np->ucast_oif;
-
ipc6.tclass = np->tclass;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
self->magic = IAS_MAGIC;
self->mode = mode;
- if (mode == IAS_CLIENT)
- iriap_register_lsap(self, slsap_sel, mode);
+ if (mode == IAS_CLIENT) {
+ if (iriap_register_lsap(self, slsap_sel, mode)) {
+ kfree(self);
+ return NULL;
+ }
+ }
self->confirm = callback;
self->priv = priv;
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
- skb_queue_purge(&sdata->u.ap.ps.bc_buf);
+ ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
mutex_lock(&local->mtx);
ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
trace_drv_get_expected_throughput(sta);
if (local->ops->get_expected_throughput)
- ret = local->ops->get_expected_throughput(sta);
+ ret = local->ops->get_expected_throughput(&local->hw, sta);
trace_drv_return_u32(local, ret);
return ret;
netif_carrier_off(sdata->dev);
+ /* flush STAs and mpaths on this iface */
+ sta_info_flush(sdata);
+ mesh_path_flush_by_iface(sdata);
+
/* stop the beacon */
ifmsh->mesh_id_len = 0;
sdata->vif.bss_conf.enable_beacon = false;
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+
+ /* remove beacon */
bcn = rcu_dereference_protected(ifmsh->beacon,
lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(ifmsh->beacon, NULL);
kfree_rcu(bcn, rcu_head);
- /* flush STAs and mpaths on this iface */
- sta_info_flush(sdata);
- mesh_path_flush_by_iface(sdata);
-
/* free all potentially still buffered group-addressed frames */
local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
skb_queue_purge(&ifmsh->ps.bc_buf);
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
- if (!txqi->tin.backlog_packets)
+ if (txqi->tin.backlog_packets)
set_bit(tid, &sta->txq_buffered_tids);
else
clear_bit(tid, &sta->txq_buffered_tids);
clear_sta_flag(sta, WLAN_STA_SP);
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+
+ /* mesh Peer Service Period support */
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
+ ieee80211_is_data_qos(fc))
+ ieee80211_mpsp_trigger_process(
+ ieee80211_get_qos_ctl(hdr), sta, true, acked);
+
if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* The STA is in power save mode, so assume
return;
}
- /* mesh Peer Service Period support */
- if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
- ieee80211_is_data_qos(fc))
- ieee80211_mpsp_trigger_process(
- ieee80211_get_qos_ctl(hdr),
- sta, true, acked);
-
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
(ieee80211_is_data(hdr->frame_control)) &&
(rates_idx != -1))
skb = skb_dequeue(&ps->bc_buf);
if (skb) {
purged++;
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
total += skb_queue_len(&ps->bc_buf);
}
if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
ps_dbg(tx->sdata,
"BC TX buffer full - dropping the oldest frame\n");
- dev_kfree_skb(skb_dequeue(&ps->bc_buf));
+ ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
} else
tx->local->total_ps_buffered++;
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
break;
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
info = IEEE80211_SKB_CB(skb);
helper = rcu_dereference(nfct_help(expect->master)->helper);
if (helper) {
seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
- if (helper->expect_policy[expect->class].name)
+ if (helper->expect_policy[expect->class].name[0])
seq_printf(s, "/%s",
helper->expect_policy[expect->class].name);
}
"timeout to %u seconds for",
info->timeout);
nf_ct_dump_tuple(&exp->tuple);
- mod_timer(&exp->timeout, jiffies + info->timeout * HZ);
+ mod_timer_pending(&exp->timeout,
+ jiffies + info->timeout * HZ);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
return -EINVAL;
+ if (otuple.dst.protonum != rtuple.dst.protonum)
+ return -EINVAL;
ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
&rtuple, u3);
return PTR_ERR(exp);
err = nf_ct_expect_related_report(exp, portid, report);
- if (err < 0) {
- nf_ct_expect_put(exp);
- return err;
- }
-
- return 0;
+ nf_ct_expect_put(exp);
+ return err;
}
static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
return NF_DROP;
}
cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
- if (!cseq) {
+ if (!cseq && *(*dptr + matchoff) != '0') {
nf_ct_helper_log(skb, ct, "cannot get cseq");
return NF_DROP;
}
return NF_DROP;
}
cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
- if (!cseq) {
+ if (!cseq && *(*dptr + matchoff) != '0') {
nf_ct_helper_log(skb, ct, "cannot get cseq");
return NF_DROP;
}
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
int err;
- queue = instance_lookup(q, queue_num);
- if (!queue)
- queue = verdict_instance_lookup(q, queue_num,
- NETLINK_CB(skb).portid);
+ queue = verdict_instance_lookup(q, queue_num,
+ NETLINK_CB(skb).portid);
if (IS_ERR(queue))
return PTR_ERR(queue);
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
+ u32 offset, len;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
tb[NFTA_EXTHDR_LEN] == NULL)
return -EINVAL;
+ offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+ len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+
+ if (offset > U8_MAX || len > U8_MAX)
+ return -ERANGE;
+
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
- priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
- priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+ priv->offset = offset;
+ priv->len = len;
priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
return nft_validate_register_store(ctx, priv->dreg, NULL,
} else if (d > 0)
parent = parent->rb_right;
else {
-found:
if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
continue;
}
}
- if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
- rbe = interval;
- goto found;
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+ nft_set_elem_active(&interval->ext, genmask) &&
+ !nft_rbtree_interval_end(interval)) {
+ spin_unlock_bh(&nft_rbtree_lock);
+ *ext = &interval->ext;
+ return true;
}
out:
spin_unlock_bh(&nft_rbtree_lock);
struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
- enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
unsigned int dataoff;
u8 protonum;
ct = nf_ct_tuplehash_to_ctrack(h);
- ctinfo = ovs_ct_get_info(h);
- if (ctinfo == IP_CT_NEW) {
- /* This should not happen. */
- WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct);
- }
skb->nfct = &ct->ct_general;
- skb->nfctinfo = ctinfo;
+ skb->nfctinfo = ovs_ct_get_info(h);
return ct;
}
return ERR_CAST(dev);
}
- dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP);
+ if (err < 0) {
+ rtnl_delete_link(dev);
+ rtnl_unlock();
+ ovs_vport_free(vport);
+ goto error;
+ }
+
rtnl_unlock();
return vport;
error:
struct net *net = ovs_dp_get_net(parms->dp);
struct net_device *dev;
struct vport *vport;
+ int err;
vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
if (IS_ERR(vport))
return ERR_CAST(dev);
}
- dev_change_flags(dev, dev->flags | IFF_UP);
- rtnl_unlock();
+ err = dev_change_flags(dev, dev->flags | IFF_UP);
+ if (err < 0) {
+ rtnl_delete_link(dev);
+ rtnl_unlock();
+ ovs_vport_free(vport);
+ return ERR_PTR(err);
+ }
+ rtnl_unlock();
return vport;
}
static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
{
- dev->needed_headroom = new_hr;
+ dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
}
static const struct net_device_ops internal_dev_netdev_ops = {
return ERR_CAST(dev);
}
- dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP);
+ if (err < 0) {
+ rtnl_delete_link(dev);
+ rtnl_unlock();
+ ovs_vport_free(vport);
+ goto error;
+ }
+
rtnl_unlock();
return vport;
error:
spinlock_t lock;
rwlock_t state_lock; /* lock for state transition */
atomic_t usage;
+ atomic_t skb_count; /* Outstanding packets on this call */
atomic_t sequence; /* Tx data packet sequence counter */
u32 local_abort; /* local abort code */
u32 remote_abort; /* remote abort code */
call->state = RXRPC_CALL_SERVER_ACCEPTING;
list_add_tail(&call->accept_link, &rx->acceptq);
rxrpc_get_call(call);
+ atomic_inc(&call->skb_count);
nsp = rxrpc_skb(notification);
nsp->call = call;
ASSERTCMP(sp->call, ==, NULL);
sp->call = call;
rxrpc_get_call(call);
+ atomic_inc(&call->skb_count);
/* insert into the buffer in sequence order */
spin_lock_bh(&call->lock);
skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
sp->call = call;
rxrpc_get_call(call);
+ atomic_inc(&call->skb_count);
spin_lock_bh(&call->lock);
if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
BUG();
sp->error = error;
sp->call = call;
rxrpc_get_call(call);
+ atomic_inc(&call->skb_count);
spin_lock_bh(&call->lock);
ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
return;
}
+ if (!call->conn)
+ goto skip_msg_init;
+
/* there's a good chance we're going to have to send a message, so set
* one up in advance */
msg.msg_name = &call->conn->params.peer->srx.transport;
memset(iov, 0, sizeof(iov));
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
+skip_msg_init:
/* deal with events of a final nature */
if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
list_del_init(&call->link);
write_unlock_bh(&rxrpc_call_lock);
+ set_bit(RXRPC_CALL_RELEASED, &call->flags);
call->state = RXRPC_CALL_DEAD;
rxrpc_put_call(call);
_leave(" = %d", ret);
*/
found_user_ID_now_present:
write_unlock(&rx->call_lock);
+ set_bit(RXRPC_CALL_RELEASED, &call->flags);
call->state = RXRPC_CALL_DEAD;
rxrpc_put_call(call);
_leave(" = -EEXIST [%p]", call);
spin_lock_bh(&call->lock);
while ((skb = skb_dequeue(&call->rx_queue)) ||
(skb = skb_dequeue(&call->rx_oos_queue))) {
- sp = rxrpc_skb(skb);
- if (sp->call) {
- ASSERTCMP(sp->call, ==, call);
- rxrpc_put_call(call);
- sp->call = NULL;
- }
- skb->destructor = NULL;
spin_unlock_bh(&call->lock);
+ sp = rxrpc_skb(skb);
_debug("- zap %s %%%u #%u",
rxrpc_pkts[sp->hdr.type],
sp->hdr.serial, sp->hdr.seq);
if (atomic_dec_and_test(&call->usage)) {
_debug("call %d dead", call->debug_id);
+ WARN_ON(atomic_read(&call->skb_count) != 0);
ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
rxrpc_queue_work(&call->destroyer);
}
if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
_debug("already terminated");
ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
- skb->destructor = NULL;
- sp->call = NULL;
- rxrpc_put_call(call);
rxrpc_free_skb(skb);
return 0;
}
ret = 0;
out:
- /* release the socket buffer */
- if (skb) {
- skb->destructor = NULL;
- sp->call = NULL;
- rxrpc_put_call(call);
- rxrpc_free_skb(skb);
- }
+ rxrpc_free_skb(skb);
_leave(" = %d", ret);
return ret;
struct rxrpc_skb_priv *sp;
bool terminal;
int ret, ackbit, ack;
+ u32 serial;
+ u8 flags;
_enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
sp = rxrpc_skb(skb);
ASSERTCMP(sp->call, ==, NULL);
+ flags = sp->hdr.flags;
+ serial = sp->hdr.serial;
spin_lock(&call->lock);
sp->call = call;
rxrpc_get_call(call);
- terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
- !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
+ atomic_inc(&call->skb_count);
+ terminal = ((flags & RXRPC_LAST_PACKET) &&
+ !(flags & RXRPC_CLIENT_INITIATED));
ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
if (ret < 0) {
if (ret == -ENOMEM || ret == -ENOBUFS) {
}
skb = NULL;
+ sp = NULL;
_debug("post #%u", seq);
ASSERTCMP(call->rx_data_post, ==, seq);
call->rx_data_post++;
- if (sp->hdr.flags & RXRPC_LAST_PACKET)
+ if (flags & RXRPC_LAST_PACKET)
set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
/* if we've reached an out of sequence packet then we need to drain
spin_unlock(&call->lock);
atomic_inc(&call->ackr_not_idle);
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false);
_leave(" = 0 [posted]");
return 0;
discard_and_ack:
_debug("discard and ACK packet %p", skb);
- __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+ __rxrpc_propose_ACK(call, ack, serial, true);
discard:
spin_unlock(&call->lock);
rxrpc_free_skb(skb);
return 0;
enqueue_and_ack:
- __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+ __rxrpc_propose_ACK(call, ack, serial, true);
enqueue_packet:
_net("defer skb %p", skb);
spin_unlock(&call->lock);
* post connection-level events to the connection
* - this includes challenges, responses and some aborts
*/
-static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
+static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
_enter("%p,%p", conn, skb);
skb_queue_tail(&conn->rx_queue, skb);
- return rxrpc_queue_conn(conn);
+ rxrpc_queue_conn(conn);
}
/*
rcu_read_lock();
-retry_find_conn:
conn = rxrpc_find_connection_rcu(local, skb);
if (!conn)
goto cant_route_call;
if (sp->hdr.callNumber == 0) {
/* Connection-level packet */
_debug("CONN %p {%d}", conn, conn->debug_id);
- if (!rxrpc_post_packet_to_conn(conn, skb))
- goto retry_find_conn;
+ rxrpc_post_packet_to_conn(conn, skb);
} else {
/* Call-bound packets are routed by connection channel. */
unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
_debug("reject type %d",sp->hdr.type);
rxrpc_reject_packet(local, skb);
+ } else {
+ rxrpc_free_skb(skb);
}
_leave(" [no call]");
return;
}
/* we transferred the whole data packet */
+ if (!(flags & MSG_PEEK))
+ rxrpc_kernel_data_consumed(call, skb);
+
if (sp->hdr.flags & RXRPC_LAST_PACKET) {
_debug("last");
if (rxrpc_conn_is_client(call->conn)) {
}
-/**
- * rxrpc_kernel_data_delivered - Record delivery of data message
- * @skb: Message holding data
- *
- * Record the delivery of a data message. This permits RxRPC to keep its
- * tracking correct. The socket buffer will be deleted.
- */
-void rxrpc_kernel_data_delivered(struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_call *call = sp->call;
-
- ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
- ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
- call->rx_data_recv = sp->hdr.seq;
-
- ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
- rxrpc_free_skb(skb);
-}
-
-EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
-
/**
* rxrpc_kernel_is_data_last - Determine if data message is last one
* @skb: Message holding data
spin_unlock_bh(&call->lock);
}
+/**
+ * rxrpc_kernel_data_consumed - Record consumption of data message
+ * @call: The call to which the message pertains.
+ * @skb: Message holding data
+ *
+ * Record the consumption of a data message and generate an ACK if appropriate.
+ * The call state is shifted if this was the final packet. The caller must be
+ * in process context with no spinlocks held.
+ *
+ * TODO: Actually generate the ACK here rather than punting this to the
+ * workqueue.
+ */
+void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);
+
+ ASSERTCMP(sp->call, ==, call);
+ ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);
+
+ /* TODO: Fix the sequence number tracking */
+ ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
+ ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
+ ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
+
+ call->rx_data_recv = sp->hdr.seq;
+ rxrpc_hard_ACK_data(call, sp);
+}
+EXPORT_SYMBOL(rxrpc_kernel_data_consumed);
+
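For reference, a minimal sketch of how an in-kernel rxrpc user might drive this helper once it has copied a data packet's payload out of the skb. Only rxrpc_kernel_data_consumed() comes from the hunk above; example_handle_data() and the header choice are illustrative assumptions, and the call must be made in process context with no spinlocks held:

    #include <net/af_rxrpc.h>   /* assumed home of the kernel-facing rxrpc API */

    /* Hypothetical consumer: copy the payload out, then let rxrpc hard-ACK it. */
    static void example_handle_data(struct rxrpc_call *call, struct sk_buff *skb)
    {
            /* ... copy the skb's payload to the upper layer here ... */

            /* Record consumption so rxrpc can generate the ACK and, for the
             * final packet, shift the call state.
             */
            rxrpc_kernel_data_consumed(call, skb);
    }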
/*
- * destroy a packet that has an RxRPC control buffer
- * - advance the hard-ACK state of the parent call (done here in case something
- * in the kernel bypasses recvmsg() and steals the packet directly off of the
- * socket receive queue)
+ * Destroy a packet that has an RxRPC control buffer
*/
void rxrpc_packet_destructor(struct sk_buff *skb)
{
_enter("%p{%p}", skb, call);
if (call) {
- /* send the final ACK on a client call */
- if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
- rxrpc_hard_ACK_data(call, sp);
+ if (atomic_dec_return(&call->skb_count) < 0)
+ BUG();
rxrpc_put_call(call);
sp->call = NULL;
}
if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
if (p->ops->cleanup)
p->ops->cleanup(p, bind);
- list_del(&p->list);
tcf_hash_destroy(p->hinfo, p);
ret = ACT_P_DELETED;
}
return res;
}
-int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
- struct tcf_result *res)
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+ int nr_actions, struct tcf_result *res)
{
- const struct tc_action *a;
- int ret = -1;
+ int ret = -1, i;
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
ret = TC_ACT_OK;
goto exec_done;
}
- list_for_each_entry(a, actions, list) {
+ for (i = 0; i < nr_actions; i++) {
+ const struct tc_action *a = actions[i];
+
repeat:
ret = a->ops->act(skb, a, res);
if (ret == TC_ACT_REPEAT)
return ERR_PTR(err);
}
-static void cleanup_a(struct list_head *actions)
-{
- struct tc_action *a, *tmp;
-
- list_for_each_entry_safe(a, tmp, actions, list) {
- list_del(&a->list);
- kfree(a);
- }
-}
-
static int tca_action_flush(struct net *net, struct nlattr *nla,
struct nlmsghdr *n, u32 portid)
{
return ret;
}
err:
- cleanup_a(&actions);
+ tcf_action_destroy(&actions, 0);
return ret;
}
ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
if (ret)
- goto done;
+ return ret;
- /* dump then free all the actions after update; inserted policy
- * stays intact
- */
- ret = tcf_add_notify(net, n, &actions, portid);
- cleanup_a(&actions);
-done:
- return ret;
+ return tcf_add_notify(net, n, &actions, portid);
}
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
const struct tc_action_ops *ops)
{
struct tc_action_net *tn = net_generic(net, police_net_id);
- struct tcf_hashinfo *hinfo = tn->hinfo;
- int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
- struct nlattr *nest;
-
- spin_lock_bh(&hinfo->lock);
-
- s_i = cb->args[0];
-
- for (i = 0; i < (POL_TAB_MASK + 1); i++) {
- struct hlist_head *head;
- struct tc_action *p;
-
- head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)];
-
- hlist_for_each_entry_rcu(p, head, tcfa_head) {
- index++;
- if (index < s_i)
- continue;
- nest = nla_nest_start(skb, index);
- if (nest == NULL)
- goto nla_put_failure;
- if (type == RTM_DELACTION)
- err = tcf_action_dump_1(skb, p, 0, 1);
- else
- err = tcf_action_dump_1(skb, p, 0, 0);
- if (err < 0) {
- index--;
- nla_nest_cancel(skb, nest);
- goto done;
- }
- nla_nest_end(skb, nest);
- n_i++;
- }
- }
-done:
- spin_unlock_bh(&hinfo->lock);
- if (n_i)
- cb->args[0] += n_i;
- return n_i;
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- goto done;
+ return tcf_generic_walker(tn, skb, cb, type, ops);
}
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
struct tc_action_net *tn = net_generic(net, police_net_id);
+ bool exists = false;
int size;
if (nla == NULL)
size = nla_len(tb[TCA_POLICE_TBF]);
if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
return -EINVAL;
+
parm = nla_data(tb[TCA_POLICE_TBF]);
+ exists = tcf_hash_check(tn, parm->index, a, bind);
+ if (exists && bind)
+ return 0;
- if (parm->index) {
- if (tcf_hash_check(tn, parm->index, a, bind)) {
- if (ovr)
- goto override;
- /* not replacing */
- return -EEXIST;
- }
- } else {
+ if (!exists) {
ret = tcf_hash_create(tn, parm->index, NULL, a,
&act_police_ops, bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
+ } else {
+ tcf_hash_release(*a, bind);
+ if (!ovr)
+ return -EEXIST;
}
-override:
police = to_police(*a);
if (parm->rate.rate) {
err = -ENOMEM;
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND);
- INIT_LIST_HEAD(&exts->actions);
+ LIST_HEAD(actions);
+
+ tcf_exts_to_list(exts, &actions);
+ tcf_action_destroy(&actions, TCA_ACT_UNBIND);
+ kfree(exts->actions);
+ exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
{
struct tc_action *act;
- INIT_LIST_HEAD(&exts->actions);
if (exts->police && tb[exts->police]) {
act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
"police", ovr,
return PTR_ERR(act);
act->type = exts->type = TCA_OLD_COMPAT;
- list_add(&act->list, &exts->actions);
+ exts->actions[0] = act;
+ exts->nr_actions = 1;
} else if (exts->action && tb[exts->action]) {
- int err;
+ LIST_HEAD(actions);
+ int err, i = 0;
+
err = tcf_action_init(net, tb[exts->action], rate_tlv,
NULL, ovr,
- TCA_ACT_BIND, &exts->actions);
+ TCA_ACT_BIND, &actions);
if (err)
return err;
+ list_for_each_entry(act, &actions, list)
+ exts->actions[i++] = act;
+ exts->nr_actions = i;
}
}
#else
struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
- LIST_HEAD(tmp);
+ struct tcf_exts old = *dst;
+
tcf_tree_lock(tp);
- list_splice_init(&dst->actions, &tmp);
- list_splice(&src->actions, &dst->actions);
+ dst->nr_actions = src->nr_actions;
+ dst->actions = src->actions;
dst->type = src->type;
tcf_tree_unlock(tp);
- tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
+
+ tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
-#define tcf_exts_first_act(ext) \
- list_first_entry_or_null(&(exts)->actions, \
- struct tc_action, list)
+#ifdef CONFIG_NET_CLS_ACT
+static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
+{
+ if (exts->nr_actions == 0)
+ return NULL;
+ else
+ return exts->actions[0];
+}
+#endif
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
struct nlattr *nest;
- if (exts->action && !list_empty(&exts->actions)) {
+ if (exts->action && exts->nr_actions) {
/*
* again for backward compatible mode - we want
* to work with both old and new modes of entering
* tc data even if iproute2 was newer - jhs
*/
if (exts->type != TCA_OLD_COMPAT) {
+ LIST_HEAD(actions);
+
nest = nla_nest_start(skb, exts->action);
if (nest == NULL)
goto nla_put_failure;
- if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0)
+
+ tcf_exts_to_list(exts, &actions);
+ if (tcf_action_dump(skb, &actions, 0, 0) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
} else if (exts->police) {
return ERR_PTR(err);
}
+ iter->start_fail = 0;
return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
}
{
union sctp_addr laddr, paddr;
struct dst_entry *dst;
+ struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
laddr = list_entry(asoc->base.bind_addr.address_list.next,
struct sctp_sockaddr_entry, list)->a;
}
r->idiag_state = asoc->state;
- r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
- r->idiag_retrans = asoc->rtx_data_chunks;
- r->idiag_expires = jiffies_to_msecs(
- asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies);
+ if (timer_pending(t3_rtx)) {
+ r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
+ r->idiag_retrans = asoc->rtx_data_chunks;
+ r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
+ } else {
+ r->idiag_timer = 0;
+ r->idiag_retrans = 0;
+ r->idiag_expires = 0;
+ }
}
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
if (cb->args[4] < cb->args[1])
goto next;
- if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+ if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
goto next;
if (r->sdiag_family != AF_UNSPEC &&
* 3 : to mark if we have dumped the ep info of the current asoc
* 4 : to work as a temporary variable to traversal list
*/
- if (!(idiag_states & ~TCPF_LISTEN))
+ if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
goto done;
sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
done:
*/
sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
- sctp_ulpevent_receive_data(event, asoc);
-
/* And hold the chunk as we need it for getting the IP headers
* later in recvmsg
*/
sctp_chunk_hold(chunk);
event->chunk = chunk;
+ sctp_ulpevent_receive_data(event, asoc);
+
event->stream = ntohs(chunk->subh.data_hdr->stream);
event->ssn = ntohs(chunk->subh.data_hdr->ssn);
event->ppid = chunk->subh.data_hdr->ppid;
}
static struct gss_upcall_msg *
-__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
+__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
{
struct gss_upcall_msg *pos;
list_for_each_entry(pos, &pipe->in_downcall, list) {
if (!uid_eq(pos->uid, uid))
continue;
+ if (auth && pos->auth->service != auth->service)
+ continue;
atomic_inc(&pos->count);
dprintk("RPC: %s found msg %p\n", __func__, pos);
return pos;
struct gss_upcall_msg *old;
spin_lock(&pipe->lock);
- old = __gss_find_upcall(pipe, gss_msg->uid);
+ old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
if (old == NULL) {
atomic_inc(&gss_msg->count);
list_add(&gss_msg->list, &pipe->in_downcall);
err = -ENOENT;
/* Find a matching upcall */
spin_lock(&pipe->lock);
- gss_msg = __gss_find_upcall(pipe, uid);
+ gss_msg = __gss_find_upcall(pipe, uid, NULL);
if (gss_msg == NULL) {
spin_unlock(&pipe->lock);
goto err_put_ctx;
{
struct rpc_xprt_switch *xps;
struct rpc_xprt *xprt;
+ unsigned long reconnect_timeout;
unsigned char resvport;
int ret = 0;
return -EAGAIN;
}
resvport = xprt->resvport;
+ reconnect_timeout = xprt->max_reconnect_timeout;
rcu_read_unlock();
xprt = xprt_create_transport(xprtargs);
goto out_put_switch;
}
xprt->resvport = resvport;
+ xprt->max_reconnect_timeout = reconnect_timeout;
rpc_xprt_switch_set_roundrobin(xps);
if (setup) {
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
+static int
+rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
+ void *data)
+{
+ unsigned long timeout = *((unsigned long *)data);
+
+ if (timeout < xprt->max_reconnect_timeout)
+ xprt->max_reconnect_timeout = timeout;
+ return 0;
+}
+
+void
+rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo)
+{
+ rpc_clnt_iterate_for_each_xprt(clnt,
+ rpc_xprt_cap_max_reconnect_timeout,
+ &timeo);
+}
+EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
+
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static void rpc_show_header(void)
{
spin_unlock_bh(&xprt->transport_lock);
}
+static bool
+xprt_has_timer(const struct rpc_xprt *xprt)
+{
+ return xprt->idle_timeout != 0;
+}
+
+static void
+xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
+ __must_hold(&xprt->transport_lock)
+{
+ if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
+ mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
+}
+
static void
xprt_init_autodisconnect(unsigned long data)
{
spin_lock(&xprt->transport_lock);
if (!list_empty(&xprt->recv))
goto out_abort;
+ /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
+ xprt->last_used = jiffies;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
goto out_abort;
spin_unlock(&xprt->transport_lock);
goto out;
	xprt->snd_task = NULL;
xprt->ops->release_xprt(xprt, NULL);
+ xprt_schedule_autodisconnect(xprt);
out:
spin_unlock_bh(&xprt->transport_lock);
wake_up_bit(&xprt->state, XPRT_LOCKED);
spin_unlock_bh(&xprt->transport_lock);
}
-static inline int xprt_has_timer(struct rpc_xprt *xprt)
-{
- return xprt->idle_timeout != 0;
-}
-
/**
* xprt_prepare_transmit - reserve the transport before sending a request
* @task: RPC task about to send a request
if (!list_empty(&req->rq_list))
list_del(&req->rq_list);
xprt->last_used = jiffies;
- if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
- mod_timer(&xprt->timer,
- xprt->last_used + xprt->idle_timeout);
+ xprt_schedule_autodisconnect(xprt);
spin_unlock_bh(&xprt->transport_lock);
if (req->rq_buffer)
xprt->ops->buf_free(req->rq_buffer);
* increase over time if the server is down or not responding.
*/
#define XS_TCP_INIT_REEST_TO (3U * HZ)
-#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
/*
* TCP idle timeout; client drops the transport socket if it is idle
write_unlock_bh(&sk->sk_callback_lock);
}
xs_udp_do_set_buffer_size(xprt);
+
+ xprt->stat.connect_start = jiffies;
}
static void xs_udp_setup_socket(struct work_struct *work)
unsigned int keepcnt = xprt->timeout->to_retries + 1;
unsigned int opt_on = 1;
unsigned int timeo;
+ unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
/* TCP Keepalive options */
kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
(char *)&keepcnt, sizeof(keepcnt));
+ /* Avoid temporary addresses, they are bad for long-lived
+ * connections such as NFS mounts.
+ * RFC4941, section 3.6 suggests that:
+ * Individual applications, which have specific
+ * knowledge about the normal duration of connections,
+ * MAY override this as appropriate.
+ */
+ kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
+ (char *)&addr_pref, sizeof(addr_pref));
+
/* TCP user timeout (see RFC5482) */
timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
(xprt->timeout->to_retries + 1);
/* SYN_SENT! */
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ break;
+ case -EADDRNOTAVAIL:
+ /* Source port number is unavailable. Try a new one! */
+ transport->srcport = 0;
}
out:
return ret;
xprt_wake_pending_tasks(xprt, status);
}
+static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
+{
+ unsigned long start, now = jiffies;
+
+ start = xprt->stat.connect_start + xprt->reestablish_timeout;
+ if (time_after(start, now))
+ return start - now;
+ return 0;
+}
+
+static void xs_reconnect_backoff(struct rpc_xprt *xprt)
+{
+ xprt->reestablish_timeout <<= 1;
+ if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
+ xprt->reestablish_timeout = xprt->max_reconnect_timeout;
+ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+}
+
/**
* xs_connect - connect a socket to a remote endpoint
* @xprt: pointer to transport structure
static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ unsigned long delay = 0;
WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
/* Start by resetting any existing state */
xs_reset_transport(transport);
- queue_delayed_work(xprtiod_workqueue,
- &transport->connect_worker,
- xprt->reestablish_timeout);
- xprt->reestablish_timeout <<= 1;
- if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
- xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
- if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
- xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
- } else {
+ delay = xs_reconnect_delay(xprt);
+ xs_reconnect_backoff(xprt);
+
+ } else
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
- queue_delayed_work(xprtiod_workqueue,
- &transport->connect_worker, 0);
- }
+
+ queue_delayed_work(xprtiod_workqueue,
+ &transport->connect_worker,
+ delay);
}
/**
xprt->ops = &xs_tcp_ops;
xprt->timeout = &xs_tcp_default_timeout;
+ xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
+
INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
u32 bearer_id, u32 *prev_node)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
- struct tipc_peer *peer = mon->self;
+ struct tipc_peer *peer;
if (!mon)
return -EINVAL;
read_lock_bh(&mon->lock);
+ peer = mon->self;
do {
if (*prev_node) {
if (peer->addr == *prev_node)
TIPC_CONN_MSG, SHORT_H_SIZE,
0, dnode, onode, dport, oport,
TIPC_CONN_SHUTDOWN);
- tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+ if (skb)
+ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
}
tsk->connected = 0;
sock->state = SS_DISCONNECTING;
vq = vsock->vqs[VSOCK_VQ_TX];
- /* Avoid unnecessary interrupts while we're processing the ring */
- virtqueue_disable_cb(vq);
-
for (;;) {
struct virtio_vsock_pkt *pkt;
struct scatterlist hdr, buf, *sgs[2];
spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
spin_unlock_bh(&vsock->send_pkt_list_lock);
- virtqueue_enable_cb(vq);
break;
}
}
ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
+ /* Usually this means that there is no more space available in
+ * the vq
+ */
if (ret < 0) {
spin_lock_bh(&vsock->send_pkt_list_lock);
list_add(&pkt->list, &vsock->send_pkt_list);
spin_unlock_bh(&vsock->send_pkt_list_lock);
-
- if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
- continue; /* retry now that we have more space */
break;
}
r = cfg80211_get_chans_dfs_available(wiphy,
chandef->center_freq2,
width);
+ break;
default:
WARN_ON(chandef->center_freq2);
break;
{
struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
u32 mask = 0;
+ u16 ht_opmode;
#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
do { \
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
mask, NL80211_MESHCONF_RSSI_THRESHOLD,
nl80211_check_s32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
- mask, NL80211_MESHCONF_HT_OPMODE,
- nl80211_check_u16);
+ /*
+ * Check HT operation mode based on
+ * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+ */
+ if (tb[NL80211_MESHCONF_HT_OPMODE]) {
+ ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
+
+ if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION |
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
+ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+ return -EINVAL;
+
+ if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
+ (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+ return -EINVAL;
+
+ switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
+ return -EINVAL;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+ return -EINVAL;
+ break;
+ }
+ cfg->ht_opmode = ht_opmode;
+ }
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
1, 65535, mask,
NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
(void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
(void *) BPF_FUNC_l4_csum_replace;
-static int (*bpf_skb_in_cgroup)(void *ctx, void *map, int index) =
- (void *) BPF_FUNC_skb_in_cgroup;
+static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
+ (void *) BPF_FUNC_skb_under_cgroup;
#if defined(__x86_64__)
bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
eth->h_proto, ip6h->nexthdr);
return TC_ACT_OK;
- } else if (bpf_skb_in_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
+ } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
bpf_trace_printk(pass_msg, sizeof(pass_msg));
return TC_ACT_OK;
} else {
assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
errno == E2BIG);
+ /* update existing element, though the map is full */
+ key = 1;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
+ key = 2;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+ key = 1;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+
/* check that key = 0 doesn't exist */
+ key = 0;
assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
/* iterate over two elements */
for (i = fn; i < MAP_SIZE; i += TASKS) {
key = value = i;
- if (do_update)
+ if (do_update) {
assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
- else
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
+ } else {
assert(bpf_delete_elem(map_fd, &key) == 0);
+ }
}
}
as-instr = $(call try-run,\
printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+# Do not attempt to build with gcc plugins during cc-option tests.
+# (And this uses delayed resolution so the flags will be up to date.)
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
cc-option = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
cc-option-yn = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+ $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
# cc-option-align
# Prefix align with either -falign or -malign
# cc-disable-warning
# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
cc-disable-warning = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+ $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
# cc-name
# Expands to either gcc or clang
endif
endif
- GCC_PLUGINS_CFLAGS := $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y))
+ GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
- export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN
+ export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
+ ifneq ($(PLUGINCC),)
+ # SANCOV_PLUGIN can only be in CFLAGS_KCOV, to avoid duplication.
+ GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
+ endif
+
+ KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+ GCC_PLUGIN := $(gcc-plugin-y)
+ GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
+endif
+
+# If plugins aren't supported, abort the build before hard-to-read compiler
+# errors start getting spewed by the main build.
+PHONY += gcc-plugins-check
+gcc-plugins-check: FORCE
+ifdef CONFIG_GCC_PLUGINS
ifeq ($(PLUGINCC),)
ifneq ($(GCC_PLUGINS_CFLAGS),)
ifeq ($(call cc-ifversion, -ge, 0405, y), y)
- PLUGINCC := $(shell $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)")
- $(warning warning: your gcc installation does not support plugins, perhaps the necessary headers are missing?)
+ $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true
+ @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" >&2 && exit 1
else
- $(warning warning: your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least)
+ @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1
endif
endif
- else
- # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication.
- GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
endif
+endif
+ @:
- KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
- GCC_PLUGIN := $(gcc-plugin-y)
-
+# Actually do the build, if requested.
+PHONY += gcc-plugins
+gcc-plugins: scripts_basic gcc-plugins-check
+ifdef CONFIG_GCC_PLUGINS
+ $(Q)$(MAKE) $(build)=scripts/gcc-plugins
endif
+ @:
#!/bin/sh
srctree=$(dirname "$0")
+
+SHOW_ERROR=
+if [ "$1" = "--show-error" ] ; then
+ SHOW_ERROR=1
+ shift || true
+fi
+
gccplugins_dir=$($3 -print-file-name=plugin)
plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF
#include "gcc-common.h"
if [ $? -ne 0 ]
then
+ if [ -n "$SHOW_ERROR" ] ; then
+ echo "${plugincc}" >&2
+ fi
exit 1
fi
echo "$2"
exit 0
fi
+
+if [ -n "$SHOW_ERROR" ] ; then
+ echo "${plugincc}" >&2
+fi
exit 1
export HOST_EXTRACXXFLAGS
endif
-export GCCPLUGINS_DIR HOSTLIBS
-
ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN))
GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN))
endif
-$(HOSTLIBS)-y := $(GCC_PLUGIN)
+export HOSTLIBS
+
+$(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p)))
always := $($(HOSTLIBS)-y)
-cyc_complexity_plugin-objs := cyc_complexity_plugin.o
-sancov_plugin-objs := sancov_plugin.o
+$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o))
+
+subdir-y := $(GCC_PLUGIN_SUBDIR)
+subdir- += $(GCC_PLUGIN_SUBDIR)
clean-files += *.so
die "$P: file '${file}' not found\n";
}
}
- if ($from_filename || vcs_file_exists($file)) {
+ if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) {
$file =~ s/^\Q${cur_path}\E//; #strip any absolute path
$file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree
push(@files, $file);
my $cmd = $VCS_cmds{"file_exists_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
-
+ $cmd .= " 2>&1";
$exists = &{$VCS_cmds{"execute_cmd"}}($cmd);
+ return 0 if ($? != 0);
+
return $exists;
}
this low address space will need the permission specific to the
systems running LSM.
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+ bool
+ help
+ The heap allocator implements __check_heap_object() for
+ validating memory ranges against heap object sizes in
+ support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+ bool
+ help
+ The architecture supports CONFIG_HARDENED_USERCOPY by
+ calling check_object_size() just before performing the
+ userspace copies in the low level implementation of
+ copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ depends on HAVE_ARCH_HARDENED_USERCOPY
+ depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
+ select BUG
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocated pages, are not on the process stack,
+ or are part of the kernel text. This kills entire classes
+ of heap overflow exploits and similar kernel memory exposures.
+
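To make the HAVE_ARCH_HARDENED_USERCOPY contract concrete, here is a minimal sketch of the hook an architecture is expected to place in its low-level copy routines. Only check_object_size() is named by the help text above; __example_arch_copy_to_user() is a hypothetical stand-in for the architecture's raw copy helper:

    #include <linux/thread_info.h>  /* check_object_size() */
    #include <linux/uaccess.h>

    static inline unsigned long
    example_copy_to_user(void __user *to, const void *from, unsigned long n)
    {
            /* Validate the kernel-side object before any bytes move. */
            check_object_size(from, n, true);  /* true = copying toward userspace */
            return __example_arch_copy_to_user(to, from, n);
    }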
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
struct hda_intel *hda;
+ struct hdac_bus *bus;
if (!card)
return 0;
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ bus = azx_bus(chip);
if (chip->disabled || hda->init_failed || !chip->running)
return 0;
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
- && hda->need_i915_power) {
- snd_hdac_display_power(azx_bus(chip), true);
- snd_hdac_i915_set_bclk(azx_bus(chip));
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ snd_hdac_display_power(bus, true);
+ if (hda->need_i915_power)
+ snd_hdac_i915_set_bclk(bus);
}
+
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
hda_intel_init_chip(chip, true);
+ /* power down again for link-controlled chips */
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+ !hda->need_i915_power)
+ snd_hdac_display_power(bus, false);
+
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
trace_azx_resume(chip);
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ bus = azx_bus(chip);
if (chip->disabled || hda->init_failed)
return 0;
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- bus = azx_bus(chip);
- if (hda->need_i915_power) {
- snd_hdac_display_power(bus, true);
+ snd_hdac_display_power(bus, true);
+ if (hda->need_i915_power)
snd_hdac_i915_set_bclk(bus);
- } else {
- /* toggle codec wakeup bit for STATESTS read */
- snd_hdac_set_codec_wakeup(bus, true);
- snd_hdac_set_codec_wakeup(bus, false);
- }
}
/* Read STATESTS before controller reset */
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
~STATESTS_INT_MASK);
+ /* power down again for link-controlled chips */
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+ !hda->need_i915_power)
+ snd_hdac_display_power(bus, false);
+
trace_azx_runtime_resume(chip);
return 0;
}
ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_FIXUP_TPT440_DOCK,
ALC292_FIXUP_TPT440,
- ALC283_FIXUP_BXBT2807_MIC,
+ ALC283_FIXUP_HEADSET_MIC,
ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
ALC282_FIXUP_ASPIRE_V5_PINS,
ALC280_FIXUP_HP_GPIO4,
.chained = true,
.chain_id = ALC292_FIXUP_TPT440_DOCK,
},
- [ALC283_FIXUP_BXBT2807_MIC] = {
+ [ALC283_FIXUP_HEADSET_MIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x19, 0x04a110f0 },
SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
- SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
clk_enable(ssc_p->ssc->clk);
ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
- /* Reset the SSC to keep it at a clean status */
- ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
+ /* Reset the SSC unless initialized to keep it in a clean state */
+ if (!ssc_p->initialized)
+ ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dir = 0;
return -EINVAL;
}
- /* By default only 32 BCLK per WCLK is supported */
- dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_32;
+ /* By default only 64 BCLK per WCLK is supported */
+ dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_64;
snd_soc_write(codec, DA7213_DAI_CLK_MODE, dai_clk_mode);
snd_soc_update_bits(codec, DA7213_DAI_CTRL, DA7213_DAI_FORMAT_MASK,
static const struct i2c_device_id max98371_i2c_id[] = {
{ "max98371", 0 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, max98371_i2c_id);
0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47
};
-static struct snd_soc_dai *nau8825_get_codec_dai(struct nau8825 *nau8825)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(nau8825->dapm);
- struct snd_soc_component *component = &codec->component;
- struct snd_soc_dai *codec_dai, *_dai;
-
- list_for_each_entry_safe(codec_dai, _dai, &component->dai_list, list) {
- if (!strncmp(codec_dai->name, NUVOTON_CODEC_DAI,
- strlen(NUVOTON_CODEC_DAI)))
- return codec_dai;
- }
- return NULL;
-}
-
-static bool nau8825_dai_is_active(struct nau8825 *nau8825)
-{
- struct snd_soc_dai *codec_dai = nau8825_get_codec_dai(nau8825);
-
- if (codec_dai) {
- if (codec_dai->playback_active || codec_dai->capture_active)
- return true;
- }
- return false;
-}
-
/**
* nau8825_sema_acquire - acquire the semaphore of nau88l25
* @nau8825: component to register the codec private data with
* Acquires the semaphore without jiffies. If no more tasks are allowed
* to acquire the semaphore, calling this function will put the task to
* sleep until the semaphore is released.
- * It returns if the semaphore was acquired.
+ * If the semaphore is not released within the specified number of jiffies,
+ * this function returns -ETIME.
+ * If the sleep is interrupted by a signal, this function will return -EINTR.
+ * It returns 0 if the semaphore was acquired successfully.
*/
-static void nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
+static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
{
int ret;
- if (timeout)
+ if (timeout) {
ret = down_timeout(&nau8825->xtalk_sem, timeout);
- else
+ if (ret < 0)
+ dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
+ } else {
ret = down_interruptible(&nau8825->xtalk_sem);
+ if (ret < 0)
+ dev_warn(nau8825->dev, "Acquire semaphore fail\n");
+ }
- if (ret < 0)
- dev_warn(nau8825->dev, "Acquire semaphone fail\n");
+ return ret;
}
/**
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
unsigned int val_len = 0;
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+
switch (params_width(params)) {
case 16:
val_len |= NAU8825_I2S_DL_16;
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
NAU8825_I2S_DL_MASK, val_len);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
+
return 0;
}
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
unsigned int ctrl1_val = 0, ctrl2_val = 0;
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
ctrl2_val |= NAU8825_I2S_MS_MASTER;
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
NAU8825_I2S_MS_MASK, ctrl2_val);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
+
return 0;
}
* cess and restore changes if process
* is ongoing when ejection.
*/
+ int ret;
nau8825->xtalk_protect = true;
- nau8825_sema_acquire(nau8825, 0);
+ ret = nau8825_sema_acquire(nau8825, 0);
+ if (ret < 0)
+ nau8825->xtalk_protect = false;
}
/* Startup cross talk detection process */
nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
{
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+ int ret;
regcache_cache_only(nau8825->regmap, false);
regcache_sync(nau8825->regmap);
- if (nau8825_is_jack_inserted(nau8825->regmap)) {
- /* If the jack is inserted, we need to check whether the play-
- * back is active before suspend. If active, the driver has to
- * raise the protection for cross talk function to avoid the
- * playback recovers before cross talk process finish. Other-
- * wise, the playback will be interfered by cross talk func-
- * tion. It is better to apply hardware related parameters
- * before starting playback or record.
- */
- if (nau8825_dai_is_active(nau8825)) {
- nau8825->xtalk_protect = true;
- nau8825_sema_acquire(nau8825, 0);
- }
- }
+ nau8825->xtalk_protect = true;
+ ret = nau8825_sema_acquire(nau8825, 0);
+ if (ret < 0)
+ nau8825->xtalk_protect = false;
enable_irq(nau8825->irq);
return 0;
if (anc_transitions[i].dest == ANC_OFF)
clk_disable_unprepare(wm2000->mclk);
- return ret;
+ return 0;
}
static int wm2000_anc_set_mode(struct wm2000_priv *wm2000)
-obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) := simple-card-utils.o
-
+snd-soc-simple-card-utils-objs := simple-card-utils.o
snd-soc-simple-card-objs := simple-card.o
-obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
+obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += snd-soc-simple-card-utils.o
+obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/module.h>
#include <linux/of.h>
#include <sound/simple_card_utils.h>
return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
+
+/* Module information */
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("ALSA SoC Simple Card Utils");
+MODULE_LICENSE("GPL v2");
uuid_mod = (uuid_le *)uuid;
+ if (list_empty(&ctx->uuid_list)) {
+ dev_err(ctx->dev, "Module list is empty\n");
+ return -EINVAL;
+ }
+
list_for_each_entry(module, &ctx->uuid_list, list) {
if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
dfw_config->module_id = module->id;
skl->nhlt = skl_nhlt_init(bus->dev);
- if (skl->nhlt == NULL)
+ if (skl->nhlt == NULL) {
+ err = -ENODEV;
goto out_free;
+ }
skl_nhlt_update_topology_bin(skl);
struct abe_twl6040 {
int jack_detection; /* board can detect jack events */
int mclk_freq; /* MCLK frequency speed for twl6040 */
-
- struct platform_device *dmic_codec_dev;
};
+struct platform_device *dmic_codec_dev;
+
static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
if (priv == NULL)
return -ENOMEM;
- priv->dmic_codec_dev = ERR_PTR(-EINVAL);
-
if (snd_soc_of_parse_card_name(card, "ti,model")) {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
num_links = 2;
abe_twl6040_dai_links[1].cpu_of_node = dai_node;
abe_twl6040_dai_links[1].platform_of_node = dai_node;
-
- priv->dmic_codec_dev = platform_device_register_simple(
- "dmic-codec", -1, NULL, 0);
- if (IS_ERR(priv->dmic_codec_dev)) {
- dev_err(&pdev->dev, "Can't instantiate dmic-codec\n");
- return PTR_ERR(priv->dmic_codec_dev);
- }
} else {
num_links = 1;
}
of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
if (!priv->mclk_freq) {
dev_err(&pdev->dev, "MCLK frequency not provided\n");
- ret = -EINVAL;
- goto err_unregister;
+ return -EINVAL;
}
card->fully_routed = 1;
if (!priv->mclk_freq) {
dev_err(&pdev->dev, "MCLK frequency missing\n");
- ret = -ENODEV;
- goto err_unregister;
+ return -ENODEV;
}
card->dai_link = abe_twl6040_dai_links;
snd_soc_card_set_drvdata(card, priv);
ret = snd_soc_register_card(card);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
- goto err_unregister;
- }
-
- return 0;
-
-err_unregister:
- if (!IS_ERR(priv->dmic_codec_dev))
- platform_device_unregister(priv->dmic_codec_dev);
return ret;
}
static int omap_abe_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- if (!IS_ERR(priv->dmic_codec_dev))
- platform_device_unregister(priv->dmic_codec_dev);
-
return 0;
}
.remove = omap_abe_remove,
};
-module_platform_driver(omap_abe_driver);
+static int __init omap_abe_init(void)
+{
+ int ret;
+
+ dmic_codec_dev = platform_device_register_simple("dmic-codec", -1, NULL,
+ 0);
+ if (IS_ERR(dmic_codec_dev)) {
+ pr_err("%s: dmic-codec device registration failed\n", __func__);
+ return PTR_ERR(dmic_codec_dev);
+ }
+
+ ret = platform_driver_register(&omap_abe_driver);
+ if (ret) {
+ pr_err("%s: platform driver registration failed\n", __func__);
+ platform_device_unregister(dmic_codec_dev);
+ }
+
+ return ret;
+}
+module_init(omap_abe_init);
+
+static void __exit omap_abe_exit(void)
+{
+ platform_driver_unregister(&omap_abe_driver);
+ platform_device_unregister(dmic_codec_dev);
+}
+module_exit(omap_abe_exit);
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
MODULE_DESCRIPTION("ALSA SoC for OMAP boards with ABE and twl6040 codec");
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
unsigned long phys_base;
void __iomem *io_base;
int irq;
- struct clk *pdmclk;
struct mutex mutex;
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
int ret;
- clk_prepare_enable(mcpdm->pdmclk);
pm_runtime_enable(mcpdm->dev);
/* Disable lines while request is ongoing */
pm_runtime_get_sync(mcpdm->dev);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);
- ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler,
- 0, "McPDM", (void *)mcpdm);
+ ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM",
+ (void *)mcpdm);
pm_runtime_put_sync(mcpdm->dev);
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ free_irq(mcpdm->irq, (void *)mcpdm);
pm_runtime_disable(mcpdm->dev);
- clk_disable_unprepare(mcpdm->pdmclk);
return 0;
}
mcpdm->pm_active_count++;
}
- clk_disable_unprepare(mcpdm->pdmclk);
-
return 0;
}
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
- clk_prepare_enable(mcpdm->pdmclk);
-
if (mcpdm->pm_active_count) {
while (mcpdm->pm_active_count--)
pm_runtime_get_sync(mcpdm->dev);
mcpdm->dev = &pdev->dev;
- mcpdm->pdmclk = devm_clk_get(&pdev->dev, "pdmclk");
- if (IS_ERR(mcpdm->pdmclk)) {
- if (PTR_ERR(mcpdm->pdmclk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_warn(&pdev->dev, "Error getting pdmclk (%ld)!\n",
- PTR_ERR(mcpdm->pdmclk));
- mcpdm->pdmclk = NULL;
- }
-
ret = devm_snd_soc_register_component(&pdev->dev,
&omap_mcpdm_component,
&omap_mcpdm_dai, 1);
static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
{
- int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
#ifdef ENFORCE_RATES
struct snd_pcm_runtime *runtime = substream->runtime;
#endif
+ int ret = 0;
mutex_lock(&clk_lock);
pr_debug("%s %d\n", __func__, clk_users);
printk(KERN_ERR "%s cannot get xtal\n", __func__);
ret = PTR_ERR(xtal);
} else {
- pclk = clk_get(&s3c24xx_uda134x_snd_device->dev,
- "pclk");
+ pclk = clk_get(cpu_dai->dev, "iis");
if (IS_ERR(pclk)) {
printk(KERN_ERR "%s cannot get pclk\n",
__func__);
ifscr = 0;
fsrate = 0;
if (fin != fout) {
+ u64 n;
+
ifscr = 1;
- fsrate = 0x0400000 / fout * fin;
+ n = (u64)0x0400000 * fin;
+ do_div(n, fout);
+ fsrate = n;
}
/*
dpcm_be_disconnect(fe, stream);
fe->dpcm[stream].runtime = NULL;
- goto fe_err;
+ goto path_err;
}
dpcm_clear_pending_state(fe, stream);
return 0;
+path_err:
+ dpcm_path_put(&list);
fe_err:
if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
fe->dai_link->compr_ops->shutdown(cstream);
if (!rtd->platform) {
dev_err(card->dev, "ASoC: platform %s not registered\n",
dai_link->platform_name);
- return -EPROBE_DEFER;
+ goto _err_defer;
}
soc_add_pcm_runtime(card, rtd);
/* remove auxiliary devices */
soc_remove_aux_devices(card);
+ snd_soc_dapm_free(&card->dapm);
soc_cleanup_card_debugfs(card);
/* remove the card */
if (card->remove)
card->remove(card);
- snd_soc_dapm_free(&card->dapm);
-
snd_card_free(card->snd_card);
return 0;
const struct snd_soc_pcm_stream *config = w->params + w->params_select;
struct snd_pcm_substream substream;
struct snd_pcm_hw_params *params = NULL;
+ struct snd_pcm_runtime *runtime = NULL;
u64 fmt;
int ret;
memset(&substream, 0, sizeof(substream));
+ /* Allocate a dummy snd_pcm_runtime for startup() and other ops() */
+ runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+ if (!runtime) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ substream.runtime = runtime;
+
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
substream.stream = SNDRV_PCM_STREAM_CAPTURE;
}
out:
+ kfree(runtime);
kfree(params);
return ret;
}
err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
if (err < 0) {
line6pcm->impulse_volume = 0;
- line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
return err;
}
} else {
spin_lock_irqsave(&pstr->lock, flags);
clear_bit(type, &pstr->running);
if (!pstr->running) {
+ spin_unlock_irqrestore(&pstr->lock, flags);
line6_unlink_audio_urbs(line6pcm, pstr);
+ spin_lock_irqsave(&pstr->lock, flags);
if (direction == SNDRV_PCM_STREAM_CAPTURE) {
line6pcm->prev_fbuf = NULL;
line6pcm->prev_fsize = 0;
static ssize_t serial_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%u\n", pod->serial_number);
}
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
pod->firmware_version % 100);
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%d\n", pod->device_id);
}
{
/* devices which do not support reading the sample rate. */
switch (chip->usb_id) {
+ case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */
case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
/* Supported VGICv3 address types */
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+#define KVM_VGIC_ITS_ADDR_TYPE 4
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
__u64 fac_list[256];
};
+#define KVM_S390_VM_CPU_PROCESSOR_FEAT 2
+#define KVM_S390_VM_CPU_MACHINE_FEAT 3
+
+#define KVM_S390_VM_CPU_FEAT_NR_BITS 1024
+#define KVM_S390_VM_CPU_FEAT_ESOP 0
+#define KVM_S390_VM_CPU_FEAT_SIEF2 1
+#define KVM_S390_VM_CPU_FEAT_64BSCAO 2
+#define KVM_S390_VM_CPU_FEAT_SIIF 3
+#define KVM_S390_VM_CPU_FEAT_GPERE 4
+#define KVM_S390_VM_CPU_FEAT_GSLS 5
+#define KVM_S390_VM_CPU_FEAT_IB 6
+#define KVM_S390_VM_CPU_FEAT_CEI 7
+#define KVM_S390_VM_CPU_FEAT_IBS 8
+#define KVM_S390_VM_CPU_FEAT_SKEY 9
+#define KVM_S390_VM_CPU_FEAT_CMMA 10
+#define KVM_S390_VM_CPU_FEAT_PFMFI 11
+#define KVM_S390_VM_CPU_FEAT_SIGPIF 12
+struct kvm_s390_vm_cpu_feat {
+ __u64 feat[16];
+};
+
+#define KVM_S390_VM_CPU_PROCESSOR_SUBFUNC 4
+#define KVM_S390_VM_CPU_MACHINE_SUBFUNC 5
+/* for "test bit" instructions MSB 0 bit ordering, for "query" raw blocks */
+struct kvm_s390_vm_cpu_subfunc {
+ __u8 plo[32]; /* always */
+ __u8 ptff[16]; /* with TOD-clock steering */
+ __u8 kmac[16]; /* with MSA */
+ __u8 kmc[16]; /* with MSA */
+ __u8 km[16]; /* with MSA */
+ __u8 kimd[16]; /* with MSA */
+ __u8 klmd[16]; /* with MSA */
+ __u8 pckmo[16]; /* with MSA3 */
+ __u8 kmctr[16]; /* with MSA4 */
+ __u8 kmf[16]; /* with MSA4 */
+ __u8 kmo[16]; /* with MSA4 */
+ __u8 pcc[16]; /* with MSA4 */
+ __u8 ppno[16]; /* with MSA5 */
+ __u8 reserved[1824];
+};
+
/* kvm attributes for crypto */
#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
exit_code_ipa0(0xB2, 0x4c, "TAR"), \
exit_code_ipa0(0xB2, 0x50, "CSP"), \
exit_code_ipa0(0xB2, 0x54, "MVPG"), \
+ exit_code_ipa0(0xB2, 0x56, "STHYI"), \
exit_code_ipa0(0xB2, 0x58, "BSG"), \
exit_code_ipa0(0xB2, 0x5a, "BSA"), \
exit_code_ipa0(0xB2, 0x5f, "CHSC"), \
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
*/
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
-
+#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
#endif /* _ASM_X86_CPUFEATURES_H */
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
#define REQUIRED_MASK14 0
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 0
+#define REQUIRED_MASK17 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
-#define EXIT_REASON_PCOMMIT 65
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
{ EXIT_REASON_INVVPID, "INVVPID" }, \
{ EXIT_REASON_INVPCID, "INVPCID" }, \
{ EXIT_REASON_XSAVES, "XSAVES" }, \
- { EXIT_REASON_XRSTORS, "XRSTORS" }, \
- { EXIT_REASON_PCOMMIT, "PCOMMIT" }
+ { EXIT_REASON_XRSTORS, "XRSTORS" }
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
/*
- * gpio-hammer - example swiss army knife to shake GPIO lines on a system
+ * gpio-event-mon - monitor GPIO line events from userspace
*
* Copyright (C) 2016 Linus Walleij
*
int strtobool(const char *s, bool *res);
-#ifdef __GLIBC__
+/*
+ * glibc based builds need the extern while uClibc doesn't.
+ * However, uClibc headers also define __GLIBC__, hence the hack below
+ */
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
extern size_t strlcpy(char *dest, const char *src, size_t size);
#endif
BPF_MAP_TYPE_PERCPU_HASH,
BPF_MAP_TYPE_PERCPU_ARRAY,
BPF_MAP_TYPE_STACK_TRACE,
+ BPF_MAP_TYPE_CGROUP_ARRAY,
};
enum bpf_prog_type {
BPF_PROG_TYPE_SCHED_CLS,
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
+ BPF_PROG_TYPE_XDP,
};
#define BPF_PSEUDO_MAP_FD 1
*/
BPF_FUNC_skb_get_tunnel_opt,
BPF_FUNC_skb_set_tunnel_opt,
+
+ /**
+ * bpf_skb_change_proto(skb, proto, flags)
+ * Change the protocol of the skb. Currently supported are
+ * v4 -> v6 and v6 -> v4 transitions. The helper will also
+ * resize the skb. The eBPF program is expected to fill the
+ * new headers via skb_store_bytes and lX_csum_replace.
+ * @skb: pointer to skb
+ * @proto: new skb->protocol type
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_change_proto,
+
+ /**
+ * bpf_skb_change_type(skb, type)
+ * Change packet type of skb.
+ * @skb: pointer to skb
+ * @type: new skb->pkt_type type
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_change_type,
+
+ /**
+ * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+ * @skb: pointer to skb
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 skb failed the cgroup2 descendant test
+ * == 1 skb succeeded the cgroup2 descendant test
+ * < 0 error
+ */
+ BPF_FUNC_skb_in_cgroup,
+
+ /**
+ * bpf_get_hash_recalc(skb)
+ * Retrieve and possibly recalculate skb->hash.
+ * @skb: pointer to skb
+ * Return: hash
+ */
+ BPF_FUNC_get_hash_recalc,
+
+ /**
+ * u64 bpf_get_current_task(void)
+ * Returns current task_struct
+ * Return: current
+ */
+ BPF_FUNC_get_current_task,
+
+ /**
+ * bpf_probe_write_user(void *dst, void *src, int len)
+ * Safely attempt to write to a location.
+ * @dst: destination address in userspace
+ * @src: source address on stack
+ * @len: number of bytes to copy
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_probe_write_user,
+
__BPF_FUNC_MAX_ID,
};
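eBPF programs do not call these helpers by name; they reach them through
function pointers cast from the BPF_FUNC_* IDs above, in the style used by
samples/bpf/bpf_helpers.h. A minimal sketch of that convention for two of the
helpers documented above; the prototypes are transcribed from the doc comments
and should be treated as assumptions, not authoritative declarations:

        /*
         * Helper stubs an eBPF program would declare before use; at load
         * time the kernel resolves these IDs to the in-kernel helpers.
         * The BPF_FUNC_* values come from this header (linux/bpf.h).
         */
        static unsigned long long (*bpf_get_current_task)(void) =
                (void *) BPF_FUNC_get_current_task;
        static unsigned int (*bpf_get_hash_recalc)(void *skb) =
                (void *) BPF_FUNC_get_hash_recalc;
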
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
#define BPF_F_DONT_FRAGMENT (1ULL << 2)
-/* BPF_FUNC_perf_event_output flags. */
+/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK 0xffffffffULL
#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+/* BPF_FUNC_perf_event_output for sk_buff input context. */
+#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
__u32 tunnel_label;
};
+/* User return codes for XDP prog type.
+ * A valid XDP program must return one of these defined values. All other
+ * return codes are reserved for future use. Unknown return codes will result
+ * in packet drop.
+ */
+enum xdp_action {
+ XDP_ABORTED = 0,
+ XDP_DROP,
+ XDP_PASS,
+ XDP_TX,
+};
+
+/* user accessible metadata for XDP packet hook
+ * new fields must be added to the end of this structure
+ */
+struct xdp_md {
+ __u32 data;
+ __u32 data_end;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
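
The xdp_md context and the xdp_action return codes above are the whole contract
an XDP program sees at this stage. The following minimal sketch shows how they
fit together; the SEC() definition and the license section follow the common
samples/bpf layout and are assumptions here, not part of this header:

        #include <linux/bpf.h>

        #define SEC(NAME) __attribute__((section(NAME), used))

        SEC("xdp")
        int xdp_drop_short(struct xdp_md *ctx)
        {
                /*
                 * data/data_end are fixed up by the kernel; this cast idiom
                 * is the conventional way to turn them into pointers inside
                 * the program.
                 */
                void *data = (void *)(unsigned long)ctx->data;
                void *data_end = (void *)(unsigned long)ctx->data_end;

                /*
                 * Drop frames too short to hold an Ethernet header (14 bytes),
                 * pass everything else up the stack unmodified.
                 */
                if (data + 14 > data_end)
                        return XDP_DROP;
                return XDP_PASS;
        }

        char _license[] SEC("license") = "GPL";
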
'NAME' specifies the name of this argument (optional). You can use the name of a local variable, a local data structure member (e.g. var->field, var.field2), a local array with a fixed index (e.g. array[1], var->array[0], var->pointer[2]), or the kprobe-tracer argument format (e.g. $retval, %ax, etc.). Note that the name of this argument will be set to the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2').
The special arguments '$vars' and '$params' are also available for NAME: '$vars' is expanded to the local variables (including function parameters) which can be accessed at the given probe point, while '$params' is expanded to only the function parameters.
-'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically sets the type based on debuginfo. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), signedness casting (u/s), "string" and bitfield are supported. (See TYPES for details.)
On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
+TYPES
+-----
+Basic types (u8/u16/u32/u64/s8/s16/s32/s64) are integer types. The prefixes 's' and 'u' mean those types are signed and unsigned, respectively. Traced arguments are shown in decimal (signed) or hex (unsigned). You can also use 's' or 'u' alone to specify only the signedness and leave the size to be auto-detected by perf probe.
+String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+Bitfield is another special type, which takes three parameters: bit-width, bit-offset, and container-size (usually 32). The syntax is:
+
+ b<bit-width>@<bit-offset>/<container-size>
+
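For example, b2@4/32 would fetch a 2-bit wide field that starts at bit offset 4 within a 32-bit container; the concrete numbers are only an illustration, and the actual values depend on the layout of the traced structure.
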
LINE SYNTAX
-----------
Line range is described by the following syntax.
--fields::
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
- srcline, period, iregs, brstack, brstacksym, flags.
- Field list can be prepended with the type, trace, sw or hw,
+ srcline, period, iregs, brstack, brstacksym, flags, bpf-output,
+ callindent. Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
#endif
#if defined(_CALL_ELF) && _CALL_ELF == 2
-bool arch__prefers_symtab(void)
-{
- return true;
-}
#ifdef HAVE_LIBELF_SUPPORT
void arch__sym_update(struct symbol *s, GElf_Sym *sym)
tev->point.offset += lep_offset;
}
}
+
+#ifdef HAVE_LIBELF_SUPPORT
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+ int ntevs)
+{
+ struct probe_trace_event *tev;
+ struct map *map;
+ struct symbol *sym = NULL;
+ struct rb_node *tmp;
+ int i = 0;
+
+ map = get_target_map(pev->target, pev->uprobes);
+ if (!map || map__load(map, NULL) < 0)
+ return;
+
+ for (i = 0; i < ntevs; i++) {
+ tev = &pev->tevs[i];
+ map__for_each_symbol(map, sym, tmp) {
+ if (map->unmap_ip(map, sym->start) == tev->point.address)
+ arch__fix_tev_from_maps(pev, tev, map, sym);
+ }
+ }
+}
+#endif /* HAVE_LIBELF_SUPPORT */
+
#endif
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
- bool have_timing_info;
+ bool have_timing_info, need_immediate = false;
struct perf_evsel *evsel, *intel_pt_evsel = NULL;
const struct cpu_map *cpus = evlist->cpus;
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
ptr->have_sched_switch = 3;
} else {
opts->record_switch_events = true;
+ need_immediate = true;
if (cpu_wide)
ptr->have_sched_switch = 3;
else
tracking_evsel->attr.freq = 0;
tracking_evsel->attr.sample_period = 1;
+ if (need_immediate)
+ tracking_evsel->immediate = true;
+
/* In per-cpu case, always need the time of mmap events etc */
if (!cpu_map__empty(cpus)) {
perf_evsel__set_sample_bit(tracking_evsel, TIME);
if (mem->operation & MEM_OPERATION_LOAD)
perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
+ if (mem->operation & MEM_OPERATION_STORE)
+ perf_mem_events[PERF_MEM_EVENTS__STORE].record = true;
+
if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
rec_argv[i++] = "-W";
if (!no_callchain) {
bool use_callchain = false;
+ bool not_pipe = false;
evlist__for_each_entry(session->evlist, evsel) {
+ not_pipe = true;
if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
use_callchain = true;
break;
}
}
- if (!use_callchain)
+ if (not_pipe && !use_callchain)
symbol_conf.use_callchain = false;
}
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
scripts_dir = opendir(scripts_path);
- if (!scripts_dir)
- return -1;
+ if (!scripts_dir) {
+ fprintf(stdout,
+ "open(%s) failed.\n"
+ "Check \"PERF_EXEC_PATH\" env to set scripts dir.\n",
+ scripts_path);
+ exit(-1);
+ }
for_each_lang(scripts_path, scripts_dir, lang_dirent) {
snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
"Valid types: hw,sw,trace,raw. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
"addr,symoff,period,iregs,brstack,brstacksym,flags,"
- "callindent", parse_output_fields),
+ "bpf-output,callindent", parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
return 0;
}
-static void read_counters(bool close_counters)
+static void read_counters(void)
{
struct perf_evsel *counter;
if (perf_stat_process_counter(&stat_config, counter))
pr_warning("failed to process counter %s\n", counter->name);
-
- if (close_counters) {
- perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
- thread_map__nr(evsel_list->threads));
- }
}
}
{
struct timespec ts, rs;
- read_counters(false);
+ read_counters();
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
perf_evlist__enable(evsel_list);
}
+static void disable_counters(void)
+{
+ /*
+ * If we don't have a tracee (i.e. we are attaching to a task or cpu), counters may
+ * still be running. To get accurate group ratios, we must stop groups
+ * from counting before reading their constituent counters.
+ */
+ if (!target__none(&target))
+ perf_evlist__disable(evsel_list);
+}
+
static volatile int workload_exec_errno;
/*
}
}
+ disable_counters();
+
t1 = rdclock();
update_stats(&walltime_nsecs_stats, t1 - t0);
- read_counters(true);
+ /*
+ * Closing a group leader splits the group, and since we only disable
+ * group leaders, this results in the remaining events becoming enabled. To
+ * avoid arbitrary skew, we must read all counters before closing any
+ * group leaders.
+ */
+ read_counters();
+ perf_evlist__close(evsel_list);
return WEXITSTATUS(status);
}
u8 op, result, type = (config >> 0) & 0xff;
const char *err = "unknown-ext-hardware-cache-type";
- if (type > PERF_COUNT_HW_CACHE_MAX)
+ if (type >= PERF_COUNT_HW_CACHE_MAX)
goto out_err;
op = (config >> 8) & 0xff;
err = "unknown-ext-hardware-cache-op";
- if (op > PERF_COUNT_HW_CACHE_OP_MAX)
+ if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
goto out_err;
result = (config >> 16) & 0xff;
err = "unknown-ext-hardware-cache-result";
- if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
+ if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
goto out_err;
err = "invalid-cache";
bool have_calc_cyc_to_tsc;
int exec_mode;
unsigned int insn_bytes;
- uint64_t sign_bit;
- uint64_t sign_bits;
uint64_t period;
enum intel_pt_period_type period_type;
uint64_t tot_insn_cnt;
decoder->data = params->data;
decoder->return_compression = params->return_compression;
- decoder->sign_bit = (uint64_t)1 << 47;
- decoder->sign_bits = ~(((uint64_t)1 << 48) - 1);
-
decoder->period = params->period;
decoder->period_type = params->period_type;
return 0;
}
-static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
- const struct intel_pt_pkt *packet,
+static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
uint64_t last_ip)
{
uint64_t ip;
switch (packet->count) {
- case 2:
+ case 1:
ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
packet->payload;
break;
- case 4:
+ case 2:
ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
packet->payload;
break;
+ case 3:
+ ip = packet->payload;
+ /* Sign-extend 6-byte ip */
+ if (ip & (uint64_t)0x800000000000ULL)
+ ip |= (uint64_t)0xffff000000000000ULL;
+ break;
+ case 4:
+ ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
+ packet->payload;
+ break;
case 6:
ip = packet->payload;
break;
return 0;
}
- if (ip & decoder->sign_bit)
- return ip | decoder->sign_bits;
-
return ip;
}
static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
{
- decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet,
- decoder->last_ip);
+ decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
}
static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
}
}
+static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
+{
+ return decoder->last_ip || decoder->packet.count == 0 ||
+ decoder->packet.count == 3 || decoder->packet.count == 6;
+}
+
/* Walk PSB+ packets to get in sync. */
static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
{
case INTEL_PT_FUP:
decoder->pge = true;
- if (decoder->last_ip || decoder->packet.count == 6 ||
- decoder->packet.count == 0) {
+ if (intel_pt_have_ip(decoder)) {
uint64_t current_ip = decoder->ip;
intel_pt_set_ip(decoder);
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
- if (decoder->last_ip || decoder->packet.count == 6 ||
- decoder->packet.count == 0)
+ if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (decoder->ip)
return 0;
case INTEL_PT_FUP:
if (decoder->overflow) {
- if (decoder->last_ip ||
- decoder->packet.count == 6 ||
- decoder->packet.count == 0)
+ if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (decoder->ip)
return 0;
const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
- switch (byte >> 5) {
+ int ip_len;
+
+ packet->count = byte >> 5;
+
+ switch (packet->count) {
case 0:
- packet->count = 0;
+ ip_len = 0;
break;
case 1:
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 2;
+ ip_len = 2;
packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
break;
case 2:
if (len < 5)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 4;
+ ip_len = 4;
packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
break;
case 3:
- case 6:
+ case 4:
if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 6;
+ ip_len = 6;
memcpy_le64(&packet->payload, buf + 1, 6);
break;
+ case 6:
+ if (len < 9)
+ return INTEL_PT_NEED_MORE_BYTES;
+ ip_len = 8;
+ packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
+ break;
default:
return INTEL_PT_BAD_PACKET;
}
packet->type = type;
- return packet->count + 1;
+ return ip_len + 1;
}
static int intel_pt_get_mode(const unsigned char *buf, size_t len,
+#include <sys/sysmacros.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
module = "kernel";
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+ /* short_name is "[module]" */
if (strncmp(pos->dso->short_name + 1, module,
- pos->dso->short_name_len - 2) == 0) {
+ pos->dso->short_name_len - 2) == 0 &&
+ module[pos->dso->short_name_len - 2] == '\0') {
return pos;
}
}
return NULL;
}
-static struct map *get_target_map(const char *target, bool user)
+struct map *get_target_map(const char *target, bool user)
{
/* Init maps of given executable or kernel */
if (user)
if (uprobes)
address = sym->start;
else
- address = map->unmap_ip(map, sym->start);
+ address = map->unmap_ip(map, sym->start) - map->reloc;
break;
}
if (!address) {
return ret;
}
-/* Post processing the probe events */
-static int post_process_probe_trace_events(struct probe_trace_event *tevs,
- int ntevs, const char *module,
- bool uprobe)
+static int
+post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs)
{
struct ref_reloc_sym *reloc_sym;
char *tmp;
int i, skipped = 0;
- if (uprobe)
- return add_exec_to_probe_trace_events(tevs, ntevs, module);
-
- /* Note that currently ref_reloc_sym based probe is not for drivers */
- if (module)
- return add_module_to_probe_trace_events(tevs, ntevs, module);
-
reloc_sym = kernel_get_ref_reloc_sym();
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found!\n");
return skipped;
}
+void __weak
+arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused,
+ int ntevs __maybe_unused)
+{
+}
+
+/* Post processing the probe events */
+static int post_process_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event *tevs,
+ int ntevs, const char *module,
+ bool uprobe)
+{
+ int ret;
+
+ if (uprobe)
+ ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
+ else if (module)
+ /* Currently ref_reloc_sym based probe is not for drivers */
+ ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+ else
+ ret = post_process_kernel_probe_trace_events(tevs, ntevs);
+
+ if (ret >= 0)
+ arch__post_process_probe_trace_events(pev, ntevs);
+
+ return ret;
+}
+
/* Try to find perf_probe_event with debuginfo */
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs)
if (ntevs > 0) { /* Succeeded to find trace events */
pr_debug("Found %d probe_trace_events.\n", ntevs);
- ret = post_process_probe_trace_events(*tevs, ntevs,
+ ret = post_process_probe_trace_events(pev, *tevs, ntevs,
pev->target, pev->uprobes);
if (ret < 0 || ret == ntevs) {
clear_probe_trace_events(*tevs, ntevs);
return err;
}
-bool __weak arch__prefers_symtab(void) { return false; }
-
/* Concatenate two arrays */
static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
{
if (ret > 0 || pev->sdt) /* SDT can be found only in the cache */
return ret == 0 ? -ENOENT : ret; /* Found in probe cache */
- if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
- ret = find_probe_trace_events_from_map(pev, tevs);
- if (ret > 0)
- return ret; /* Found in symbol table */
- }
-
/* Convert perf_probe_event with debuginfo */
ret = try_to_find_probe_trace_events(pev, tevs);
if (ret != 0)
int show_available_vars(struct perf_probe_event *pevs, int npevs,
struct strfilter *filter);
int show_available_funcs(const char *module, struct strfilter *filter, bool user);
-bool arch__prefers_symtab(void);
void arch__fix_tev_from_maps(struct perf_probe_event *pev,
struct probe_trace_event *tev, struct map *map,
struct symbol *sym);
int copy_to_probe_trace_arg(struct probe_trace_arg *tvar,
struct perf_probe_arg *pvar);
+struct map *get_target_map(const char *target, bool user);
+
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+ int ntevs);
+
#endif /*_PROBE_EVENT_H */
/* Get raw string list of current kprobe_events or uprobe_events */
struct strlist *probe_file__get_rawlist(int fd)
{
- int ret, idx;
+ int ret, idx, fddup;
FILE *fp;
char buf[MAX_CMDLEN];
char *p;
return NULL;
sl = strlist__new(NULL, NULL);
+ if (sl == NULL)
+ return NULL;
+
+ fddup = dup(fd);
+ if (fddup < 0)
+ goto out_free_sl;
+
+ fp = fdopen(fddup, "r");
+ if (!fp)
+ goto out_close_fddup;
- fp = fdopen(dup(fd), "r");
while (!feof(fp)) {
p = fgets(buf, MAX_CMDLEN, fp);
if (!p)
ret = strlist__add(sl, buf);
if (ret < 0) {
pr_debug("strlist__add failed (%d)\n", ret);
- strlist__delete(sl);
- return NULL;
+ goto out_close_fp;
}
}
fclose(fp);
return sl;
+
+out_close_fp:
+ fclose(fp);
+ goto out_free_sl;
+out_close_fddup:
+ close(fddup);
+out_free_sl:
+ strlist__delete(sl);
+ return NULL;
}
static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
{
struct probe_cache_entry *entry = NULL;
char buf[MAX_CMDLEN], *p;
- int ret = 0;
+ int ret = 0, fddup;
FILE *fp;
- fp = fdopen(dup(pcache->fd), "r");
- if (!fp)
+ fddup = dup(pcache->fd);
+ if (fddup < 0)
+ return -errno;
+ fp = fdopen(fddup, "r");
+ if (!fp) {
+ close(fddup);
return -EINVAL;
+ }
while (!feof(fp)) {
if (!fgets(buf, MAX_CMDLEN, fp))
char sbuf[STRERR_BUFSIZE];
int bsize, boffs, total;
int ret;
+ char sign;
/* TODO: check all types */
- if (cast && strcmp(cast, "string") != 0) {
+ if (cast && strcmp(cast, "string") != 0 &&
+ strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) {
/* Non string type is OK */
+ /* and respect signedness cast */
tvar->type = strdup(cast);
return (tvar->type == NULL) ? -ENOMEM : 0;
}
return (tvar->type == NULL) ? -ENOMEM : 0;
}
+ if (cast && (strcmp(cast, "u") == 0))
+ sign = 'u';
+ else if (cast && (strcmp(cast, "s") == 0))
+ sign = 's';
+ else
+ sign = die_is_signed_type(&type) ? 's' : 'u';
+
ret = dwarf_bytesize(&type);
if (ret <= 0)
/* No size ... try to use default type */
dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
ret = MAX_BASIC_TYPE_BITS;
}
- ret = snprintf(buf, 16, "%c%d",
- die_is_signed_type(&type) ? 's' : 'u', ret);
+ ret = snprintf(buf, 16, "%c%d", sign, ret);
formatted:
if (ret < 0 || ret >= 16) {
} else {
pevent_event_info(&seq, evsel->tp_format, &rec);
}
- return seq.buffer;
+ /*
+ * Trim the buffer; it starts at 4KB and we're not going to
+ * add anything more to it.
+ */
+ return realloc(seq.buffer, seq.len + 1);
}
static int64_t
sec = syms_ss->symtab;
shdr = syms_ss->symshdr;
- if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
+ if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
+ ".text", NULL))
dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
if (runtime_ss->opdsec)
if (__report_module(&al, ip, ui))
return -1;
- e->ip = ip;
+ e->ip = al.addr;
e->map = al.map;
e->sym = al.sym;
thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
MAP__FUNCTION, ip, &al);
- e.ip = ip;
+ e.ip = al.addr;
e.map = al.map;
e.sym = al.sym;
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
if (nfit_test->setup != nfit_test0_setup)
return 0;
+ flush_work(&acpi_desc->work);
nfit_test->setup_hotplug = 1;
nfit_test->setup(nfit_test);
GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
-CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
export CFLAGS
DMA_NONE = 3,
};
+#define dma_alloc_coherent(d, s, hp, f) ({ \
+ void *__dma_alloc_coherent_p = kmalloc((s), (f)); \
+ *(hp) = (unsigned long)__dma_alloc_coherent_p; \
+ __dma_alloc_coherent_p; \
+})
+
+#define dma_free_coherent(d, s, p, h) kfree(p)
+
+#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
+
+#define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_mapping_error(...) (0)
+
+#define dma_unmap_single(...) do { } while (0)
+#define dma_unmap_page(...) do { } while (0)
+
#endif
#define PAGE_SIZE getpagesize()
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
+typedef unsigned long long phys_addr_t;
typedef unsigned long long dma_addr_t;
typedef size_t __kernel_size_t;
typedef unsigned int __wsum;
return p;
}
+static inline void *alloc_pages_exact(size_t s, gfp_t gfp)
+{
+ return kmalloc(s, gfp);
+}
+
static inline void kfree(void *p)
{
if (p >= __kfree_ignore_start && p < __kfree_ignore_end)
free(p);
}
+static inline void free_pages_exact(void *p, size_t s)
+{
+ kfree(p);
+}
+
static inline void *krealloc(void *p, size_t s, gfp_t gfp)
{
return realloc(p, s);
#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
+#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n"))
+
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
typeof(y) _min2 = (y); \
#ifndef LINUX_SLAB_H
+#define GFP_KERNEL 0
+#define GFP_ATOMIC 0
+#define __GFP_NOWARN 0
+#define __GFP_ZERO 0
#endif
#include <linux/scatterlist.h>
#include <linux/kernel.h>
+struct device {
+ void *parent;
+};
+
struct virtio_device {
- void *dev;
+ struct device dev;
u64 features;
};
#define virtio_has_feature(dev, feature) \
(__virtio_test_bit((dev), feature))
+/**
+ * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+{
+ /*
+ * Note the reverse polarity of the quirk feature (compared to most
+ * other features); this is for compatibility with legacy systems.
+ */
+ return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
+
static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
+#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;
static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
+static u32 host_vtimer_irq_flags;
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
static void kvm_timer_init_interrupt(void *info)
{
- enable_percpu_irq(host_vtimer_irq, 0);
+ enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
}
host_vtimer_irq = info->virtual_irq;
+ host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
+ if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
+ host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
+ kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
+ host_vtimer_irq);
+ host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+ }
+
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
"kvm guest timer", kvm_get_running_vcpus());
if (err) {
int i, vcpu_lock_idx = -1, ret;
struct kvm_vcpu *vcpu;
- mutex_lock(&kvm->lock);
-
- if (irqchip_in_kernel(kvm)) {
- ret = -EEXIST;
- goto out;
- }
+ if (irqchip_in_kernel(kvm))
+ return -EEXIST;
/*
* This function is also called by the KVM_CREATE_IRQCHIP handler,
* the proper checks already.
*/
if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
- !kvm_vgic_global_state.can_emulate_gicv2) {
- ret = -ENODEV;
- goto out;
- }
+ !kvm_vgic_global_state.can_emulate_gicv2)
+ return -ENODEV;
/*
* Any time a vcpu is run, vcpu_load is called which tries to grab the
vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
mutex_unlock(&vcpu->mutex);
}
-
-out:
- mutex_unlock(&kvm->lock);
return ret;
}
irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
if (!irq)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&irq->lpi_list);
INIT_LIST_HEAD(&irq->ap_list);
* Find the target VCPU and the LPI number for a given devid/eventid pair
* and make this IRQ pending, possibly injecting it.
* Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
*/
-static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
- u32 devid, u32 eventid)
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid)
{
+ struct kvm_vcpu *vcpu;
struct its_itte *itte;
if (!its->enabled)
- return;
+ return -EBUSY;
itte = find_itte(its, devid, eventid);
- /* Triggering an unmapped IRQ gets silently dropped. */
- if (itte && its_is_collection_mapped(itte->collection)) {
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
- if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) {
- spin_lock(&itte->irq->irq_lock);
- itte->irq->pending = true;
- vgic_queue_irq_unlock(kvm, itte->irq);
- }
- }
+ if (!itte || !its_is_collection_mapped(itte->collection))
+ return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+ vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+ if (!vcpu)
+ return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+ if (!vcpu->arch.vgic_cpu.lpis_enabled)
+ return -EBUSY;
+
+ spin_lock(&itte->irq->irq_lock);
+ itte->irq->pending = true;
+ vgic_queue_irq_unlock(kvm, itte->irq);
+
+ return 0;
+}
+
+static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+{
+ struct vgic_io_device *iodev;
+
+ if (dev->ops != &kvm_io_gic_ops)
+ return NULL;
+
+ iodev = container_of(dev, struct vgic_io_device, dev);
+
+ if (iodev->iodev_type != IODEV_ITS)
+ return NULL;
+
+ return iodev;
}
/*
* Queries the KVM IO bus framework to get the ITS pointer from the given
* doorbell address.
* We then call vgic_its_trigger_msi() with the decoded data.
+ * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
*/
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
u64 address;
struct kvm_io_device *kvm_io_dev;
struct vgic_io_device *iodev;
+ int ret;
if (!vgic_has_its(kvm))
return -ENODEV;
kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
if (!kvm_io_dev)
- return -ENODEV;
+ return -EINVAL;
- iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+ iodev = vgic_get_its_iodev(kvm_io_dev);
+ if (!iodev)
+ return -EINVAL;
mutex_lock(&iodev->its->its_lock);
- vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
+ ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
mutex_unlock(&iodev->its->its_lock);
- return 0;
+ if (ret < 0)
+ return ret;
+
+ /*
+ * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
+ * if the guest has blocked the MSI. So we map any LPI mapping
+ * related error to that.
+ */
+ if (ret)
+ return 0;
+ else
+ return 1;
}
/* Requires the its_lock to be held. */
list_del(&itte->itte_list);
/* This put matches the get in vgic_add_lpi. */
- vgic_put_irq(kvm, itte->irq);
+ if (itte->irq)
+ vgic_put_irq(kvm, itte->irq);
kfree(itte);
}
struct its_device *device;
struct its_collection *collection, *new_coll = NULL;
int lpi_nr;
+ struct vgic_irq *irq;
device = find_its_device(its, device_id);
if (!device)
lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
return E_ITS_MAPTI_PHYSICALID_OOR;
+ /* If there is an existing mapping, behavior is UNPREDICTABLE. */
+ if (find_itte(its, device_id, event_id))
+ return 0;
+
collection = find_collection(its, coll_id);
if (!collection) {
int ret = vgic_its_alloc_collection(its, &collection, coll_id);
new_coll = collection;
}
- itte = find_itte(its, device_id, event_id);
+ itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
if (!itte) {
- itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
- if (!itte) {
- if (new_coll)
- vgic_its_free_collection(its, coll_id);
- return -ENOMEM;
- }
-
- itte->event_id = event_id;
- list_add_tail(&itte->itte_list, &device->itt_head);
+ if (new_coll)
+ vgic_its_free_collection(its, coll_id);
+ return -ENOMEM;
}
+ itte->event_id = event_id;
+ list_add_tail(&itte->itte_list, &device->itt_head);
+
itte->collection = collection;
itte->lpi = lpi_nr;
- itte->irq = vgic_add_lpi(kvm, lpi_nr);
+
+ irq = vgic_add_lpi(kvm, lpi_nr);
+ if (IS_ERR(irq)) {
+ if (new_coll)
+ vgic_its_free_collection(its, coll_id);
+ its_free_itte(kvm, itte);
+ return PTR_ERR(irq);
+ }
+ itte->irq = irq;
+
update_affinity_itte(kvm, itte);
/*
u32 msi_data = its_cmd_get_id(its_cmd);
u64 msi_devid = its_cmd_get_deviceid(its_cmd);
- vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
-
- return 0;
+ return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
its_sync_lpi_pending_table(vcpu);
}
-static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
+static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{
struct vgic_io_device *iodev = &its->iodev;
int ret;
- if (its->initialized)
- return 0;
+ if (!its->initialized)
+ return -EBUSY;
if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
return -ENXIO;
KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
mutex_unlock(&kvm->slots_lock);
- if (!ret)
- its->initialized = true;
-
return ret;
}
if (type != KVM_VGIC_ITS_ADDR_TYPE)
return -ENODEV;
- if (its->initialized)
- return -EBUSY;
-
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
case KVM_DEV_ARM_VGIC_GRP_CTRL:
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
- return vgic_its_init_its(dev->kvm, its);
+ its->initialized = true;
+
+ return 0;
}
break;
}
return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
KVM_DEV_TYPE_ARM_VGIC_ITS);
}
+
+/*
+ * Registers all ITSes with the kvm_io_bus framework.
+ * To follow the existing VGIC initialization sequence, this has to be
+ * done as late as possible, just before the first VCPU runs.
+ */
+int vgic_register_its_iodevs(struct kvm *kvm)
+{
+ struct kvm_device *dev;
+ int ret = 0;
+
+ list_for_each_entry(dev, &kvm->devices, vm_node) {
+ if (dev->ops != &kvm_arm_vgic_its_ops)
+ continue;
+
+ ret = vgic_register_its_iodev(kvm, dev->private);
+ if (ret)
+ return ret;
+ /*
+ * We don't need to care about tearing down previously
+ * registered ITSes, as the kvm_io_bus framework removes
+ * them for us if the VM gets destroyed.
+ */
+ }
+
+ return ret;
+}
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- u64 propbaser = dist->propbaser;
+ u64 old_propbaser, propbaser;
/* Storing a value with LPIs already enabled is undefined */
if (vgic_cpu->lpis_enabled)
return;
- propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
- propbaser = vgic_sanitise_propbaser(propbaser);
-
- dist->propbaser = propbaser;
+ do {
+ old_propbaser = dist->propbaser;
+ propbaser = old_propbaser;
+ propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
+ propbaser = vgic_sanitise_propbaser(propbaser);
+ } while (cmpxchg64(&dist->propbaser, old_propbaser,
+ propbaser) != old_propbaser);
}
static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
unsigned long val)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- u64 pendbaser = vgic_cpu->pendbaser;
+ u64 old_pendbaser, pendbaser;
/* Storing a value with LPIs already enabled is undefined */
if (vgic_cpu->lpis_enabled)
return;
- pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
- pendbaser = vgic_sanitise_pendbaser(pendbaser);
-
- vgic_cpu->pendbaser = pendbaser;
+ do {
+ old_pendbaser = vgic_cpu->pendbaser;
+ pendbaser = old_pendbaser;
+ pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
+ pendbaser = vgic_sanitise_pendbaser(pendbaser);
+ } while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
+ pendbaser) != old_pendbaser);
}
/*
goto out;
}
+ if (vgic_has_its(kvm)) {
+ ret = vgic_register_its_iodevs(kvm);
+ if (ret) {
+ kvm_err("Unable to register VGIC ITS MMIO regions\n");
+ goto out;
+ }
+ }
+
dist->ready = true;
out:
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
- struct vgic_dist *dist;
+ struct vgic_dist *dist = &kvm->arch.vgic;
if (irq->intid < VGIC_MIN_LPI)
return;
- if (!kref_put(&irq->refcount, vgic_irq_release))
+ spin_lock(&dist->lpi_list_lock);
+ if (!kref_put(&irq->refcount, vgic_irq_release)) {
+ spin_unlock(&dist->lpi_list_lock);
return;
+ };
- dist = &kvm->arch.vgic;
-
- spin_lock(&dist->lpi_list_lock);
list_del(&irq->lpi_list);
dist->lpi_list_count--;
spin_unlock(&dist->lpi_list_lock);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+int vgic_register_its_iodevs(struct kvm *kvm);
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
return -ENODEV;
}
+static inline int vgic_register_its_iodevs(struct kvm *kvm)
+{
+ return -ENODEV;
+}
+
static inline bool vgic_has_its(struct kvm *kvm)
{
return false;
{
struct kvm_device *dev, *tmp;
+ /*
+ * We do not need to take the kvm->lock here, because nobody else
+ * has a reference to the struct kvm at this point and therefore
+ * cannot access the devices list anyhow.
+ */
list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
list_del(&dev->vm_node);
dev->ops->destroy(dev);
dev->ops = ops;
dev->kvm = kvm;
+ mutex_lock(&kvm->lock);
ret = ops->create(dev, cd->type);
if (ret < 0) {
+ mutex_unlock(&kvm->lock);
kfree(dev);
return ret;
}
+ list_add(&dev->vm_node, &kvm->devices);
+ mutex_unlock(&kvm->lock);
+
+ if (ops->init)
+ ops->init(dev);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
ops->destroy(dev);
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
+ mutex_unlock(&kvm->lock);
return ret;
}
- list_add(&dev->vm_node, &kvm->devices);
kvm_get_kvm(kvm);
cd->fd = ret;
return 0;