- const: toradex,apalis_t30
- const: nvidia,tegra30
- items:
- - const: toradex,apalis_t30-eval-v1.1
+ - const: toradex,apalis_t30-v1.1-eval
- const: toradex,apalis_t30-eval
- const: toradex,apalis_t30-v1.1
- const: toradex,apalis_t30
All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
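For illustration, a minimal sketch of the expected layout follows. It is not
taken from this document: node names, unit addresses and compatible strings are
placeholders, and only the sibling relationship between MMSYS_CONFIG and the
DISP blocks is the point.

	mmsys: syscon@14000000 {
		compatible = "mediatek,mt8173-mmsys", "syscon";
		reg = <0x14000000 0x1000>;
	};

	/* DISP function blocks sit next to mmsys, not underneath it */
	ovl0: ovl@1400c000 {
		compatible = "mediatek,mt8173-disp-ovl";
		reg = <0x1400c000 0x1000>;
	};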
DISP function blocks
====================
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+ - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+ Each Samsung UFS host controller instance should have its own node.
+ This binding defines the Samsung-specific bindings in addition to those
+ used in the common ufshcd bindings
+ [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+
+ compatible:
+ enum:
+ - samsung,exynos7-ufs
+
+ reg:
+ items:
+ - description: HCI register
+ - description: vendor specific register
+ - description: unipro register
+ - description: UFS protector register
+
+ reg-names:
+ items:
+ - const: hci
+ - const: vs_hci
+ - const: unipro
+ - const: ufsp
+
+ clocks:
+ items:
+ - description: ufs link core clock
+ - description: unipro main clock
+
+ clock-names:
+ items:
+ - const: core_clk
+ - const: sclk_unipro_main
+
+ interrupts:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: ufs-phy
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - phys
+ - phy-names
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/exynos7-clk.h>
+
+ ufs: ufs@15570000 {
+ compatible = "samsung,exynos7-ufs";
+ reg = <0x15570000 0x100>,
+ <0x15570100 0x100>,
+ <0x15571000 0x200>,
+ <0x15572000 0x300>;
+ reg-names = "hci", "vs_hci", "unipro", "ufsp";
+ interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+ <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+ clock-names = "core_clk", "sclk_unipro_main";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+ phys = <&ufs_phy>;
+ phy-names = "ufs-phy";
+ };
+...
F: arch/arm/mach-pxa/vpac270.c
ARM/VT8500 ARM ARCHITECTURE
-M: Tony Prisk <linux@prisktech.co.nz>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
F: drivers/scsi/nsp32*
NIOS2 ARCHITECTURE
-M: Ley Foon Tan <ley.foon.tan@intel.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
F: arch/nios2/
NITRO ENCLAVES (NE)
F: drivers/pci/controller/pci-ixp4xx.c
PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M: Jonathan Derrick <jonathan.derrick@intel.com>
+M: Nirmal Patel <nirmal.patel@linux.intel.com>
+R: Jonathan Derrick <jonathan.derrick@linux.dev>
L: linux-pci@vger.kernel.org
S: Supported
F: drivers/pci/controller/vmd.c
F: arch/x86/boot/video*
SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Christoph Hellwig <hch@infradead.org>
L: iommu@lists.linux-foundation.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W: http://git.infradead.org/users/hch/dma-mapping.git
+T: git git://git.infradead.org/users/hch/dma-mapping.git
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
F: kernel/dma/swiotlb.c
F: tools/lib/bpf/xsk*
XEN BLOCK SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Roger Pau Monné <roger.pau@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: drivers/net/xen-netback/*
XEN PCI SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/x86/pci/*xen*
F: sound/xen/*
XEN SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Juergen Gross <jgross@suse.com>
+M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: iommu@lists.linux-foundation.org
S: Supported
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Opossums on Parade
# *DOCUMENTATION*
select NEED_SG_DMA_LENGTH
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
- select GENERIC_PCI_IOMAP if PCI
+ select GENERIC_PCI_IOMAP
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
config ALPHA_JENSEN
bool "Jensen"
- depends on BROKEN
select HAVE_EISA
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
extern void __remlu(void);
extern void __divqu(void);
extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long, unsigned long);
* convinced that I need one of the newer machines.
*/
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
{
return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
}
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
{
*(vuip)((addr << 9) + EISA_VL82C106) = b;
mb();
}
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
{
long result;
return __kernel_extbl(result, addr & 3);
}
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
{
jensen_set_hae(0);
*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
*
* Code supporting the Jensen.
*/
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef __EXTERN_INLINE
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/ptrace.h>
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef __EXTERN_INLINE
-
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
ev67-$(CONFIG_ALPHA_EV67) := ev67-
lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
+ udiv-qrnnd.o \
udelay.o \
$(ev6-y)memset.o \
$(ev6-y)memcpy.o \
# along with GCC; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
+#include <asm/export.h>
.set noreorder
.set noat
ret $31,($26),1
.end __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)
obj-$(CONFIG_MATHEMU) += math-emu.o
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o
egress:
return si_code;
}
-
-EXPORT_SYMBOL(__udiv_qrnnd);
void sve_alloc(struct task_struct *task)
{
if (task->thread.sve_state) {
- memset(task->thread.sve_state, 0, sve_state_size(current));
+ memset(task->thread.sve_state, 0, sve_state_size(task));
return;
}
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
-#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
}
}
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (!INDIRECT_ADDR(addr)) {
iounmap(addr);
}
}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
+#include <asm/tm.h>
#include <asm/unistd.h>
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
*/
irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
+ /*
+ * If system call is called with TM active, set _TIF_RESTOREALL to
+ * prevent RFSCV being used to return to userspace, because POWER9
+ * TM implementation has problems with this instruction returning to
+ * transactional state. Final register values are not relevant because
+ * the transaction will be aborted upon return anyway. Or in the case
+ * of unsupported_scv SIGILL fault, the return state does not much
+ * matter because it's an edge case.
+ */
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+ current_thread_info()->flags |= _TIF_RESTOREALL;
+
+ /*
+ * If the system call was made with a transaction active, doom it and
+ * return without performing the system call. Unless it was an
+ * unsupported scv vector, in which case it's treated like an illegal
+ * instruction.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+ !trap_is_unsupported_scv(regs)) {
+ /* Enable TM in the kernel, and disable EE (for scv) */
+ hard_irq_disable();
+ mtmsr(mfmsr() | MSR_TM);
+
+ /* tabort, this dooms the transaction, nothing else */
+ asm volatile(".long 0x7c00071d | ((%0) << 16)"
+ :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+ /*
+ * Userspace will never see the return value. Execution will
+ * resume after the tbegin. of the aborted transaction with the
+ * checkpointed register state. A context switch could occur
+ * or signal delivered to the process before resuming the
+ * doomed transaction context, but that should all be handled
+ * as expected.
+ */
+ return -ENOSYS;
+ }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
local_irq_enable();
if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
-#include <asm/tm.h>
.section ".toc","aw"
SYS_CALL_TABLE:
.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
SCV_INTERRUPT_TO_KERNEL
mr r10,r1
ld r1,PACAKSAVE(r13)
.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
- /* Firstly we need to enable TM in the kernel */
- mfmsr r10
- li r9, 1
- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r10, 0
-
- /* tabort, this dooms the transaction, nothing else */
- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R9)
-
- /*
- * Return directly to userspace. We have corrupted user register state,
- * but userspace will never see that register state. Execution will
- * resume after the tbegin of the aborted transaction with the
- * checkpointed register state.
- */
- li r9, MSR_RI
- andc r10, r10, r9
- mtmsrd r10, 1
- mtspr SPRN_SRR0, r11
- mtspr SPRN_SRR1, r12
- RFI_TO_USER
- b . /* prevent speculative execution */
-#endif
-
/*
* If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
* touched, no exit work created, then this can be used.
{
int index;
struct machine_check_event evt;
+ unsigned long msr;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
memcpy(&local_paca->mce_info->mce_event_queue[index],
&evt, sizeof(evt));
- /* Queue irq work to process this event later. */
- irq_work_queue(&mce_event_process_work);
+ /*
+ * Queue irq work to process this event later. Before
+ * queuing the work enable translation for non radix LPAR,
+ * as irq_work_queue may try to access memory outside RMO
+ * region.
+ */
+ if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+ msr = mfmsr();
+ mtmsr(msr | MSR_IR | MSR_DR);
+ irq_work_queue(&mce_event_process_work);
+ mtmsr(msr);
+ } else {
+ irq_work_queue(&mce_event_process_work);
+ }
}
void mce_common_process_ue(struct pt_regs *regs,
/* The following code handles the fake_suspend = 1 case */
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -PPC_MIN_STKFRM(r1)
+ stdu r1, -TM_FRAME_SIZE(r1)
/* Turn on TM. */
mfmsr r8
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
+ /*
+ * It's possible that treclaim. may modify registers, if we have lost
+ * track of fake-suspend state in the guest due to it using rfscv.
+ * Save and restore registers in case this occurs.
+ */
+ mfspr r3, SPRN_DSCR
+ mfspr r4, SPRN_XER
+ mfspr r5, SPRN_AMR
+ /* SPRN_TAR would need to be saved here if the kernel ever used it */
+ mfcr r12
+ SAVE_NVGPRS(r1)
+ SAVE_GPR(2, r1)
+ SAVE_GPR(3, r1)
+ SAVE_GPR(4, r1)
+ SAVE_GPR(5, r1)
+ stw r12, 8(r1)
+ std r1, HSTATE_HOST_R1(r13)
+
/* We have to treclaim here because that's the only way to do S->N */
li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
+ GET_PACA(r13)
+ ld r1, HSTATE_HOST_R1(r13)
+ REST_GPR(2, r1)
+ REST_GPR(3, r1)
+ REST_GPR(4, r1)
+ REST_GPR(5, r1)
+ lwz r12, 8(r1)
+ REST_NVGPRS(r1)
+ mtspr SPRN_DSCR, r3
+ mtspr SPRN_XER, r4
+ mtspr SPRN_AMR, r5
+ mtcr r12
+ HMT_MEDIUM
+
/*
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
- addi r1, r1, PPC_MIN_STKFRM
+ addi r1, r1, TM_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
if (xics_ics->check(xics_ics, hwirq))
return -EINVAL;
- /* No chip data for the XICS domain */
+ /* Let the ICS be the chip data for the XICS domain. For ICS native */
irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
- NULL, handle_fasteoi_irq, NULL, NULL);
+ xics_ics, handle_fasteoi_irq, NULL, NULL);
return 0;
}
The minimum size for the stack guard should be 256 for 31 bit and
512 for 64 bit.
-config WARN_DYNAMIC_STACK
- def_bool n
- prompt "Emit compiler warnings for function with dynamic stack usage"
- help
- This option enables the compiler option -mwarn-dynamicstack. If the
- compiler supports this options generates warnings for functions
- that dynamically allocate stack space using alloca.
-
- Say N if you are unsure.
-
endmenu
menu "I/O subsystem"
endif
endif
-ifdef CONFIG_WARN_DYNAMIC_STACK
- ifneq ($(call cc-option,-mwarn-dynamicstack),)
- KBUILD_CFLAGS += -mwarn-dynamicstack
- KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
- endif
-endif
-
ifdef CONFIG_EXPOLINE
ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m
CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
- /* Branch instruction needs 6 bytes */ \
- int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+ int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
EMIT4(0xb9080000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,imm */
- EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ if (imm != 0) {
+ /* alfi %dst,imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
EMIT4(0xb9090000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,-imm */
- EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ if (imm != 0) {
+ /* alfi %dst,-imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
if (!imm)
break;
- /* agfi %dst,-imm */
- EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ if (imm == -0x80000000) {
+ /* algfi %dst,0x80000000 */
+ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+ } else {
+ /* agfi %dst,-imm */
+ EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ }
break;
/*
* BPF_MUL
EMIT4(0xb90c0000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
- if (imm == 1)
- break;
- /* msfi %r5,imm */
- EMIT6_IMM(0xc2010000, dst_reg, imm);
+ if (imm != 1) {
+ /* msfi %r5,imm */
+ EMIT6_IMM(0xc2010000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
if (BPF_OP(insn->code) == BPF_MOD)
/* lhgi %dst,0 */
EMIT4_IMM(0xa7090000, dst_reg, 0);
+ else
+ EMIT_ZERO(dst_reg);
break;
}
/* lhi %w0,0 */
EMIT4(0xb9820000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
- if (!imm)
- break;
- /* xilf %dst,imm */
- EMIT6_IMM(0xc0070000, dst_reg, imm);
+ if (imm != 0) {
+ /* xilf %dst,imm */
+ EMIT6_IMM(0xc0070000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
- if (imm == 0)
- break;
- /* sll %dst,imm(%r0) */
- EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sll %dst,imm(%r0) */
+ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
- if (imm == 0)
- break;
- /* srl %dst,imm(%r0) */
- EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* srl %dst,imm(%r0) */
+ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
- if (imm == 0)
- break;
- /* sra %dst,imm(%r0) */
- EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sra %dst,imm(%r0) */
+ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
mmap_read_lock(current->mm);
ret = -EINVAL;
- vma = find_vma(current->mm, mmio_addr);
+ vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
mmap_read_lock(current->mm);
ret = -EINVAL;
- vma = find_vma(current->mm, mmio_addr);
+ vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzo)
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
$(call if_changed,uimage,bzip2)
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
$(call if_changed,uimage,xz)
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
$(call if_changed,uimage,lzo)
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
$(call if_changed,objcopy)
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
- if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+ size = PAGE_ALIGN(size);
+
+ if (!sparc_dma_free_resource(cpu_addr, size))
return;
dma_make_coherent(dma_addr, size);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
/* nothing to do */
}
EXPORT_SYMBOL(pci_iounmap);
+#endif
config ARCH_HIBERNATION_POSSIBLE
def_bool y
+config ARCH_NR_GPIO
+ int
+ default 1024 if X86_64
+ default 512
+
config ARCH_SUSPEND_POSSIBLE
def_bool y
tune = $(call cc-option,-mtune=$(1),$(2))
+ifdef CONFIG_CC_IS_CLANG
+align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align := -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
cflags-$(CONFIG_M486SX) += -march=i486
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586
# They make zero difference whatsosever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
static void kill_me_now(struct callback_head *ch)
{
+ struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+ p->mce_count = 0;
force_sig(SIGBUS);
}
int flags = MF_ACTION_REQUIRED;
int ret;
+ p->mce_count = 0;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
}
}
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
{
- current->mce_addr = m->addr;
- current->mce_kflags = m->kflags;
- current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
- current->mce_whole_page = whole_page(m);
+ int count = ++current->mce_count;
- if (kill_current_task)
- current->mce_kill_me.func = kill_me_now;
- else
- current->mce_kill_me.func = kill_me_maybe;
+ /* First call, save all the details */
+ if (count == 1) {
+ current->mce_addr = m->addr;
+ current->mce_kflags = m->kflags;
+ current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+ current->mce_whole_page = whole_page(m);
+
+ if (kill_current_task)
+ current->mce_kill_me.func = kill_me_now;
+ else
+ current->mce_kill_me.func = kill_me_maybe;
+ }
+
+ /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+ if (count > 10)
+ mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+ /* Second or later call, make sure page address matches the one from first call */
+ if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+ mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+ /* Do not call task_work_add() more than once */
+ if (count > 1)
+ return;
task_work_add(current, ¤t->mce_kill_me, TWA_RESUME);
}
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));
- queue_task_work(&m, kill_current_task);
+ queue_task_work(&m, msg, kill_current_task);
} else {
/*
}
if (m.kflags & MCE_IN_KERNEL_COPYIN)
- queue_task_work(&m, kill_current_task);
+ queue_task_work(&m, msg, kill_current_task);
}
out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
return 0;
p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d))
+ if (!p4d_present(*p4d))
return 0;
pud = pud_offset(p4d, addr);
- if (pud_none(*pud))
+ if (!pud_present(*pud))
return 0;
if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ if (!pmd_present(*pmd))
return 0;
if (pmd_large(*pmd))
int err = 0;
start = sanitize_phys(start);
- end = sanitize_phys(end);
+
+ /*
+ * The end address passed into this function is exclusive, but
+ * sanitize_phys() expects an inclusive address.
+ */
+ end = sanitize_phys(end - 1) + 1;
if (start >= end) {
WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
start, end - 1, cattr_name(req_type));
x86_platform.legacy.rtc = 1;
}
+static void __init xen_domu_set_legacy_features(void)
+{
+ x86_platform.legacy.rtc = 0;
+}
+
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
add_preferred_console("xenboot", 0, NULL);
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
+ x86_platform.set_legacy_features =
+ xen_domu_set_legacy_features;
} else {
const struct dom0_vga_console_info *info =
(void *)((char *)xen_start_info +
if (pinned) {
struct page *page = pfn_to_page(pfn);
- if (static_branch_likely(&xen_struct_pages_ready))
+ pinned = false;
+ if (static_branch_likely(&xen_struct_pages_ready)) {
+ pinned = PagePinned(page);
SetPagePinned(page);
+ }
xen_mc_batch();
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(PARAVIRT_LAZY_MMU);
if (preloaded)
radix_tree_preload_end();
- ret = blk_iolatency_init(q);
- if (ret)
- goto err_destroy_all;
-
ret = blk_ioprio_init(q);
if (ret)
goto err_destroy_all;
if (ret)
goto err_destroy_all;
+ ret = blk_iolatency_init(q);
+ if (ret) {
+ blk_throtl_exit(q);
+ goto err_destroy_all;
+ }
+
return 0;
err_destroy_all:
/* alloc failed, nothing's initialized yet, free everything */
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
+ spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
ret = -ENOMEM;
__clear_bit(pol->plid, q->blkcg_pols);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
+ spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return;
+
+ /* ensure all bios are off the integrity workqueue */
+ blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
- memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+ memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);
spin_lock_irqsave(&tags->lock, flags);
rq = tags->rqs[bitnr];
- if (!rq || !refcount_inc_not_zero(&rq->ref))
+ if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
rq = NULL;
spin_unlock_irqrestore(&tags->lock, flags);
return rq;
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
+#include <linux/init.h>
#include <linux/mc146818rtc.h>
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;
+ if (!x86_platform.legacy.rtc)
+ return;
+
user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
static int __init early_resume_init(void)
{
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
if (count)
return count;
- kobject_put(&attr_set->kobj);
mutex_destroy(&attr_set->update_lock);
+ kobject_put(&attr_set->kobj);
return 0;
}
EXPORT_SYMBOL_GPL(gov_attr_set_put);
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
- if (no_load)
- return -ENODEV;
-
id = x86_match_cpu(hwp_support_ids);
if (id) {
+ bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+ if (hwp_forced)
+ pr_info("HWP enabled by BIOS\n");
+ else if (no_load)
+ return -ENODEV;
+
copy_cpu_funcs(&core_funcs);
/*
* Avoid enabling HWP for processors without EPP support,
* If HWP is enabled already, though, there is no choice but to
* deal with it.
*/
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
- intel_pstate_hwp_is_enabled()) {
+ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
}
+ pr_info("HWP not enabled\n");
} else {
+ if (no_load)
+ return -ENODEV;
+
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
pr_info("CPU model not supported\n");
else if (!strcmp(str, "passive"))
default_driver = &intel_cpufreq;
- if (!strcmp(str, "no_hwp")) {
- pr_info("HWP disabled\n");
+ if (!strcmp(str, "no_hwp"))
no_hwp = 1;
- }
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
MAX_HWIP
};
-#define HWIP_MAX_INSTANCE 8
+#define HWIP_MAX_INSTANCE 10
struct amd_powerplay {
void *pp_handle;
kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+ return r;
+}
+
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
void amdgpu_amdkfd_fini(void);
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
{
}
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
struct dentry *ent;
int r, i;
-
-
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
- if (!ent) {
+ if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
- return -EIO;
+ return PTR_ERR(ent);
}
ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
&fops_sclk_set);
- if (!ent) {
+ if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
- return -EIO;
+ return PTR_ERR(ent);
}
/* Register debugfs entries for amdgpu_ttm */
if (r)
goto init_failed;
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ goto init_failed;
+
r = amdgpu_device_ip_hw_init_phase1(adev);
if (r)
goto init_failed;
{
int r;
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ return r;
+
r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
dev_warn(tmp_adev->dev, "asic atom init failed!");
} else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+ if (r)
+ goto out;
+
r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r)
goto out;
break;
default:
adev->gmc.tmz_enabled = false;
- dev_warn(adev->dev,
+ dev_info(adev->dev,
"Trusted Memory Zone (TMZ) feature not supported\n");
break;
}
return res;
}
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
{
return RAS_MAX_RECORD_COUNT;
}
int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records, const u32 num);
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (!ent)
- return -ENOMEM;
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
goto out;
}
+ if (bo->type == ttm_bo_type_device &&
+ new_mem->mem_type == TTM_PL_VRAM &&
+ old_mem->mem_type != TTM_PL_VRAM) {
+ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+ * accesses the BO after it's moved.
+ */
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
if (adev->mman.buffer_funcs_enabled) {
if (((old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) ||
return r;
}
- if (bo->type == ttm_bo_type_device &&
- new_mem->mem_type == TTM_PL_VRAM &&
- old_mem->mem_type != TTM_PL_VRAM) {
- /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
- * accesses the BO after it's moved.
- */
- abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- }
-
out:
/* update statistics */
atomic64_add(bo->base.size, &adev->num_bytes_moved);
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 4,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
if (!kfd)
return NULL;
- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
- * 32 and 64-bit requests are possible and must be
- * supported.
- */
- kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
- if (device_info->needs_pci_atomics &&
- !kfd->pci_atomic_requested) {
- dev_info(kfd_device,
- "skipped device %x:%x, PCI rejects atomics\n",
- pdev->vendor, pdev->device);
- kfree(kfd);
- return NULL;
- }
-
kfd->kgd = kgd;
kfd->device_info = device_info;
kfd->pdev = pdev;
kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- kfd->vm_info.first_vmid_kfd + 1;
+ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+ * 32 and 64-bit requests are possible and must be
+ * supported.
+ */
+ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+ if (!kfd->pci_atomic_requested &&
+ kfd->device_info->needs_pci_atomics &&
+ (!kfd->device_info->no_atomic_fw_version ||
+ kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+ dev_info(kfd_device,
+ "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+ kfd->pdev->vendor, kfd->pdev->device,
+ kfd->mec_fw_version,
+ kfd->device_info->no_atomic_fw_version);
+ return false;
+ }
+
/* Verify module parameters regarding mapped process number*/
if ((hws_max_conc_proc < 0)
|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
return ret;
}
-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
int err = 0;
err = kfd_iommu_resume(kfd);
- if (err) {
+ if (err)
dev_err(kfd_device,
"Failed to resume IOMMU for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
- return err;
- }
+ return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+ int err = 0;
err = kfd->dqm->ops.start(kfd->dqm);
if (err) {
bool supports_cwsr;
bool needs_iommu_device;
bool needs_pci_atomics;
+ uint32_t no_atomic_fw_version;
unsigned int num_sdma_engines;
unsigned int num_xgmi_sdma_engines;
unsigned int num_sdma_queues_per_engine;
uint32_t agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+ memset(pa_config, 0, sizeof(*pa_config));
+
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
return 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
+ if (dm->vblank_control_workqueue) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
- INIT_WORK(&work->work, vblank_control_worker);
- work->dm = dm;
- work->acrtc = acrtc;
- work->enable = enable;
+ INIT_WORK(&work->work, vblank_control_worker);
+ work->dm = dm;
+ work->acrtc = acrtc;
+ work->enable = enable;
- if (acrtc_state->stream) {
- dc_stream_retain(acrtc_state->stream);
- work->stream = acrtc_state->stream;
- }
+ if (acrtc_state->stream) {
+ dc_stream_retain(acrtc_state->stream);
+ work->stream = acrtc_state->stream;
+ }
- queue_work(dm->vblank_control_workqueue, &work->work);
+ queue_work(dm->vblank_control_workqueue, &work->work);
+ }
#endif
return 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
- struct dc_state *dc_state)
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
struct dc_stream_state *stream = NULL;
struct drm_connector *connector;
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
- int i, j, clock, bpp;
+ int i, j, clock;
int vcpi, pbn_div, pbn = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
}
pbn_div = dm_mst_get_pbn_divider(stream->link);
- bpp = stream->timing.dsc_cfg.bits_per_pixel;
clock = stream->timing.pix_clk_100hz / 10;
- pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+ /* pbn is calculated by compute_mst_dsc_configs_for_state*/
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (vars[j].aconnector == aconnector) {
+ pbn = vars[j].pbn;
+ break;
+ }
+ }
+
vcpi = drm_dp_mst_atomic_enable_dsc(state,
aconnector->port,
pbn, pbn_div,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+ const struct drm_display_mode *native_mode;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ return;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (!encoder)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ native_mode = &amdgpu_encoder->native_mode;
+ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ native_mode->hdisplay,
+ native_mode->vdisplay);
+}
+
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
struct edid *edid)
{
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
+
+ amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
- * hot-plug, headless s3, dpms
+ /* Stream removed and re-enabled
+ *
+ * Can sometimes overlap with the HPD case,
+ * thus set update_hdcp to false to avoid
+ * setting HDCP multiple times.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (!(old_state->crtc && old_state->crtc->enabled) &&
+ state->crtc && state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
+ return true;
+ }
+
+ /* Hot-plug, headless s3, dpms
+ *
+ * Only start HDCP if the display is connected/enabled.
+ * update_hdcp flag will be set to false until the next
+ * HPD comes in.
*
* Handles: DESIRED -> DESIRED (Special case)
*/
* If PSR or idle optimizations are enabled then flush out
* any pending work before hardware programming.
*/
- flush_workqueue(dm->vblank_control_workqueue);
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
#endif
bundle->stream_update.stream = acrtc_state->stream;
/* if there mode set or reset, disable eDP PSR */
if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
- flush_workqueue(dm->vblank_control_workqueue);
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
#endif
amdgpu_dm_psr_disable_all(dm);
}
int ret, i;
bool lock_and_validation_needed = false;
struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
trace_amdgpu_dm_atomic_check_begin(state);
goto fail;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
goto fail;
- ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
if (ret)
goto fail;
#endif
goto fail;
status = dc_validate_global_state(dc, dm_state->context, false);
if (status != DC_OK) {
- DC_LOG_WARNING("DC global validation failure: %s (%d)",
+ drm_dbg_atomic(dev,
+ "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
uint32_t num_slices_h;
uint32_t num_slices_v;
uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
- int pbn;
- bool dsc_enabled;
- int bpp_x16;
+ struct amdgpu_dm_connector *aconnector;
};
static int kbps_to_peak_pbn(int kbps)
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
- struct dc_link *dc_link)
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_vars *vars)
{
int i;
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
- struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
int count = 0;
bool debugfs_overwrite = false;
params[count].timing = &stream->timing;
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ params[count].aconnector = aconnector;
params[count].port = aconnector->port;
params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
}
/* Try no compression */
for (i = 0; i < count; i++) {
+ vars[i].aconnector = params[i].aconnector;
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i].dsc_enabled = false;
vars[i].bpp_x16 = 0;
}
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state)
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
return false;
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_vars {
+ int pbn;
+ bool dsc_enabled;
+ int bpp_x16;
+ struct amdgpu_dm_connector *aconnector;
+};
+
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state);
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars);
#endif
#endif
depth = *pcpu;
put_cpu_ptr(&fpu_recursion_depth);
- ASSERT(depth > 1);
+ ASSERT(depth >= 1);
}
/**
int dc_link_get_backlight_level(const struct dc_link *link)
{
-
struct abm *abm = get_abm_from_stream_res(link);
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
- if (abm == NULL || abm->funcs->get_current_backlight == NULL)
- return DC_ERROR_UNEXPECTED;
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
- return (int) abm->funcs->get_current_backlight(abm);
+ if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+ return panel_cntl->funcs->get_current_backlight(panel_cntl);
+ else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+ return (int) abm->funcs->get_current_backlight(abm);
+ else
+ return DC_ERROR_UNEXPECTED;
}
int dc_link_get_target_backlight_pwm(const struct dc_link *link)
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
#include "dm_services.h"
#include "dc.h"
#include "dc_link_dp.h"
dp_disable_link_phy(link, signal);
/* Abort link training if failure due to sink being unplugged. */
- if (status == LINK_TRAINING_ABORT)
- break;
- else if (do_fallback) {
+ if (status == LINK_TRAINING_ABORT) {
+ enum dc_connection_type type = dc_connection_none;
+
+ dc_link_detect_sink(link, &type);
+ if (type == dc_connection_none)
+ break;
+ } else if (do_fallback) {
decide_fallback_link_setting(*link_setting, ¤t_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
{
uint64_t current_backlight;
- uint32_t round_result;
uint32_t bl_period, bl_int_count;
uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
- current_backlight = (uint64_t)(current_backlight) * bl_period;
-
- round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
- round_result = (round_result >> (bl_int_count-1)) & 1;
-
- current_backlight >>= bl_int_count;
- current_backlight += round_result;
-
return (uint32_t)(current_backlight);
}
#define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging
#define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible
#define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible
-#define TABLE_COUNT 6
+#define TABLE_SMU_METRICS 6 // Called by Driver
+#define TABLE_COUNT 7
-#define NUM_DSPCLK_LEVELS 8
-#define NUM_SOCCLK_DPM_LEVELS 8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS 4
-#define NUM_MEMCLK_DPM_LEVELS 4
+typedef struct SmuMetricsTable_t {
+ //CPU status
+ uint16_t CoreFrequency[6]; //[MHz]
+ uint32_t CorePower[6]; //[mW]
+ uint16_t CoreTemperature[6]; //[centi-Celsius]
+ uint16_t L3Frequency[2]; //[MHz]
+ uint16_t L3Temperature[2]; //[centi-Celsius]
+ uint16_t C0Residency[6]; //Percentage
-#define NUMBER_OF_PSTATES 8
-#define NUMBER_OF_CORES 8
+ // GFX status
+ uint16_t GfxclkFrequency; //[MHz]
+ uint16_t GfxTemperature; //[centi-Celsius]
-typedef enum {
- S3_TYPE_ENTRY,
- S5_TYPE_ENTRY,
-} Sleep_Type_e;
+ // SOC IP info
+ uint16_t SocclkFrequency; //[MHz]
+ uint16_t VclkFrequency; //[MHz]
+ uint16_t DclkFrequency; //[MHz]
+ uint16_t MemclkFrequency; //[MHz]
-typedef enum {
- GFX_OFF = 0,
- GFX_ON = 1,
-} GFX_Mode_e;
+ // power, VF info for CPU/GFX telemetry rails, and then socket power total
+ uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t CurrentSocketPower; //[mW]
-typedef enum {
- CPU_P0 = 0,
- CPU_P1,
- CPU_P2,
- CPU_P3,
- CPU_P4,
- CPU_P5,
- CPU_P6,
- CPU_P7
-} CPU_PState_e;
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t EdgeTemperature;
+ uint16_t ThrottlerStatus;
+ uint16_t Spare;
-typedef enum {
- CPU_CORE0 = 0,
- CPU_CORE1,
- CPU_CORE2,
- CPU_CORE3,
- CPU_CORE4,
- CPU_CORE5,
- CPU_CORE6,
- CPU_CORE7
-} CORE_ID_e;
+} SmuMetricsTable_t;
-typedef enum {
- DF_DPM0 = 0,
- DF_DPM1,
- DF_DPM2,
- DF_DPM3,
- DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
- GFX_DPM0 = 0,
- GFX_DPM1,
- GFX_DPM2,
- GFX_DPM3,
- GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetrics_t {
+ SmuMetricsTable_t Current;
+ SmuMetricsTable_t Average;
+ uint32_t SampleStartTime;
+ uint32_t SampleStopTime;
+ uint32_t Accnt;
+} SmuMetrics_t;
#endif
__SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \
__SMU_DUMMY_MAP(GfxDriverResetRecovery), \
- __SMU_DUMMY_MAP(BoardPowerCalibration),
+ __SMU_DUMMY_MAP(BoardPowerCalibration), \
+ __SMU_DUMMY_MAP(RequestGfxclk), \
+ __SMU_DUMMY_MAP(ForceGfxVid), \
+ __SMU_DUMMY_MAP(UnforceGfxVid),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
#define PPSMC_MSG_SetDriverTableVMID 0x34
#define PPSMC_MSG_SetSoftMinCclk 0x35
#define PPSMC_MSG_SetSoftMaxCclk 0x36
-#define PPSMC_Message_Count 0x37
+#define PPSMC_MSG_GetGfxFrequency 0x37
+#define PPSMC_MSG_GetGfxVid 0x38
+#define PPSMC_MSG_ForceGfxFreq 0x39
+#define PPSMC_MSG_UnForceGfxFreq 0x3A
+#define PPSMC_MSG_ForceGfxVid 0x3B
+#define PPSMC_MSG_UnforceGfxVid 0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D
+#define PPSMC_Message_Count 0x3E
#endif
*/
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+ (adev->asic_type <= CHIP_BEIGE_GOBY))
return smu_disable_all_features_with_exception(smu,
true,
SMU_FEATURE_COUNT);
struct smu_11_0_dpm_context *dpm_context = NULL;
uint32_t gen_speed, lane_width;
- if (amdgpu_ras_intr_triggered())
- return sysfs_emit(buf, "unavailable\n");
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
dpm_context = smu_dpm->dpm_context;
#undef pr_info
#undef pr_debug
+/* unit: MHz */
+#define CYAN_SKILLFISH_SCLK_MIN 1000
+#define CYAN_SKILLFISH_SCLK_MAX 2000
+#define CYAN_SKILLFISH_SCLK_DEFAULT 1800
+
+/* unit: mV */
+#define CYAN_SKILLFISH_VDDC_MIN 700
+#define CYAN_SKILLFISH_VDDC_MAX 1129
+#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe
+
+static struct gfx_user_settings {
+ uint32_t sclk;
+ uint32_t vddc;
+} cyan_skillfish_user_settings;
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+ FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+
static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0),
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
+ MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
+ MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0),
+ MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0),
+ MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0),
+};
+
+static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP_VALID(SMU_METRICS),
};
+static int cyan_skillfish_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ sizeof(SmuMetrics_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ goto err0_out;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err1_out;
+
+ smu_table->metrics_time = 0;
+
+ return 0;
+
+err1_out:
+ smu_table->gpu_metrics_table_size = 0;
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
+}
+
+static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = cyan_skillfish_tables_init(smu);
+ if (ret)
+ return ret;
+
+ return smu_v11_0_init_smc_tables(smu);
+}
+
+static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+ smu_table->gpu_metrics_table_size = 0;
+
+ smu_table->metrics_time = 0;
+
+ return 0;
+}
+
+static int
+cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+
+ ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->Current.GfxclkFrequency;
+ break;
+ case METRICS_CURR_SOCCLK:
+ *value = metrics->Current.SocclkFrequency;
+ break;
+ case METRICS_CURR_VCLK:
+ *value = metrics->Current.VclkFrequency;
+ break;
+ case METRICS_CURR_DCLK:
+ *value = metrics->Current.DclkFrequency;
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->Current.MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->Current.CurrentSocketPower << 8) /
+ 1000;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->Current.GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->Current.SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = metrics->Current.Voltage[0];
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->Current.Voltage[1];
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = metrics->Current.ThrottlerStatus;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
+static int cyan_skillfish_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data,
+ uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_CURR_GFXCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_EDGE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDSOC,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDGFX,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&smu->sensor_lock);
+
+ return ret;
+}
+
+static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ member_type = METRICS_CURR_GFXCLK;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case SMU_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_CURR_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_CURR_DCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ char *buf)
+{
+ int ret = 0, size = 0;
+ uint32_t cur_value = 0;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
+ if (ret)
+ return ret;
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
+ if (ret)
+ return ret;
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC");
+ size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
+ break;
+ case SMU_OD_RANGE:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_FCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+ break;
+ default:
+ dev_warn(smu->adev->dev, "Unsupported clock type\n");
+ return ret;
+ }
+
+ return size;
+}
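For reference, with the SCLK/VDDC limits defined above, reading the OD_RANGE case would emit sysfs text roughly like the following (column widths follow the %7u/%10u specifiers; shown purely as an illustration):

OD_RANGE:
SCLK:    1000Mhz       2000Mhz
VDDC:     700mV       1129mV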
+
+static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t feature_mask[2];
+ uint64_t feature_enabled;
+
+ /* we need to re-init after suspend so return false */
+ if (adev->in_suspend)
+ return false;
+
+ ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[0] |
+ ((uint64_t)feature_mask[1] << 32);
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_2 *gpu_metrics =
+ (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int i, ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_soc_power = metrics.Current.Power[0];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[1];
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ for (i = 0; i < 6; i++) {
+ gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
+ gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
+ gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
+ }
+
+ for (i = 0; i < 2; i++) {
+ gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
+ gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
+ }
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_2);
+}
+
+static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int ret = 0;
+ uint32_t vid;
+
+ switch (type) {
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (size != 3 || input[0] != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+ input[1] > CYAN_SKILLFISH_SCLK_MAX) {
+			dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ return -EINVAL;
+ }
+
+ if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+ input[2] > CYAN_SKILLFISH_VDDC_MAX) {
+ dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ return -EINVAL;
+ }
+
+ cyan_skillfish_user_settings.sclk = input[1];
+ cyan_skillfish_user_settings.vddc = input[2];
+
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+ cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
+
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
+ cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
+			dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ return -EINVAL;
+ }
+
+ if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
+ (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
+ cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
+ dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
+ cyan_skillfish_user_settings.sclk, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set sclk failed!\n");
+ return ret;
+ }
+
+ if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Unforce vddc failed!\n");
+ return ret;
+ }
+ } else {
+ /*
+ * PMFW accepts SVI2 VID code, convert voltage to VID:
+ * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
+ */
+ vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Force vddc failed!\n");
+ return ret;
+ }
+ }
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
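As a quick sanity check of the SVI2 conversion used in the commit path above, here is a minimal standalone sketch of the same integer math (the 1100 mV input is made up for the example and is not a value taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Mirrors vid = (1550 - mV) * 160 / 1000 from the driver code above. */
static uint32_t example_mv_to_vid(uint32_t vddc_mv)
{
	return (1550 - vddc_mv) * 160 / 1000;
}

int main(void)
{
	/* 1100 mV -> (1550 - 1100) * 160 / 1000 = 72 */
	printf("vid = %u\n", example_mv_to_vid(1100));
	return 0;
}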
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
.check_fw_version = smu_v11_0_check_fw_version,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
+ .init_smc_tables = cyan_skillfish_init_smc_tables,
+ .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+ .read_sensor = cyan_skillfish_read_sensor,
+ .print_clk_levels = cyan_skillfish_print_clk_levels,
+ .is_dpm_running = cyan_skillfish_is_dpm_running,
+ .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
+ .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
.register_irq_handler = smu_v11_0_register_irq_handler,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
{
smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
smu->message_map = cyan_skillfish_message_map;
+ smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true;
}
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
uint32_t min_value, max_value;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
case SMU_OD_RANGE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm)
+	/*
+	 * This addresses the case below:
+	 * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
+	 *
+	 * For NAVI10 and later ASICs, we rely on PMFW to handle runpm. To
+	 * make that possible, PMFW needs to acknowledge the dstate transition
+	 * process for both the gfx (function 0) and audio (function 1)
+	 * functions of the ASIC.
+	 *
+	 * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
+	 * that of the device representing the audio function of the ASIC.
+	 * That means a runpm suspend can be kicked off on the ASIC even if the
+	 * sound driver (snd_hda_intel) has not been loaded yet. However,
+	 * without the dstate transition notification from the audio function,
+	 * PMFW cannot handle the BACO enter/exit correctly, and that will
+	 * cause a driver hang on runpm resume.
+	 *
+	 * To address this, we fall back to the legacy message path (the
+	 * driver masters the timing of BACO enter/exit) when the sound
+	 * driver is missing.
+	 */
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else
return smu_v11_0_baco_enter(smu);
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm) {
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
uint32_t min_value, max_value;
uint32_t smu_version;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm)
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else
return smu_v11_0_baco_enter(smu);
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm) {
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
uint32_t freq_values[3] = {0};
uint32_t min_clk, max_clk;
- if (amdgpu_ras_intr_triggered())
- return sysfs_emit(buf, "unavailable\n");
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
dpm_context = smu_dpm->dpm_context;
switch (type) {
case SMU_OD_SCLK:
- size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
fallthrough;
case SMU_SCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
break;
case SMU_OD_MCLK:
- size = sysfs_emit(buf, "%s:\n", "MCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
fallthrough;
case SMU_MCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
break;
return ret;
}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+ bool snd_driver_loaded;
+
+ /*
+ * If the ASIC comes with no audio function, we always assume
+ * it is "enabled".
+ */
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (!p)
+ return true;
+
+ snd_driver_loaded = pci_is_enabled(p) ? true : false;
+
+ pci_dev_put(p);
+
+ return snd_driver_loaded;
+}
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
+/*
+ * Helper function to make sysfs_emit_at() happy: align buf down to the
+ * start of its page and record the offset within that page.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+ if (!*buf || !offset)
+ return;
+
+ *offset = offset_in_page(*buf);
+ *buf -= *offset;
+}
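A minimal sketch of the calling pattern this helper enables (a hypothetical handler for illustration, assuming the usual SMU kernel context; it is not one of the drivers patched above): rewind buf to its page start once, then append with sysfs_emit_at() and return the accumulated size.

static int example_print_levels(struct smu_context *smu, char *buf)
{
	int size = 0;

	/* Rewind buf to its page start so the sysfs_emit_at() offsets stay valid. */
	smu_cmn_get_sysfs_buf(&buf, &size);

	size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
	size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 1000, 2000);

	return size;
}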
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
#endif
#endif
if (switch_mmu_context) {
struct etnaviv_iommu_context *old_context = gpu->mmu_context;
- etnaviv_iommu_context_get(mmu_context);
- gpu->mmu_context = mmu_context;
+ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
etnaviv_iommu_context_put(old_context);
}
list_del(&mapping->obj_node);
}
- etnaviv_iommu_context_get(mmu_context);
- mapping->context = mmu_context;
+ mapping->context = etnaviv_iommu_context_get(mmu_context);
mapping->use = 1;
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
goto err_submit_objects;
submit->ctx = file->driver_priv;
- etnaviv_iommu_context_get(submit->ctx->mmu);
- submit->mmu_context = submit->ctx->mmu;
+ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
/* We rely on the GPU running, so program the clock */
etnaviv_gpu_update_clock(gpu);
+ gpu->fe_running = false;
+ gpu->exec_state = -1;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
+
return 0;
}
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
}
+
+ gpu->fe_running = true;
}
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
- &gpu->mmu_context->cmdbuf_mapping);
u16 prefetch;
+ u32 address;
/* setup the MMU */
- etnaviv_iommu_restore(gpu, gpu->mmu_context);
+ etnaviv_iommu_restore(gpu, context);
/* Start command processor */
prefetch = etnaviv_buffer_init(gpu);
+ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
etnaviv_gpu_start_fe(gpu, address, prefetch);
}
/* Now program the hardware */
mutex_lock(&gpu->lock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
spin_unlock(&gpu->event_spinlock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
- gpu->mmu_context = NULL;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
goto out_unlock;
}
- if (!gpu->mmu_context) {
- etnaviv_iommu_context_get(submit->mmu_context);
- gpu->mmu_context = submit->mmu_context;
- etnaviv_gpu_start_fe_idleloop(gpu);
- } else {
- etnaviv_iommu_context_get(gpu->mmu_context);
- submit->prev_mmu_context = gpu->mmu_context;
- }
+ if (!gpu->fe_running)
+ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->initialized && gpu->mmu_context) {
+ if (gpu->initialized && gpu->fe_running) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
*/
etnaviv_gpu_wait_idle(gpu, 100);
- etnaviv_iommu_context_put(gpu->mmu_context);
- gpu->mmu_context = NULL;
+ gpu->fe_running = false;
}
gpu->exec_state = -1;
etnaviv_gpu_hw_suspend(gpu);
#endif
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+
if (gpu->initialized) {
etnaviv_cmdbuf_free(&gpu->buffer);
etnaviv_iommu_global_fini(gpu);
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
bool initialized;
+ bool fe_running;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
/* set base addresses */
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)v2_context->mtlb_dma,
(u32)context->global->bad_page_dma);
if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
lower_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
etnaviv_iommu_remove_mapping(context, m);
+ etnaviv_iommu_context_put(m->context);
m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
kref_get(&ctx->refcount);
+ return ctx;
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
*/
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
- sizeof(intel_dp->edp_dpcd))
+ sizeof(intel_dp->edp_dpcd)) {
drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
(int)sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+ }
+
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
}
if (ret)
- intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+ ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ if (ctx->syncobj)
+ drm_syncobj_put(ctx->syncobj);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
if (vm)
i915_vm_close(vm);
- if (ctx->syncobj)
- drm_syncobj_put(ctx->syncobj);
-
ctx->file_priv = ERR_PTR(-EBADF);
/*
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import != &obj->base) {
pr_err("i915_gem_prime_import created a new object!\n");
err = -EINVAL;
goto out_import;
}
- import_obj = to_intel_bo(import);
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
PTR_ERR(import));
err = PTR_ERR(import);
+ } else {
+ err = 0;
}
dma_buf_put(dmabuf);
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import == &obj->base) {
pr_err("i915_gem_prime_import reused gem object!\n");
goto out_import;
}
- import_obj = to_intel_bo(import);
-
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
if (err) {
return I915_MMAP_TYPE_GTT;
}
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+ unsigned long size)
+{
+ if (HAS_LMEM(i915)) {
+ struct intel_memory_region *sys_region =
+ i915->mm.regions[INTEL_REGION_SMEM];
+
+ return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+ }
+
+ return i915_gem_object_create_internal(i915, size);
+}
+
static bool assert_mmap_offset(struct drm_i915_private *i915,
unsigned long size,
int expected)
u64 offset;
int ret;
- obj = i915_gem_object_create_internal(i915, size);
+ obj = create_sys_or_internal(i915, size);
if (IS_ERR(obj))
return expected && expected == PTR_ERR(obj);
struct drm_mm_node *hole, *next;
int loop, err = 0;
u64 offset;
+ int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
/* Disable background reaper */
disable_retire_worker(i915);
}
/* Too large */
- if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
err = -EINVAL;
goto out;
}
/* Fill the hole, further allocation attempts should then fail */
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = create_sys_or_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("Unable to create object for reclaimed hole\n");
goto err_obj;
}
- if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
err = -EINVAL;
goto err_obj;
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
bool no_map;
- if (HAS_LMEM(i915))
+ if (obj->ops->mmap_offset)
return type == I915_MMAP_TYPE_FIXED;
else if (type == I915_MMAP_TYPE_FIXED)
return false;
u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
- return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+ return freq;
}
static u32 intel_rps_get_req(u32 pureq)
__uc_free_load_err_log(uc);
}
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
- return intel_guc_ct_enabled(&guc->ct);
-}
-
/*
* Events triggered while CT buffers are disabled are logged in the SCRATCH_15
* register using the same bits used in the CT message payload. Since our
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
/* we need communication to be enabled to reply to GuC */
- GEM_BUG_ON(!guc_communication_enabled(guc));
+ GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
spin_lock_irq(&guc->irq_lock);
if (guc->mmio_msg) {
struct drm_i915_private *i915 = gt->i915;
int ret;
- GEM_BUG_ON(guc_communication_enabled(guc));
+ GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
return 0;
/* Make sure we enable communication if and only if it's disabled */
- GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+ GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
if (enable_communication)
guc_enable_communication(guc);
args->v0.count = 0;
args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
- args->v0.pwrsrc = -ENOSYS;
+ args->v0.pwrsrc = -ENODEV;
args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
#endif
if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
- rdev->agp = radeon_agp_head_init(rdev->ddev);
+ rdev->agp = radeon_agp_head_init(dev);
if (rdev->agp) {
rdev->agp->agp_mtrr = arch_phys_wc_add(
rdev->agp->agp_info.aper_base,
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
if (vc4_hdmi->hpd_gpio &&
gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
connected = true;
}
}
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
union hdmi_infoframe frame;
int ret;
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
if (!vc4_hdmi_supports_scrambling(encoder, mode))
return;
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
+ struct drm_crtc *crtc = encoder->crtc;
/*
- * At boot, connector->state will be NULL. Since we don't know the
+ * At boot, encoder->crtc will be NULL. Since we don't know the
* state of the scrambler and in order to avoid any
* inconsistency, let's disable it all the time.
*/
- if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
return;
- if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
return;
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long bvb_rate, pixel_rate, hsm_rate;
int ret;
return;
}
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (pixel_rate > 297000000)
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
{
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_crtc *crtc = connector->state->crtc;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 n, cts;
u64 tmp;
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
/*
* If the HDMI encoder hasn't probed, or the encoder is
* currently in DVI mode, treat the codec dai as missing.
*/
- if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+ if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE))
return -ENODEV;
return 0;
}
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
- return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret)
- return ret;
-
- return 0;
-}
-#endif
-
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
{}
};
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
- SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
- vc4_hdmi_runtime_resume,
- NULL)
-};
-
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
- .pm = &vc4_hdmi_pm_ops,
},
};
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
set_dma_addr(priv->param.dma,
- (int) priv->tx_buf[priv->tx_tail] + n);
+ virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
set_dma_count(priv->param.dma,
priv->tx_len[priv->tx_tail] - n);
release_dma_lock(flags);
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_READ);
set_dma_addr(priv->param.dma,
- (int) priv->rx_buf[priv->rx_head]);
+ virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
enable_dma(priv->param.dma);
if (priv->param.dma >= 0) {
flags = claim_dma_lock();
set_dma_addr(priv->param.dma,
- (int) priv->rx_buf[priv->rx_head]);
+ virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
} else {
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
- if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+ if (h->ns_id != nsid)
+ continue;
+ if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
return h;
}
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list)) {
+ list_del_init(&ns->head->entry);
+ last_path = true;
+ }
mutex_unlock(&ns->ctrl->subsys->lock);
/* guarantee not available in head->list */
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
- if (blk_get_integrity(ns->disk))
- blk_integrity_unregister(ns->disk);
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);
- /* Synchronize with nvme_init_ns_head() */
- mutex_lock(&ns->head->subsys->lock);
- if (list_empty(&ns->head->list)) {
- list_del_init(&ns->head->entry);
- last_path = true;
- }
- mutex_unlock(&ns->head->subsys->lock);
if (last_path)
nvme_mpath_shutdown_disk(ns->head);
nvme_put_ns(ns);
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+ unsigned nsid;
+again:
+ nsid = le32_to_cpu(desc->nsids[n]);
if (ns->head->ns_id < nsid)
continue;
if (ns->head->ns_id == nsid)
nvme_update_ns_ana_state(desc, ns);
if (++n == nr_nsids)
break;
+ if (ns->head->ns_id > nsid)
+ goto again;
}
up_read(&ctrl->namespaces_rwsem);
return 0;
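The `goto again` added above keeps two ascending sequences in step: when the current namespace ID has already passed the descriptor's nsid, the nsid cursor advances and the same namespace is re-checked, so descriptor entries without a matching namespace no longer desynchronise the walk. A self-contained sketch of the same pattern with made-up ID arrays (not the driver's data structures):

#include <stdio.h>

int main(void)
{
	/* Both sequences are sorted ascending; the values are purely illustrative. */
	const unsigned int ns_ids[]   = { 1, 2, 5, 7 };
	const unsigned int desc_ids[] = { 2, 3, 5, 7 };
	const unsigned int nr_ns = 4, nr_desc = 4;
	unsigned int n = 0, nsid;

	for (unsigned int i = 0; i < nr_ns; i++) {
again:
		nsid = desc_ids[n];
		if (ns_ids[i] < nsid)
			continue;
		if (ns_ids[i] == nsid)
			printf("update ns %u\n", nsid); /* 2, 5 and 7 match */
		if (++n == nr_desc)
			break;
		if (ns_ids[i] > nsid)
			goto again;
	}
	return 0;
}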
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
mutex_destroy(&queue->queue_lock);
}
for (i = 0; i < queue->queue_size; i++) {
ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
if (ret)
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
if (ret) {
dev_err(ctrl->ctrl.device,
"rdma_connect_locked failed (%d).\n", ret);
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- nvme_rdma_destroy_queue_ib(queue);
- fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
} while (ret > 0);
}
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !list_empty(&queue->send_list) ||
+ !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
- } else if (last) {
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
+
+ if (last && nvme_tcp_queue_more(queue))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
read_unlock_bh(&sk->sk_callback_lock);
}
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
- return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
-}
-
static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
queue->request = NULL;
pending = true;
else if (unlikely(result < 0))
break;
- } else
- pending = !llist_empty(&queue->req_list);
+ }
result = nvme_tcp_try_recv(queue);
if (result > 0)
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%*s\n",
+ return snprintf(page, PAGE_SIZE, "%.*s\n",
NVMET_SN_MAX_SIZE, subsys->serial);
}
break;
}
- if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+ /*
+ * Attempt to initialize a restricted-dma-pool region if one was found.
+ * Note that count can hold a negative error code.
+ */
+ if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
-DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
{ .parse_prop = parse_resets, },
{ .parse_prop = parse_leds, },
{ .parse_prop = parse_backlight, },
- { .parse_prop = parse_phy_handle, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
{ .parse_prop = parse_regulators, },
void pci_set_acpi_fwnode(struct pci_dev *dev)
{
- if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+ if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
ACPI_COMPANION_SET(&dev->dev,
acpi_pci_find_companion(&dev->dev));
}
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
* controller to VGA.
*/
static void quirk_gpu_usb(struct pci_dev *usb)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
/*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
* to VGA. Currently there is no class code defined for UCSI device over PCI
* so using UNKNOWN class for now and it will be updated when UCSI
* over PCI gets a class code.
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_UNKNOWN, 8,
quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN, 8,
+ quirk_gpu_usb_typec_ucsi);
/*
* Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
return off ?: PCI_VPD_SZ_INVALID;
}
+static bool pci_vpd_available(struct pci_dev *dev)
+{
+ struct pci_vpd *vpd = &dev->vpd;
+
+ if (!vpd->cap)
+ return false;
+
+ if (vpd->len == 0) {
+ vpd->len = pci_vpd_size(dev);
+ if (vpd->len == PCI_VPD_SZ_INVALID) {
+ vpd->cap = 0;
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
* Wait for last operation to complete.
* This code has to spin since there is no other notification from the PCI
loff_t end = pos + count;
u8 *buf = arg;
- if (!vpd->cap)
+ if (!pci_vpd_available(dev))
return -ENODEV;
if (pos < 0)
loff_t end = pos + count;
int ret = 0;
- if (!vpd->cap)
+ if (!pci_vpd_available(dev))
return -ENODEV;
if (pos < 0 || (pos & 3) || (count & 3))
void pci_vpd_init(struct pci_dev *dev)
{
+ if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+ return;
+
dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
mutex_init(&dev->vpd.lock);
-
- if (!dev->vpd.len)
- dev->vpd.len = pci_vpd_size(dev);
-
- if (dev->vpd.len == PCI_VPD_SZ_INVALID)
- dev->vpd.cap = 0;
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
{
- unsigned int len = dev->vpd.len;
+ unsigned int len;
void *buf;
int cnt;
- if (!dev->vpd.cap)
+ if (!pci_vpd_available(dev))
return ERR_PTR(-ENODEV);
+ len = dev->vpd.len;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
#define AMD_CPU_ID_YC 0x14B5
#define PMC_MSG_DELAY_MIN_US 100
-#define RESPONSE_REGISTER_LOOP_MAX 200
+#define RESPONSE_REGISTER_LOOP_MAX 20000
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
- depends on DELL_WMI
- depends on LEDS_TRIGGER_AUDIO
+ depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
help
This option adds integration with the "Dell Hardware Privacy"
feature of Dell laptops to the dell-wmi driver.
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
{ }
};
+/*
+ * Some devices, even non-convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {} /* Array terminator */
+};
+
struct intel_hid_priv {
struct input_dev *input_dev;
struct input_dev *array;
struct input_dev *switches;
bool wakeup_mode;
- bool dual_accel;
+ bool auto_add_switch;
};
#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
* Some convertibles have an unreliable VGBS return, which could cause an
* incorrect SW_TABLET_MODE report; in these cases we enable support when
* receiving the first event instead of during driver setup.
- *
- * See dual_accel_detect.h for more info on the dual_accel check.
*/
- if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+ if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
dev_info(&device->dev, "switch event received, enabling switch support\n");
err = intel_hid_switches_setup(device);
if (err)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
- priv->dual_accel = dual_accel_detect();
+ /* See dual_accel_detect.h for more info on the dual_accel check. */
+ priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
err = intel_hid_input_setup(device);
if (err) {
* which provide mailbox interface for power management usage.
*/
-#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
.remove = intel_punit_ipc_remove,
.driver = {
.name = "intel_punit_ipc",
- .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+ .acpi_match_table = punit_ipc_acpi_ids,
},
};
goto out_platform_registered;
}
product = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (strlen(product) > 4)
+ if (product && strlen(product) > 4)
switch (product[4]) {
case '5':
case '6':
};
static const struct property_entry chuwi_hi10_plus_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1914),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1283),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
};
static const struct ts_dmi_data chuwi_hi10_plus_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10plus.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 34056,
+ .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+ 0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+ 0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+ 0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+ },
.acpi_name = "MSSL0017:00",
.properties = chuwi_hi10_plus_props,
};
.properties = chuwi_hi10_pro_props,
};
+static const struct property_entry chuwi_hibook_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hibook.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 40392,
+ .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+ 0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+ 0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+ 0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+ },
+ .acpi_name = "MSSL0017:00",
+ .properties = chuwi_hibook_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
},
},
{
+ /* Chuwi HiBook (CWI514) */
+ .driver_data = (void *)&chuwi_hibook_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
-MODULE_ALIAS("platform:max77836-regulator");
RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
- RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
{}
};
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
- sclp.has_sipl = !!(sccb->cbl & 0x4000);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
if (sccb->cpuoff > 134)
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+ if (sccb->cpuoff > 137)
+ sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
* ap_init_qci_info(): Allocate and query qci config info.
* Also updates the static variables ap_max_domain_id and
* ap_max_adapter_id if this info is available.
-
*/
static void __init ap_init_qci_info(void)
{
/**
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
+ * @floating: ignored
*/
static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
/**
* ap_scan_bus(): Scan the AP bus for new devices
* Runs periodically, workqueue timer (ap_config_time)
+ * @unused: Unused pointer.
*/
static void ap_scan_bus(struct work_struct *unused)
{
/**
* ap_queue_enable_irq(): Enable interrupt support on this AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
* @ind: the notification indicator byte
*
* Enables interruption on AP queue via ap_aqic(). Based on the return
/**
* ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
*
* Submit the Reset command to an AP queue.
*/
int ret;
bool use_dma;
+ /* Zero length transfers won't trigger an interrupt on completion */
+ if (!xfer->len) {
+ spi_finalize_current_transfer(ctlr);
+ return 1;
+ }
+
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
struct dma_async_tx_descriptor *tx_dma_desc;
};
-static int tegra_slink_runtime_suspend(struct device *dev);
-static int tegra_slink_runtime_resume(struct device *dev);
-
static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
}
#endif
+#ifdef CONFIG_PM
static int tegra_slink_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
}
return 0;
}
+#endif /* CONFIG_PM */
static const struct dev_pm_ops slink_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
const struct spi_device *spi = to_spi_device(dev);
int len;
- len = of_device_modalias(dev, buf, PAGE_SIZE);
- if (len != -ENODEV)
- return len;
-
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
const struct spi_device *spi = to_spi_device(dev);
int rc;
- rc = of_device_uevent_modalias(dev, env);
- if (rc != -ENODEV)
- return rc;
-
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
config FB_TGA
tristate "TGA/SFB+ framebuffer support"
- depends on FB && (ALPHA || TC)
+ depends on FB
+ depends on PCI || TC
+ depends on ALPHA || TC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
implements them.
config XEN_PVCALLS_BACKEND
- bool "XEN PV Calls backend driver"
+ tristate "XEN PV Calls backend driver"
depends on INET && XEN && XEN_BACKEND
help
Experimental backend for the Xen PV Calls protocol
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
/*
- * balloon_process() state:
+ * balloon_thread() state:
*
* BP_DONE: done or nothing to do,
* BP_WAIT: wait to be rescheduled,
BP_ECANCELED
};
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
static DEFINE_MUTEX(balloon_mutex);
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
if (val == MEM_ONLINE)
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
return NOTIFY_OK;
}
}
/*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either the state is not BP_EAGAIN and a ballooning action
+ * is needed, or the credit has changed while the state is BP_EAGAIN.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+ if (state != BP_EAGAIN)
+ credit = 0;
+
+ return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
*/
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
{
enum bp_state state = BP_DONE;
long credit;
+ unsigned long timeout;
+
+ set_freezable();
+ for (;;) {
+ if (state == BP_EAGAIN)
+ timeout = balloon_stats.schedule_delay * HZ;
+ else
+ timeout = 3600 * HZ;
+ credit = current_credit();
+ wait_event_interruptible_timeout(balloon_thread_wq,
+ balloon_thread_cond(state, credit), timeout);
+
+ if (kthread_should_stop())
+ return 0;
- do {
mutex_lock(&balloon_mutex);
credit = current_credit();
mutex_unlock(&balloon_mutex);
cond_resched();
-
- } while (credit && state == BP_DONE);
-
- /* Schedule more work if there is some still to be done. */
- if (state == BP_EAGAIN)
- schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+ }
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
/* The balloon may be too large now. Shrink it if needed. */
if (current_credit())
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
mutex_unlock(&balloon_mutex);
}
static int __init balloon_init(void)
{
+ struct task_struct *task;
+
if (!xen_domain())
return -ENODEV;
}
#endif
+ task = kthread_run(balloon_thread, NULL, "xen-balloon");
+ if (IS_ERR(task)) {
+ pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+ return PTR_ERR(task);
+ }
+
/* Init the xen-balloon driver. */
xen_balloon_init();
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
- int i, rc;
- int dma_bits;
+ int rc;
+ unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+ unsigned int i, dma_bits = order + PAGE_SHIFT;
dma_addr_t dma_handle;
phys_addr_t p = virt_to_phys(buf);
- dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+ BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+ BUG_ON(nslabs % IO_TLB_SEGSIZE);
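+ /*
+ * Each xen_create_contiguous_region() call below covers exactly
+ * IO_TLB_SEGSIZE slabs, hence the power-of-two and
+ * multiple-of-IO_TLB_SEGSIZE checks above.
+ */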
i = 0;
do {
- int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
do {
rc = xen_create_contiguous_region(
- p + (i << IO_TLB_SHIFT),
- get_order(slabs << IO_TLB_SHIFT),
+ p + (i << IO_TLB_SHIFT), order,
dma_bits, &dma_handle);
} while (rc && dma_bits++ < MAX_DMA_BITS);
if (rc)
return rc;
- i += slabs;
+ i += IO_TLB_SEGSIZE;
} while (i < nslabs);
return 0;
}
return "";
}
-#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
{
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned long bytes = swiotlb_size_or_default();
order--;
}
if (!start)
- goto error;
+ goto exit;
if (order != get_order(bytes)) {
pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
(PAGE_SIZE << order) >> 20);
swiotlb_set_max_segment(PAGE_SIZE);
return 0;
error:
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
- pr_info("Lowering to %luMB\n",
- (nslabs << IO_TLB_SHIFT) >> 20);
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+ bytes = nslabs << IO_TLB_SHIFT;
+ pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
}
+exit:
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- free_pages((unsigned long)start, order);
return rc;
}
rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
memblock_free(__pa(start), PAGE_ALIGN(bytes));
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
bytes = nslabs << IO_TLB_SHIFT;
pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
}
- if (swiotlb_init_with_tbl(start, nslabs, false))
+ if (swiotlb_init_with_tbl(start, nslabs, true))
panic("Cannot allocate SWIOTLB buffer");
swiotlb_set_max_segment(PAGE_SIZE);
}
#include "internal.h"
/*
+ * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+ struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+ unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+ struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+ struct afs_vnode *vnode;
+ struct afs_cell *cell = server->cell;
+
+ down_read(&cell->fs_open_mmaps_lock);
+
+ list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+ if (vnode->cb_server == server) {
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ queue_work(system_unbound_wq, &vnode->cb_work);
+ }
+ }
+
+ up_read(&cell->fs_open_mmaps_lock);
+}
+
+/*
* Allow the fileserver to request callback state (re-)initialisation.
* Unfortunately, UUIDs are not guaranteed unique.
*/
rcu_read_lock();
do {
server->cb_s_break++;
- server = rcu_dereference(server->uuid_next);
- } while (0);
+ atomic_inc(&server->cell->fs_s_break);
+ if (!list_empty(&server->cell->fs_open_mmaps))
+ queue_work(system_unbound_wq, &server->initcb_work);
+
+ } while ((server = rcu_dereference(server->uuid_next)));
rcu_read_unlock();
}
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
vnode->cb_break++;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
afs_clear_permits(vnode);
if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
afs_lock_may_be_available(vnode);
+ if (reason != afs_cb_break_for_deleted &&
+ vnode->status.type == AFS_FTYPE_FILE &&
+ atomic_read(&vnode->cb_nr_mmap))
+ queue_work(system_unbound_wq, &vnode->cb_work);
+
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
} else {
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
seqlock_init(&cell->fs_lock);
+ INIT_LIST_HEAD(&cell->fs_open_mmaps);
+ init_rwsem(&cell->fs_open_mmaps_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
*/
static int afs_d_revalidate_rcu(struct dentry *dentry)
{
- struct afs_vnode *dvnode, *vnode;
+ struct afs_vnode *dvnode;
struct dentry *parent;
- struct inode *dir, *inode;
+ struct inode *dir;
long dir_version, de_version;
_enter("%p", dentry);
return -ECHILD;
}
- /* Check to see if the vnode referred to by the dentry still
- * has a callback.
- */
- if (d_really_is_positive(dentry)) {
- inode = d_inode_rcu(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- if (!afs_check_validity(vnode))
- return -ECHILD;
- }
- }
-
return 1; /* Still valid */
}
if (IS_ERR(key))
key = NULL;
- if (d_really_is_positive(dentry)) {
- inode = d_inode(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- afs_validate(vnode, key);
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- goto out_bad;
- }
- }
-
- /* lock down the parent dentry so we can peer at it */
+ /* Hold the parent dentry so we can peer at it */
parent = dget_parent(dentry);
dir = AFS_FS_I(d_inode(parent));
if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
_debug("%pd: parent dir deleted", dentry);
- goto out_bad_parent;
+ goto not_found;
}
/* We only need to invalidate a dentry if the server's copy changed
case 0:
/* the filename maps to something */
if (d_really_is_negative(dentry))
- goto out_bad_parent;
+ goto not_found;
inode = d_inode(dentry);
if (is_bad_inode(inode)) {
printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
dentry);
- goto out_bad_parent;
+ goto not_found;
}
vnode = AFS_FS_I(inode);
dentry, fid.unique,
vnode->fid.unique,
vnode->vfs_inode.i_generation);
- write_seqlock(&vnode->cb_lock);
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- write_sequnlock(&vnode->cb_lock);
goto not_found;
}
goto out_valid;
default:
_debug("failed to iterate dir %pd: %d",
parent, ret);
- goto out_bad_parent;
+ goto not_found;
}
out_valid:
_leave(" = 1 [valid]");
return 1;
- /* the dirent, if it exists, now points to a different vnode */
not_found:
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_NFSFS_RENAMED;
- spin_unlock(&dentry->d_lock);
-
-out_bad_parent:
_debug("dropping dentry %pd2", dentry);
dput(parent);
-out_bad:
key_put(key);
_leave(" = 0 [bad]");
goto error;
}
+ ret = afs_validate(vnode, op->key);
+ if (ret < 0)
+ goto error_op;
+
afs_op_set_vnode(op, 0, dvnode);
afs_op_set_vnode(op, 1, vnode);
op->file[0].dv_delta = 1;
op->create.reason = afs_edit_dir_for_link;
return afs_do_sync_operation(op);
+error_op:
+ afs_put_operation(op);
error:
d_drop(dentry);
_leave(" = %d", ret);
if (IS_ERR(op))
return PTR_ERR(op);
+ ret = afs_validate(vnode, op->key);
+ op->error = ret;
+ if (ret < 0)
+ goto error;
+
afs_op_set_vnode(op, 0, orig_dvnode);
afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
op->file[0].dv_delta = 1;
if (b == nr_blocks) {
_debug("init %u", b);
afs_edit_init_block(meta, block, b);
- i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+ afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
/* Only lower dir pages have a counter in the header. */
new_directory:
afs_edit_init_block(meta, meta, 0);
i_size = AFS_DIR_BLOCK_SIZE;
- i_size_write(&vnode->vfs_inode, i_size);
+ afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
page = page0;
block = meta;
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static void afs_readahead(struct readahead_control *ractl);
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
const struct file_operations afs_file_operations = {
.open = afs_open,
.release = afs_release,
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = afs_file_read_iter,
.write_iter = afs_file_write,
.mmap = afs_file_mmap,
.splice_read = generic_file_splice_read,
};
static const struct vm_operations_struct afs_vm_ops = {
+ .open = afs_vm_open,
+ .close = afs_vm_close,
.fault = filemap_fault,
- .map_pages = filemap_map_pages,
+ .map_pages = afs_vm_map_pages,
.page_mkwrite = afs_page_mkwrite,
};
fsreq->subreq = subreq;
fsreq->pos = subreq->start + subreq->transferred;
fsreq->len = subreq->len - subreq->transferred;
- fsreq->key = subreq->rreq->netfs_priv;
+ fsreq->key = key_get(subreq->rreq->netfs_priv);
fsreq->vnode = vnode;
fsreq->iter = &fsreq->def_iter;
fsreq->pos, fsreq->len);
afs_fetch_data(fsreq->vnode, fsreq);
+ afs_put_read(fsreq);
}
static int afs_symlink_readpage(struct page *page)
return 1;
}
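+/*
+ * The first mmap of a vnode adds it to its cell's fs_open_mmaps list so that
+ * a CB.InitCallBackState from the server can find it and queue invalidation
+ * of its mappings; the last unmap removes it again in afs_drop_open_mmap().
+ */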
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+ if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ list_add_tail(&vnode->cb_mmap_link,
+ &vnode->volume->cell->fs_open_mmaps);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ }
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+ if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+ return;
+
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ if (atomic_read(&vnode->cb_nr_mmap) == 0)
+ list_del_init(&vnode->cb_mmap_link);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
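+ /* Flush any invalidation work still queued for this vnode now that
+ * the last mapping is gone.
+ */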
+ flush_work(&vnode->cb_work);
+}
+
/*
* Handle setting up a memory mapping on an AFS file.
*/
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
int ret;
+ afs_add_open_mmap(vnode);
+
ret = generic_file_mmap(file, vma);
if (ret == 0)
vma->vm_ops = &afs_vm_ops;
+ else
+ afs_drop_open_mmap(vnode);
return ret;
}
+
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+ afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+ afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+ struct afs_file *af = vmf->vma->vm_file->private_data;
+
+ switch (afs_validate(vnode, af->key)) {
+ case 0:
+ return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EINTR:
+ case -ERESTARTSYS:
+ return VM_FAULT_RETRY;
+ case -ESTALE:
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
+ int ret;
+
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
+ return generic_file_read_iter(iocb, iter);
+}
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
+#include "protocol_afs.h"
#include "protocol_yfs.h"
static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
struct afs_addr_list *alist = call->alist;
struct afs_server *server = call->server;
unsigned int index = call->addr_ix;
- unsigned int rtt_us = 0;
+ unsigned int rtt_us = 0, cap0;
int ret = call->error;
_enter("%pU,%u", &server->uuid, index);
clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
}
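+ /* The first capabilities word tells us whether this fileserver
+ * supports the 64-bit FetchData64/StoreData64 RPCs; the FS
+ * operations use this flag to pick the right variant.
+ */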
+ cap0 = ntohl(call->tmp);
+ if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+ set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+ else
+ clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
}
if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
struct afs_read *req = op->fetch.req;
__be32 *bp;
- if (upper_32_bits(req->pos) ||
- upper_32_bits(req->len) ||
- upper_32_bits(req->pos + req->len))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_fetch_data64(op);
_enter("");
(unsigned long long)op->store.pos,
(unsigned long long)op->store.i_size);
- if (upper_32_bits(op->store.pos) ||
- upper_32_bits(op->store.size) ||
- upper_32_bits(op->store.i_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_store_data64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- if (upper_32_bits(attr->ia_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_setattr_size64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
return ret;
count = ntohl(call->tmp);
-
call->count = count;
call->count2 = count;
- afs_extract_discard(call, count * sizeof(__be32));
+ if (count == 0) {
+ call->unmarshall = 4;
+ call->tmp = 0;
+ break;
+ }
+
+ /* Extract the first word of the capabilities to call->tmp */
+ afs_extract_to_tmp(call);
call->unmarshall++;
fallthrough;
- /* Extract capabilities words */
case 2:
ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
- /* TODO: Examine capabilities */
+ afs_extract_discard(call, (count - 1) * sizeof(__be32));
+ call->unmarshall++;
+ fallthrough;
+
+ /* Extract remaining capabilities words */
+ case 3:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
call->unmarshall++;
break;
}
/*
- * Set the file size and block count. Estimate the number of 512 bytes blocks
- * used, rounded up to nearest 1K for consistency with other AFS clients.
- */
-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
-{
- i_size_write(&vnode->vfs_inode, size);
- vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
-}
-
-/*
* Initialise an inode from the vnode status.
*/
static int afs_inode_init_from_status(struct afs_operation *op,
}
/*
- * Get the server reinit counter for a vnode's current server.
+ * Check to see if we have a server currently serving this volume and that it
+ * hasn't been reinitialised or dropped from the list.
*/
-static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+static bool afs_check_server_good(struct afs_vnode *vnode)
{
- struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+ struct afs_server_list *slist;
struct afs_server *server;
+ bool good;
int i;
+ if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
+ return true;
+
+ rcu_read_lock();
+
+ slist = rcu_dereference(vnode->volume->servers);
for (i = 0; i < slist->nr_servers; i++) {
server = slist->servers[i].server;
if (server == vnode->cb_server) {
- *_s_break = READ_ONCE(server->cb_s_break);
- return true;
+ good = (vnode->cb_s_break == server->cb_s_break);
+ rcu_read_unlock();
+ return good;
}
}
+ rcu_read_unlock();
return false;
}
*/
bool afs_check_validity(struct afs_vnode *vnode)
{
- struct afs_volume *volume = vnode->volume;
enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
time64_t now = ktime_get_real_seconds();
- bool valid;
- unsigned int cb_break, cb_s_break, cb_v_break;
+ unsigned int cb_break;
int seq = 0;
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- cb_v_break = READ_ONCE(volume->cb_v_break);
cb_break = vnode->cb_break;
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
- afs_get_s_break_rcu(vnode, &cb_s_break)) {
- if (vnode->cb_s_break != cb_s_break ||
- vnode->cb_v_break != cb_v_break) {
- vnode->cb_s_break = cb_s_break;
- vnode->cb_v_break = cb_v_break;
- need_clear = afs_cb_break_for_vsbreak;
- valid = false;
- } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ if (vnode->cb_v_break != vnode->volume->cb_v_break)
+ need_clear = afs_cb_break_for_v_break;
+ else if (!afs_check_server_good(vnode))
+ need_clear = afs_cb_break_for_s_reinit;
+ else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
need_clear = afs_cb_break_for_zap;
- valid = false;
- } else if (vnode->cb_expires_at - 10 <= now) {
+ else if (vnode->cb_expires_at - 10 <= now)
need_clear = afs_cb_break_for_lapsed;
- valid = false;
- } else {
- valid = true;
- }
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- valid = true;
+ ;
} else {
- vnode->cb_v_break = cb_v_break;
- valid = false;
+ need_clear = afs_cb_break_no_promise;
}
} while (need_seqretry(&vnode->cb_lock, seq));
done_seqretry(&vnode->cb_lock, seq);
- if (need_clear != afs_cb_break_no_break) {
- write_seqlock(&vnode->cb_lock);
- if (cb_break == vnode->cb_break)
- __afs_break_callback(vnode, need_clear);
- else
- trace_afs_cb_miss(&vnode->fid, need_clear);
- write_sequnlock(&vnode->cb_lock);
- valid = false;
- }
+ if (need_clear == afs_cb_break_no_break)
+ return true;
- return valid;
+ write_seqlock(&vnode->cb_lock);
+ if (need_clear == afs_cb_break_no_promise)
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ else if (cb_break == vnode->cb_break)
+ __afs_break_callback(vnode, need_clear);
+ else
+ trace_afs_cb_miss(&vnode->fid, need_clear);
+ write_sequnlock(&vnode->cb_lock);
+ return false;
}
/*
*/
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
- bool valid;
int ret;
_enter("{v={%llx:%llu} fl=%lx},%x",
vnode->fid.vid, vnode->fid.vnode, vnode->flags,
key_serial(key));
- rcu_read_lock();
- valid = afs_check_validity(vnode);
- rcu_read_unlock();
-
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- clear_nlink(&vnode->vfs_inode);
+ if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
+ if (vnode->vfs_inode.i_nlink)
+ clear_nlink(&vnode->vfs_inode);
+ goto valid;
+ }
- if (valid)
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+ afs_check_validity(vnode))
goto valid;
down_write(&vnode->validate_lock);
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
seqlock_t fs_lock; /* For fs_servers */
+ struct rw_semaphore fs_open_mmaps_lock;
+ struct list_head fs_open_mmaps; /* List of vnodes that are mmapped */
+ atomic_t fs_s_break; /* Counter of CB.InitCallBackState messages */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
struct hlist_node proc_link; /* Link in net->fs_proc */
+ struct work_struct initcb_work; /* Work for CB.InitCallBackState* */
struct afs_server *gc_next; /* Next server in manager's list */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
atomic_t ref; /* Object refcount */
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
+ struct work_struct cb_work; /* Work for mmap'd files */
+ struct list_head cb_mmap_link; /* Link in cell->fs_open_mmaps */
void *cb_server; /* Server with callback/filelock */
+ atomic_t cb_nr_mmap; /* Number of mmaps */
+ unsigned int cb_fs_s_break; /* Mass server break counter (cell->fs_s_break) */
unsigned int cb_s_break; /* Mass break counter on ->server */
unsigned int cb_v_break; /* Mass break counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
/*
* callback.c
*/
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
extern void afs_init_callback_state(struct afs_server *);
extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
}
/*
+ * Set the file size and block count. Estimate the number of 512-byte blocks
+ * used, rounded up to the nearest 1K for consistency with other AFS clients.
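+ * (For example, a 100-byte file is accounted as two 512-byte blocks, i.e. 1K.)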
+ */
+static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+{
+ i_size_write(&vnode->vfs_inode, size);
+ vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+}
+
+/*
* Check for a conflicting operation on a directory that we just unlinked from.
* If someone managed to sneak a link or an unlink in on the file we just
* unlinked, we won't be able to trust nlink on an AFS file (but not YFS).
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* AFS protocol bits
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+
+#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
+
+/* AFS3 Fileserver capabilities word 0 */
+#define AFS3_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Uses UAE errors */
+#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002 /* FetchData64 & StoreData64 supported */
+#define AFS3_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define AFS3_VICED_CAPABILITY_SANEACLS 0x0008 /* ACLs reviewed for sanity - don't use */
yfs_LockMandatoryWrite = 0x101,
yfs_LockMandatoryExtend = 0x102,
};
+
+/* RXYFS Viced Capability Flags */
+#define YFS_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_64BITFILES 0x0002 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define YFS_VICED_CAPABILITY_SANEACLS 0x0008 /* Deprecated v0.195 */
if (vnode->cb_server != server) {
vnode->cb_server = server;
vnode->cb_s_break = server->cb_s_break;
+ vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
vnode->cb_v_break = vnode->volume->cb_v_break;
clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
+ INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
init_waitqueue_head(&server->probe_wq);
INIT_LIST_HEAD(&server->probe_link);
spin_lock_init(&server->probe_lock);
if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
afs_give_up_callbacks(net, server);
+ flush_work(&server->initcb_work);
afs_put_server(net, server, afs_server_trace_destroy);
}
vnode->lock_state = AFS_VNODE_LOCK_NONE;
init_rwsem(&vnode->rmdir_lock);
+ INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
_leave(" = %p", &vnode->vfs_inode);
return &vnode->vfs_inode;
write_seqlock(&vnode->cb_lock);
i_size = i_size_read(&vnode->vfs_inode);
if (maybe_i_size > i_size)
- i_size_write(&vnode->vfs_inode, maybe_i_size);
+ afs_set_i_size(vnode, maybe_i_size);
write_sequnlock(&vnode->cb_lock);
}
}
/* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas)))
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
break;
+ }
- if (!trylock_page(page))
+ if (!trylock_page(page)) {
+ put_page(page);
break;
+ }
if (!PageDirty(page) || PageWriteback(page)) {
unlock_page(page);
+ put_page(page);
break;
}
t = afs_page_dirty_to(page, priv);
if (f != 0 && !new_content) {
unlock_page(page);
+ put_page(page);
break;
}
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
ssize_t result;
size_t count = iov_iter_count(from);
if (!count)
return 0;
+ result = afs_validate(vnode, af->key);
+ if (result < 0)
+ return result;
+
result = generic_file_write_iter(iocb, from);
_leave(" = %zd", result);
*/
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file_inode(file);
- struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_file *af = file->private_data;
+ int ret;
_enter("{%llx:%llu},{n=%pD},%d",
vnode->fid.vid, vnode->fid.vnode, file,
datasync);
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
return file_write_and_wait_range(file, start, end);
}
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_file *af = file->private_data;
unsigned long priv;
vm_fault_t ret = VM_FAULT_RETRY;
_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
+ afs_validate(vnode, af->key);
+
sb_start_pagefault(inode->i_sb);
/* Wait for the page to be written to the cache before we allow it to
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ * CIFS filesystem cache index structure definitions
*
* Copyright (c) 2010 Novell, Inc.
 * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs_debug.c
*
* Copyright (C) International Business Machines Corp., 2000,2005
*
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_fs_sb.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_ioctl.h
*
* Structure definitions for io control for cifs/smb3
*
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/cifs_unicode.c
*
* Copyright (c) International Business Machines Corp., 2000,2009
* Modified by Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsacl.c
*
* Copyright (C) International Business Machines Corp., 2007,2008
* Author(s): Steve French (sfrench@us.ibm.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsacl.h
*
* Copyright (c) International Business Machines Corp., 2007
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsencrypt.c
*
* Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
* for more detailed information
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsfs.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsfs.h
*
* Copyright (c) International Business Machines Corp., 2002, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsglob.h
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
#define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
+#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
unsigned long flags;
spinlock_t writers_lock;
unsigned int writers; /* Number of writers on this inode */
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifspdu.h
*
* Copyright (c) International Business Machines Corp., 2002,2009
* Author(s): Steve French (sfrench@us.ibm.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsproto.h
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+ const char *path);
+
extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifssmb.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
module_put_and_exit(0);
}
-/**
+/*
* Returns true if srcaddr isn't specified and rhs isn't specified, or
* if srcaddr is specified and matches the IP address of the rhs argument
*/
/**
* cifs_setup_ipc - helper to setup the IPC tcon for the session
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ * new tree connection for the IPC (interprocess communication RPC)
*
* A new IPC connection is made and stored in the session
* tcon_ipc. The IPC tcon has the same lifetime as the session.
/**
* cifs_free_ipc - helper to release the session IPC tcon
+ * @ses: smb session to unmount the IPC from
*
 * Needs to be called every time a session is destroyed.
*
/**
* cifs_get_smb_ses - get a session matching @ctx data from @server
+ * @server: server to setup the session to
+ * @ctx: superblock configuration context to use to setup the session
*
* This function assumes it is being called from cifs_mount() where we
* already got a server reference (server refcount +1). See
/**
* cifs_get_tcon - get a tcon matching @ctx data from @ses
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the new tree connection
*
* - tcon refcount is the number of mount points using the tcon.
* - ses refcount is the number of tcon using the session.
return full_path;
}
-/**
+/*
* expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
*
* If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dir.c
*
* vfs operations that deal with dentries
*
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dns_resolve.c
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov (niallain@gmail.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
- * Handles host name to IP address resolution
+ * DNS Resolver upcall management for CIFS DFS
+ * Handles host name to IP address resolution
*
* Copyright (c) International Business Machines Corp., 2008
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/export.c
*
* Copyright (C) International Business Machines Corp., 2007
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/file.c
*
* vfs operations that deal with files
*
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
cinode->lease_granted &&
+ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
inode->i_ctime = inode->i_mtime = current_time(inode);
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
cifs_sb = CIFS_FILE_SB(file);
+ set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/fscache.c - CIFS filesystem cache interface
+ * CIFS filesystem cache interface
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <sjayaraman@suse.de>
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ * CIFS filesystem cache interface definitions
*
* Copyright (c) 2010 Novell, Inc.
 * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/inode.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
goto unlink_out;
}
- cifs_close_deferred_file(CIFS_I(inode));
+ cifs_close_deferred_file_under_dentry(tcon, full_path);
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
goto cifs_rename_exit;
}
- cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, from_name);
if (d_inode(target_dentry) != NULL)
- cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, to_name);
rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
to_name);
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/ioctl.c
*
* vfs operations that deal with io control
*
if (pSMBFile == NULL)
break;
tcon = tlink_tcon(pSMBFile->tlink);
- caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
if (get_user(ExtAttrBits, (int __user *)arg)) {
rc = -EFAULT;
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/link.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
kfree(tmp_list);
}
}
+void
+cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+{
+ struct cifsFileInfo *cfile;
+ struct list_head *tmp;
+ struct file_list *tmp_list, *tmp_next_list;
+ struct list_head file_head;
+ void *page;
+ const char *full_path;
+
+ INIT_LIST_HEAD(&file_head);
+ page = alloc_dentry_path();
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp, &tcon->openFileList) {
+ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ full_path = build_path_from_dentry(cfile->dentry, page);
+ if (strstr(full_path, path)) {
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
+ }
+ }
+ }
+ spin_unlock(&tcon->open_file_lock);
+
+ list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+ _cifsFileInfo_put(tmp_list->cfile, true, false);
+ list_del(&tmp_list->list);
+ kfree(tmp_list);
+ }
+ free_dentry_path(page);
+}
/* parses DFS referral V3 structure
* caller is responsible for freeing target_nodes
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/netmisc.c
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/ntlmssp.h
*
* Copyright (c) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/readdir.c
*
* Directory search handling
*
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/rfc1002pdu.h
*
* Protocol Data Unit definitions for RFC 1001/1002 support
*
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/sess.c
*
* SMB/CIFS session setup handling routines
*
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2file.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Author(s): Steve French (sfrench@us.ibm.com),
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2glob.h
*
* Definitions for various global variables and structures
*
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2inode.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Etersoft, 2012
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2pdu.c
*
* Copyright (C) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2pdu.h
*
* Copyright (c) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2proto.h
*
* Copyright (c) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2status.h
*
* SMB2 Status code (network error) definitions
* Definitions are from MS-ERREF
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2transport.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smberr.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/transport.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/winucase.c
*
* Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
*
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/xattr.c
*
* Copyright (c) International Business Machines Corp., 2003, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
#include "io-wq.h"
static void io_worker_exit(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
if (refcount_dec_and_test(&worker->ref))
complete(&worker->ref_done);
if (worker->flags & IO_WORKER_F_FREE)
hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
- acct->nr_workers--;
preempt_disable();
io_wqe_dec_running(worker);
worker->flags = 0;
*/
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
- bool do_create = false;
-
/*
* Most likely an attempt to queue unbounded work on an io_wq that
* wasn't setup with any unbounded workers.
pr_warn_once("io-wq is not configured for unbound workers");
raw_spin_lock(&wqe->lock);
- if (acct->nr_workers < acct->max_workers) {
- acct->nr_workers++;
- do_create = true;
+ if (acct->nr_workers == acct->max_workers) {
+ raw_spin_unlock(&wqe->lock);
+ return true;
}
+ acct->nr_workers++;
raw_spin_unlock(&wqe->lock);
- if (do_create) {
- atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
- return create_io_worker(wqe->wq, wqe, acct->index);
- }
-
- return true;
+ atomic_inc(&acct->nr_running);
+ atomic_inc(&wqe->wq->worker_refs);
+ return create_io_worker(wqe->wq, wqe, acct->index);
}
static void io_wqe_inc_running(struct io_worker *worker)
}
/* timed out, exit unless we're the last worker */
if (last_timeout && acct->nr_workers > 1) {
+ acct->nr_workers--;
raw_spin_unlock(&wqe->lock);
__set_current_state(TASK_RUNNING);
break;
{
int i, node, prev = 0;
+ BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
+ BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+ BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);
+
for (i = 0; i < 2; i++) {
if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
new_count[i] = task_rlimit(current, RLIMIT_NPROC);
struct iovec fast_iov[UIO_FASTIOV];
const struct iovec *free_iovec;
struct iov_iter iter;
+ struct iov_iter_state iter_state;
size_t bytes_done;
struct wait_page_queue wpq;
};
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_COMPLETE_INLINE_BIT,
REQ_F_REISSUE_BIT,
- REQ_F_DONT_REISSUE_BIT,
REQ_F_CREDS_BIT,
REQ_F_REFCOUNT_BIT,
REQ_F_ARM_LTIMEOUT_BIT,
REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
/* caller should reissue async */
REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
- /* don't attempt request reissue, see io_rw_reissue() */
- REQ_F_DONT_REISSUE = BIT(REQ_F_DONT_REISSUE_BIT),
/* supports async reads */
REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT),
/* supports async writes */
req = list_first_entry(done, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- if (READ_ONCE(req->result) == -EAGAIN &&
- !(req->flags & REQ_F_DONT_REISSUE)) {
- req->iopoll_completed = 0;
- io_req_task_queue_reissue(req);
- continue;
- }
-
__io_cqring_fill_event(ctx, req->user_data, req->result,
io_put_rw_kbuf(req));
(*nr_events)++;
if (!rw)
return !io_req_prep_async(req);
- /* may have left rw->iter inconsistent on -EIOCBQUEUED */
- iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
+ iov_iter_restore(&rw->iter, &rw->iter_state);
return true;
}
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
if (unlikely(res != req->result)) {
- if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
- io_resubmit_prep(req))) {
- req_set_fail(req);
- req->flags |= REQ_F_DONT_REISSUE;
+ if (res == -EAGAIN && io_rw_should_reissue(req)) {
+ req->flags |= REQ_F_REISSUE;
+ return;
}
}
return __io_file_supports_nowait(req->file, rw);
}
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int rw)
{
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw.kiocb;
if (unlikely(ret))
return ret;
- /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
- if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+ /*
+ * If the file is marked O_NONBLOCK, still allow retry for it if it
+ * supports async. Otherwise it's impossible to use O_NONBLOCK files
+ * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+ */
+ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
req->flags |= REQ_F_NOWAIT;
ioprio = READ_ONCE(sqe->ioprio);
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_async_rw *io = req->async_data;
- bool check_reissue = kiocb->ki_complete == io_complete_rw;
/* add previously done IO, if any */
if (io && io->bytes_done > 0) {
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
- if (ret >= 0 && check_reissue)
+ if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
__io_complete_rw(req, ret, 0, issue_flags);
else
io_rw_done(kiocb, ret);
- if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+ if (req->flags & REQ_F_REISSUE) {
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
io_req_task_queue_reissue(req);
} else {
+ unsigned int cflags = io_put_rw_kbuf(req);
+ struct io_ring_ctx *ctx = req->ctx;
+
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret,
- io_put_rw_kbuf(req));
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ mutex_lock(&ctx->uring_lock);
+ __io_req_complete(req, issue_flags, ret, cflags);
+ mutex_unlock(&ctx->uring_lock);
+ } else {
+ __io_req_complete(req, issue_flags, ret, cflags);
+ }
}
}
}
ret = nr;
break;
}
+ if (!iov_iter_is_bvec(iter)) {
+ iov_iter_advance(iter, nr);
+ } else {
+ req->rw.len -= nr;
+ req->rw.addr += nr;
+ }
ret += nr;
if (nr != iovec.iov_len)
break;
- req->rw.len -= nr;
- req->rw.addr += nr;
- iov_iter_advance(iter, nr);
}
return ret;
if (!force && !io_op_defs[req->opcode].needs_async_setup)
return 0;
if (!req->async_data) {
+ struct io_async_rw *iorw;
+
if (io_alloc_async_data(req)) {
kfree(iovec);
return -ENOMEM;
}
io_req_map_rw(req, iovec, fast_iov, iter);
+ iorw = req->async_data;
+ /* we've copied and mapped the iter, ensure state is saved */
+ iov_iter_save_state(&iorw->iter, &iorw->iter_state);
}
return 0;
}
iorw->free_iovec = iov;
if (iov)
req->flags |= REQ_F_NEED_CLEANUP;
+ iov_iter_save_state(&iorw->iter, &iorw->iter_state);
return 0;
}
{
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
- return io_prep_rw(req, sqe);
+ return io_prep_rw(req, sqe, READ);
}
/*
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
- ssize_t io_size, ret, ret2;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ struct iov_iter_state __state, *state;
+ ssize_t ret, ret2;
if (rw) {
iter = &rw->iter;
+ state = &rw->iter_state;
+ /*
+ * We come here from an earlier attempt, restore our state to
+ * match in case it doesn't. It's cheap enough that we don't
+ * need to make this conditional.
+ */
+ iov_iter_restore(iter, state);
iovec = NULL;
} else {
ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ state = &__state;
+ iov_iter_save_state(iter, state);
}
- io_size = iov_iter_count(iter);
- req->result = io_size;
+ req->result = iov_iter_count(iter);
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
return ret ?: -EAGAIN;
}
- ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
+ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret)) {
kfree(iovec);
return ret;
/* no retry on NONBLOCK nor RWF_NOWAIT */
if (req->flags & REQ_F_NOWAIT)
goto done;
- /* some cases will consume bytes even on error returns */
- iov_iter_reexpand(iter, iter->count + iter->truncated);
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = 0;
} else if (ret == -EIOCBQUEUED) {
goto out_free;
- } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+ } else if (ret <= 0 || ret == req->result || !force_nonblock ||
(req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
/* read all, failed, already did sync or don't want to retry */
goto done;
}
+ /*
+ * Don't depend on the iter state matching what was consumed, or being
+ * untouched in case of error. Restore it and we'll advance it
+ * manually if we need to.
+ */
+ iov_iter_restore(iter, state);
+
ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
if (ret2)
return ret2;
iovec = NULL;
rw = req->async_data;
- /* now use our persistent iterator, if we aren't already */
- iter = &rw->iter;
+ /*
+ * Now use our persistent iterator and state, if we aren't already.
+ * We've restored and mapped the iter to match.
+ */
+ if (iter != &rw->iter) {
+ iter = &rw->iter;
+ state = &rw->iter_state;
+ }
do {
- io_size -= ret;
+ /*
+ * We end up here because of a partial read, either from
+ * above or inside this loop. Advance the iter by the bytes
+ * that were consumed.
+ */
+ iov_iter_advance(iter, ret);
+ if (!iov_iter_count(iter))
+ break;
rw->bytes_done += ret;
+ iov_iter_save_state(iter, state);
+
/* if we can retry, do so with the callbacks armed */
if (!io_rw_should_retry(req)) {
kiocb->ki_flags &= ~IOCB_WAITQ;
return 0;
/* we got some bytes, but not all. retry. */
kiocb->ki_flags &= ~IOCB_WAITQ;
- } while (ret > 0 && ret < io_size);
+ iov_iter_restore(iter, state);
+ } while (ret > 0);
done:
kiocb_done(kiocb, ret, issue_flags);
out_free:
{
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- return io_prep_rw(req, sqe);
+ return io_prep_rw(req, sqe, WRITE);
}
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
- ssize_t ret, ret2, io_size;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ struct iov_iter_state __state, *state;
+ ssize_t ret, ret2;
if (rw) {
iter = &rw->iter;
+ state = &rw->iter_state;
+ iov_iter_restore(iter, state);
iovec = NULL;
} else {
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ state = &__state;
+ iov_iter_save_state(iter, state);
}
- io_size = iov_iter_count(iter);
- req->result = io_size;
+ req->result = iov_iter_count(iter);
+ ret2 = 0;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
(req->flags & REQ_F_ISREG))
goto copy_iov;
- ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret))
goto out_free;
kiocb_done(kiocb, ret2, issue_flags);
} else {
copy_iov:
- /* some cases will consume bytes even on error returns */
- iov_iter_reexpand(iter, iter->count + iter->truncated);
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
+ iov_iter_restore(iter, state);
+ if (ret2 > 0)
+ iov_iter_advance(iter, ret2);
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
return ret ?: -EAGAIN;
}
break;
} while (1);
+ if (uts) {
+ struct timespec64 ts;
+
+ if (get_timespec64(&ts, uts))
+ return -EFAULT;
+ timeout = timespec64_to_jiffies(&ts);
+ }
+
if (sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
return ret;
}
- if (uts) {
- struct timespec64 ts;
-
- if (get_timespec64(&ts, uts))
- return -EFAULT;
- timeout = timespec64_to_jiffies(&ts);
- }
-
init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry);
#endif
}
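+/*
+ * Park a resource on the given rsrc node; it is only put once the node is
+ * retired and every request that might still reference the resource has
+ * completed.
+ */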
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+ struct io_rsrc_node *node, void *rsrc)
+{
+ struct io_rsrc_put *prsrc;
+
+ prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+ if (!prsrc)
+ return -ENOMEM;
+
+ prsrc->tag = *io_get_tag_slot(data, idx);
+ prsrc->rsrc = rsrc;
+ list_add(&prsrc->list, &node->rsrc_list);
+ return 0;
+}
+
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
unsigned int issue_flags, u32 slot_index)
{
struct io_ring_ctx *ctx = req->ctx;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ bool needs_switch = false;
struct io_fixed_file *file_slot;
int ret = -EBADF;
slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
- ret = -EBADF;
- if (file_slot->file_ptr)
- goto err;
+
+ if (file_slot->file_ptr) {
+ struct file *old_file;
+
+ ret = io_rsrc_node_switch_start(ctx);
+ if (ret)
+ goto err;
+
+ old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+ ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+ ctx->rsrc_node, old_file);
+ if (ret)
+ goto err;
+ file_slot->file_ptr = 0;
+ needs_switch = true;
+ }
*io_get_tag_slot(ctx->file_data, slot_index) = 0;
io_fixed_file_set(file_slot, file);
ret = 0;
err:
+ if (needs_switch)
+ io_rsrc_node_switch(ctx, ctx->file_data);
io_ring_submit_unlock(ctx, !force_nonblock);
if (ret)
fput(file);
return ret;
}
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
- struct io_rsrc_node *node, void *rsrc)
-{
- struct io_rsrc_put *prsrc;
-
- prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
- if (!prsrc)
- return -ENOMEM;
-
- prsrc->tag = *io_get_tag_slot(data, idx);
- prsrc->rsrc = rsrc;
- list_add(&prsrc->list, &node->rsrc_list);
- return 0;
-}
-
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update2 *up,
unsigned nr_args)
* ordering. Fine to drop uring_lock here, we hold
* a ref to the ctx.
*/
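+ /*
+ * Also pin the sq_data itself so it cannot go away while
+ * uring_lock is dropped.
+ */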
+ refcount_inc(&sqd->refs);
mutex_unlock(&ctx->uring_lock);
mutex_lock(&sqd->lock);
mutex_lock(&ctx->uring_lock);
- tctx = sqd->thread->io_uring;
+ if (sqd->thread)
+ tctx = sqd->thread->io_uring;
}
} else {
tctx = current->io_uring;
if (ret)
goto err;
- if (sqd)
+ if (sqd) {
mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ }
if (copy_to_user(arg, new_count, sizeof(new_count)))
return -EFAULT;
return 0;
err:
- if (sqd)
+ if (sqd) {
mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ }
return ret;
}
return nlink;
}
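+/*
+ * Convert a Windows-style (backslash separated) path into a canonical unix
+ * path with "." and ".." components resolved, returned as a newly allocated
+ * string.  Illustrative example: "dir1\dir2\..\file" becomes "dir1/file",
+ * while a path that climbs above the root such as "..\.." yields
+ * ERR_PTR(-EINVAL).
+ */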
-void ksmbd_conv_path_to_unix(char *path)
+char *ksmbd_conv_path_to_unix(char *path)
{
+ size_t path_len, remain_path_len, out_path_len;
+ char *out_path, *out_next;
+ int i, pre_dotdot_cnt = 0, slash_cnt = 0;
+ bool is_last;
+
strreplace(path, '\\', '/');
-}
+ path_len = strlen(path);
+ remain_path_len = path_len;
+ if (path_len == 0)
+ return ERR_PTR(-EINVAL);
-void ksmbd_strip_last_slash(char *path)
-{
- int len = strlen(path);
+ out_path = kzalloc(path_len + 2, GFP_KERNEL);
+ if (!out_path)
+ return ERR_PTR(-ENOMEM);
+ out_path_len = 0;
+ out_next = out_path;
+
+ do {
+ char *name = path + path_len - remain_path_len;
+ char *next = strchrnul(name, '/');
+ size_t name_len = next - name;
+
+ is_last = !next[0];
+ if (name_len == 2 && name[0] == '.' && name[1] == '.') {
+ pre_dotdot_cnt++;
+ /* handle the case that path ends with "/.." */
+ if (is_last)
+ goto follow_dotdot;
+ } else {
+ if (pre_dotdot_cnt) {
+follow_dotdot:
+ slash_cnt = 0;
+ for (i = out_path_len - 1; i >= 0; i--) {
+ if (out_path[i] == '/' &&
+ ++slash_cnt == pre_dotdot_cnt + 1)
+ break;
+ }
+
+ if (i < 0 &&
+ slash_cnt != pre_dotdot_cnt) {
+ kfree(out_path);
+ return ERR_PTR(-EINVAL);
+ }
+
+ out_next = &out_path[i+1];
+ *out_next = '\0';
+ out_path_len = i + 1;
- while (len && path[len - 1] == '/') {
- path[len - 1] = '\0';
- len--;
- }
+ }
+
+ if (name_len != 0 &&
+ !(name_len == 1 && name[0] == '.') &&
+ !(name_len == 2 && name[0] == '.' && name[1] == '.')) {
+ next[0] = '\0';
+ sprintf(out_next, "%s/", name);
+ out_next += name_len + 1;
+ out_path_len += name_len + 1;
+ next[0] = '/';
+ }
+ pre_dotdot_cnt = 0;
+ }
+
+ remain_path_len -= name_len + 1;
+ } while (!is_last);
+
+ if (out_path_len > 0)
+ out_path[out_path_len-1] = '\0';
+ path[path_len] = '\0';
+ return out_path;
}
void ksmbd_conv_path_to_windows(char *path)
int parse_stream_name(char *filename, char **stream_name, int *s_type);
char *convert_to_nt_pathname(char *filename, char *sharepath);
int get_nlink(struct kstat *st);
-void ksmbd_conv_path_to_unix(char *path);
-void ksmbd_strip_last_slash(char *path);
+char *ksmbd_conv_path_to_unix(char *path);
void ksmbd_conv_path_to_windows(char *path);
char *ksmbd_extract_sharename(char *treename);
char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
smb2_get_name(struct ksmbd_share_config *share, const char *src,
const int maxlen, struct nls_table *local_nls)
{
- char *name, *unixname;
+ char *name, *norm_name, *unixname;
name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
if (IS_ERR(name)) {
}
/* change it to absolute unix name */
- ksmbd_conv_path_to_unix(name);
- ksmbd_strip_last_slash(name);
-
- unixname = convert_to_unix_name(share, name);
+ norm_name = ksmbd_conv_path_to_unix(name);
+ if (IS_ERR(norm_name)) {
+ kfree(name);
+ return norm_name;
+ }
kfree(name);
+
+ unixname = convert_to_unix_name(share, norm_name);
+ kfree(norm_name);
if (!unixname) {
pr_err("can not convert absolute name\n");
return ERR_PTR(-ENOMEM);
path = &fp->filp->f_path;
/* single EA entry is requested with given user.* name */
if (req->InputBufferLength) {
+ if (le32_to_cpu(req->InputBufferLength) <
+ sizeof(struct smb2_ea_info_req))
+ return -EINVAL;
+
ea_req = (struct smb2_ea_info_req *)req->Buffer;
} else {
/* need to send all EAs, if no specific EA is requested*/
#define SUBMOD_NAME "smb_direct"
#include <linux/kthread.h>
-#include <linux/rwlock.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
static inline bool
svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
{
- unsigned int quadlen = XDR_QUADLEN(obj->len);
- __be32 *p;
-
- if (xdr_stream_encode_u32(xdr, obj->len) < 0)
- return false;
- p = xdr_reserve_space(xdr, obj->len);
- if (!p)
+ if (obj->len > XDR_MAX_NETOBJ)
return false;
- p[quadlen - 1] = 0; /* XDR pad */
- memcpy(p, obj->data, obj->len);
-
- return true;
+ return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
}
#endif /* _LOCKD_SVCXDR_H_ */
}
static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
- struct nfsd4_session *session, u32 req)
+ struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
struct nfs4_client *clp = session->se_client;
struct svc_xprt *xpt = rqst->rq_xprt;
else
status = nfserr_inval;
spin_unlock(&clp->cl_lock);
+ if (status == nfs_ok && conn)
+ *conn = c;
return status;
}
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
- status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
- if (status == nfs_ok || status == nfserr_inval)
+ status = nfsd4_match_existing_connection(rqstp, session,
+ bcts->dir, &conn);
+ if (status == nfs_ok) {
+ if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
+ bcts->dir == NFS4_CDFC4_BACK)
+ conn->cn_flags |= NFS4_CDFC4_BACK;
+ nfsd4_probe_callback(session->se_client);
+ goto out;
+ }
+ if (status == nfserr_inval)
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
* depending on the status field in the last byte. The
* first byte is where the name start either way, and a
* zero means it's empty.
+ *
+ * Also, due to a bug in gcc, we don't want to use the
+ * real (differently sized) name arrays in the inode and
+ * link entries, but always the 'de_name[]' one in the
+ * fake struct entry.
+ *
+ * See
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
+ *
+ * for details, but basically gcc will take the size of the
+ * 'name' array from one of the used union entries randomly.
+ *
+ * This use of 'de_name[]' (48 bytes) avoids the false positive
+ * warnings that would happen if gcc decides to use 'inode.di_name'
+ * (16 bytes) even when the pointer and size were to come from
+ * 'link.dl_name' (48 bytes).
+ *
+ * In all cases the actual name pointer itself is the same, it's
+ * only the gcc internal 'what is the size of this field' logic
+ * that can get confused.
*/
union qnx4_directory_entry {
struct {
- char de_name;
- char de_pad[62];
- char de_status;
+ const char de_name[48];
+ u8 de_pad[15];
+ u8 de_status;
};
struct qnx4_inode_entry inode;
struct qnx4_link_info link;
ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
union qnx4_directory_entry *de;
- const char *name;
offset = ix * QNX4_DIR_ENTRY_SIZE;
de = (union qnx4_directory_entry *) (bh->b_data + offset);
- if (!de->de_name)
+ if (!de->de_name[0])
continue;
if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
continue;
if (!(de->de_status & QNX4_FILE_LINK)) {
size = sizeof(de->inode.di_fname);
- name = de->inode.di_fname;
ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
} else {
size = sizeof(de->link.dl_fname);
- name = de->link.dl_fname;
ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
QNX4_INODES_PER_BLOCK +
de->link.dl_inode_ndx;
}
- size = strnlen(name, size);
+ size = strnlen(de->de_name, size);
- QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
+ QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->de_name));
- if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) {
+ if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
brelse(bh);
return 0;
}
/* SPDX-License-Identifier: LGPL-2.1+ */
/*
- * fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ * SMB, CIFS, SMB2 FSCTL definitions
*
* Copyright (c) International Business Machines Corp., 2002,2013
* Author(s): Steve French (sfrench@us.ibm.com)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
- iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
- __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
#endif
-#endif /* CONFIG_GENERIC_IOMAP */
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
}
#endif
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
#include <asm-generic/pci_iomap.h>
#endif
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
{
return NULL;
}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
#endif
#endif /* __ASM_GENERIC_PCI_IOMAP_H */
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
+ int mce_count;
#endif
#ifdef CONFIG_KRETPROBES
ITER_DISCARD,
};
+struct iov_iter_state {
+ size_t iov_offset;
+ size_t count;
+ unsigned long nr_segs;
+};
+
struct iov_iter {
u8 iter_type;
bool data_source;
};
loff_t xarray_start;
};
- size_t truncated;
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
return i->iter_type;
}
+static inline void iov_iter_save_state(struct iov_iter *iter,
+ struct iov_iter_state *state)
+{
+ state->iov_offset = iter->iov_offset;
+ state->count = iter->count;
+ state->nr_segs = iter->nr_segs;
+}
+
static inline bool iter_is_iovec(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_IOVEC;
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
* conversion in assignment is by definition greater than all
* values of size_t, including old i->count.
*/
- if (i->count > count) {
- i->truncated += i->count - count;
+ if (i->count > count)
i->count = count;
- }
}
/*
*/
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
- i->truncated -= count - i->count;
i->count = count;
}
enum afs_cb_break_reason {
afs_cb_break_no_break,
+ afs_cb_break_no_promise,
afs_cb_break_for_callback,
afs_cb_break_for_deleted,
afs_cb_break_for_lapsed,
+ afs_cb_break_for_s_reinit,
afs_cb_break_for_unlink,
- afs_cb_break_for_vsbreak,
+ afs_cb_break_for_v_break,
afs_cb_break_for_volume_callback,
afs_cb_break_for_zap,
};
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
+ EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
EM(afs_cb_break_for_deleted, "break-del") \
EM(afs_cb_break_for_lapsed, "break-lapsed") \
+ EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_vsbreak, "break-vs") \
+ EM(afs_cb_break_for_v_break, "break-v") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
E_(afs_cb_break_for_zap, "break-zap")
/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
/*
- * include/uapi/linux/cifs/cifs_mount.h
*
* Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
*
IORING_REGISTER_IOWQ_AFF = 17,
IORING_UNREGISTER_IOWQ_AFF = 18,
- /* set/get max number of workers */
+ /* set/get max number of io-wq workers */
IORING_REGISTER_IOWQ_MAX_WORKERS = 19,
/* this goes last */
IORING_REGISTER_LAST
};
+/* io-wq worker categories */
+enum {
+ IO_WQ_BOUND,
+ IO_WQ_UNBOUND,
+};
+
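
A hedged userspace sketch of how the clarified opcode and the new categories fit together (illustrative limits; the raw syscall wrapper and header are assumed): the argument is an array indexed by IO_WQ_BOUND and IO_WQ_UNBOUND, and on success the previous limits are copied back into it.

	/* Illustrative only: cap the io-wq worker pools of an existing ring.
	 * ring_fd is assumed to be an io_uring instance file descriptor. */
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/io_uring.h>

	static int set_iowq_limits(int ring_fd)
	{
		unsigned int counts[2];

		counts[IO_WQ_BOUND]   = 8;	/* bounded (e.g. regular file I/O) workers */
		counts[IO_WQ_UNBOUND] = 4;	/* unbounded (e.g. network/blocking) workers */

		/* On success the kernel writes the previous limits back into counts[]. */
		return syscall(__NR_io_uring_register, ring_fd,
			       IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
	}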
/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
__u32 offset;
{
ktime_t *calltime = (ktime_t *)data;
- printk(KERN_DEBUG "calling %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled());
+ printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
*calltime = ktime_get();
}
rettime = ktime_get();
delta = ktime_sub(rettime, *calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n",
- fn, ret, duration, irqs_disabled());
+ printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+ fn, ret, duration);
}
static ktime_t initcall_calltime;
pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
} else if (rc == -EEXIST) {
- pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+ err_printk(entry->dev, entry,
+ "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
}
}
/**
* dma_map_sg_attrs - Map the given buffer for DMA
* @dev: The device for which to perform the DMA operation
- * @sg: The sg_table object describing the buffer
+ * @sg: The sg_table object describing the buffer
+ * @nents: Number of entries to map
* @dir: DMA direction
* @attrs: Optional DMA attributes for the map operation
*
return;
if (ifh->nr_file_filters) {
- mm = get_task_mm(event->ctx->task);
+ mm = get_task_mm(task);
if (!mm)
goto restart;
* The risk of writer starvation is there, but the pathological use cases
* which trigger it are not necessarily the typical RT workloads.
*
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
* Common code shared between RT rw_semaphore and rwlock
*/
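
For readers unfamiliar with the pairing being described, a generic sketch (not the rwbase code; names and the sign convention are invented for illustration) of a reader count whose fast paths provide the ACQUIRE/RELEASE semantics the note above asks for:

	/* Illustrative only: readers >= 0 counts active readers, a negative
	 * value means a writer holds the lock. Not the rwbase implementation. */
	static inline bool demo_read_trylock(atomic_t *readers)
	{
		int r = atomic_read(readers);

		while (r >= 0) {
			/* success is ACQUIRE: pairs with the writer's release */
			if (atomic_try_cmpxchg_acquire(readers, &r, r + 1))
				return true;
		}
		return false;	/* writer active, fall back to the slow path */
	}

	static inline void demo_read_unlock(atomic_t *readers)
	{
		/* RELEASE: critical-section accesses complete before the count drops */
		atomic_fetch_sub_release(1, readers);
	}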
* set.
*/
for (r = atomic_read(&rwb->readers); r < 0;) {
+ /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
return 1;
}
/*
* rwb->readers can only hit 0 when a writer is waiting for the
* active readers to leave the critical section.
+ *
+ * dec_and_test() is fully ordered, provides RELEASE.
*/
if (unlikely(atomic_dec_and_test(&rwb->readers)))
__rwbase_read_unlock(rwb, state);
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
- atomic_add(READER_BIAS - bias, &rwb->readers);
+ /*
+ * _release() is needed in case the reader is in the fast path, pairing
+ * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+ */
+ (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
rwbase_rtmutex_unlock(rtm);
}
__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+ /* Can do without CAS because we're serialized by wait_lock. */
+ lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+ /*
+ * _acquire is needed in case the reader is in the fast path, pairing
+ * with rwbase_read_unlock(), provides ACQUIRE.
+ */
+ if (!atomic_read_acquire(&rwb->readers)) {
+ atomic_set(&rwb->readers, WRITER_BIAS);
+ return 1;
+ }
+
+ return 0;
+}
+
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
unsigned int state)
{
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- /*
- * set_current_state() for rw_semaphore
- * current_save_and_set_rtlock_wait_state() for rwlock
- */
- rwbase_set_and_save_current_state(state);
+ if (__rwbase_write_trylock(rwb))
+ goto out_unlock;
- /* Block until all readers have left the critical section. */
- for (; atomic_read(&rwb->readers);) {
+ rwbase_set_and_save_current_state(state);
+ for (;;) {
/* Optimized out for rwlocks */
if (rwbase_signal_pending_state(state, current)) {
- __set_current_state(TASK_RUNNING);
+ rwbase_restore_current_state();
__rwbase_write_unlock(rwb, 0, flags);
return -EINTR;
}
+
+ if (__rwbase_write_trylock(rwb))
+ break;
+
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+ rwbase_schedule();
+ raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- /*
- * Schedule and wait for the readers to leave the critical
- * section. The last reader leaving it wakes the waiter.
- */
- if (atomic_read(&rwb->readers) != 0)
- rwbase_schedule();
set_current_state(state);
- raw_spin_lock_irqsave(&rtm->wait_lock, flags);
}
-
- atomic_set(&rwb->readers, WRITER_BIAS);
rwbase_restore_current_state();
+
+out_unlock:
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 0;
}
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- if (!atomic_read(&rwb->readers)) {
- atomic_set(&rwb->readers, WRITER_BIAS);
+ if (__rwbase_write_trylock(rwb)) {
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 1;
}
return 0;
}
EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ * iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+ if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+ !iov_iter_is_kvec(i)))
+ return;
+ i->iov_offset = state->iov_offset;
+ i->count = state->count;
+ /*
+ * For the *vec iters, nr_segs + iov is constant - if we increment
+ * the vec, then we also decrement the nr_segs count. Hence we don't
+ * need to track both of these, just one is enough and we can deduce
+ * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+ * size, so we can just increment the iov pointer as they are unionized.
+ * ITER_BVEC _may_ be the same size on some archs, but on others it is
+ * not. Be safe and handle it separately.
+ */
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ if (iov_iter_is_bvec(i))
+ i->bvec -= state->nr_segs - i->nr_segs;
+ else
+ i->iov -= state->nr_segs - i->nr_segs;
+ i->nr_segs = state->nr_segs;
+}
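
A minimal usage sketch of the save/restore pair added above (hedged: do_send() and the surrounding helper are invented for illustration, not code from the series): snapshot the iterator before an operation that may advance it, and roll it back if the operation must be retried.

	/* Illustrative only: do_send() is a hypothetical operation that may
	 * consume part of the iterator and then report -EAGAIN. */
	static ssize_t send_with_retry(struct iov_iter *iter)
	{
		struct iov_iter_state state;
		ssize_t ret;

		iov_iter_save_state(iter, &state);	/* records iov_offset, count, nr_segs */
		ret = do_send(iter);
		if (ret == -EAGAIN)
			iov_iter_restore(iter, &state);	/* rewind for the retry */
		return ret;
	}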
return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+ iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
#endif /* CONFIG_PCI */
/* memcg and lruvec stats flushing */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
static DEFINE_SPINLOCK(stats_flush_lock);
#define THRESHOLDS_EVENTS_TARGET 128
/* Update lruvec */
__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
- if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
- queue_work(system_unbound_wq, &stats_flush_work);
}
/**
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
}
-static void flush_memcg_stats_work(struct work_struct *w)
-{
- mem_cgroup_flush_stats();
-}
-
static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
unmap_mapping_range_tree(&mapping->i_mmap, &details);
i_mmap_unlock_write(mapping);
}
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified
inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
+ mem_cgroup_flush_stats();
/*
* Compare the distance to the existing workingset size. We
* don't activate pages that couldn't stay resident even if
else
CLANG_FLAGS += -fintegrated-as
endif
+# By default, clang only warns when it encounters an unknown warning flag or
+# certain optimization flags it knows it has not implemented.
+# Make it behave more like gcc by erroring when these flags are encountered
+# so they can be implemented or wrapped in cc-option.
CLANG_FLAGS += -Werror=unknown-warning-option
+CLANG_FLAGS += -Werror=ignored-optimization-argument
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
# Stage 2 is handled by this file and does the following
# 1) Find all modules listed in modules.order
# 2) modpost is then used to
-# 3) create one <module>.mod.c file pr. module
+# 3) create one <module>.mod.c file per module
# 4) create one Module.symvers file with CRC for all exported symbols
# Step 3 is used to place certain information in the module's ELF
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
"continue.")
if args.commit:
+ if args.commit.startswith('HEAD'):
+ sys.exit("The --commit option can't use the HEAD ref")
+
args.find = False
if args.ignore:
lines = []
defined = []
references = []
- skip = False
if not os.path.exists(kfile):
return defined, references
if REGEX_KCONFIG_DEF.match(line):
symbol_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(symbol_def[0])
- skip = False
- elif REGEX_KCONFIG_HELP.match(line):
- skip = True
- elif skip:
- # ignore content of help messages
- pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
symbols = get_symbols_in_line(line)
import os
import re
import subprocess
+import sys
_DEFAULT_OUTPUT = 'compile_commands.json'
_DEFAULT_LOG_LEVEL = 'WARNING'
free(evsel);
}
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd)
+ *fd = -1;
}
}
}
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
struct perf_evsel *leader = evsel->leader;
- int fd;
+ int *fd;
if (evsel == leader) {
*group_fd = -1;
return -ENOTCONN;
fd = FD(leader, cpu, thread);
- if (fd == -1)
+ if (fd == NULL || *fd == -1)
return -EBADF;
- *group_fd = fd;
+ *group_fd = *fd;
return 0;
}
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
- int fd, group_fd;
+ int fd, group_fd, *evsel_fd;
+
+ evsel_fd = FD(evsel, cpu, thread);
+ if (evsel_fd == NULL)
+ return -EINVAL;
err = get_group_fd(evsel, cpu, thread, &group_fd);
if (err < 0)
if (fd < 0)
return -errno;
- FD(evsel, cpu, thread) = fd;
+ *evsel_fd = fd;
}
}
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
- if (FD(evsel, cpu, thread) >= 0)
- close(FD(evsel, cpu, thread));
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd && *fd >= 0) {
+ close(*fd);
+ *fd = -1;
+ }
}
}
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread);
- struct perf_mmap *map = MMAP(evsel, cpu, thread);
+ int *fd = FD(evsel, cpu, thread);
- if (fd < 0)
+ if (fd == NULL || *fd < 0)
continue;
- perf_mmap__munmap(map);
+ perf_mmap__munmap(MMAP(evsel, cpu, thread));
}
}
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread);
- struct perf_mmap *map = MMAP(evsel, cpu, thread);
+ int *fd = FD(evsel, cpu, thread);
+ struct perf_mmap *map;
- if (fd < 0)
+ if (fd == NULL || *fd < 0)
continue;
+ map = MMAP(evsel, cpu, thread);
perf_mmap__init(map, NULL, false, NULL);
- ret = perf_mmap__mmap(map, &mp, fd, cpu);
+ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
if (ret) {
perf_evsel__munmap(evsel);
return ret;
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
- if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
return NULL;
return MMAP(evsel, cpu, thread)->base;
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu, thread);
memset(count, 0, sizeof(*count));
- if (FD(evsel, cpu, thread) < 0)
+ if (fd == NULL || *fd < 0)
return -EINVAL;
if (MMAP(evsel, cpu, thread) &&
!perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
return 0;
- if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+ if (readn(*fd, count->values, size) <= 0)
return -errno;
return 0;
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread),
- err = ioctl(fd, ioc, arg);
+ int err;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd == NULL || *fd < 0)
+ return -1;
+
+ err = ioctl(*fd, ioc, arg);
if (err)
return err;
return OUTPUT_TYPE_OTHER;
}
-static inline unsigned int attr_type(unsigned int type)
-{
- switch (type) {
- case OUTPUT_TYPE_SYNTH:
- return PERF_TYPE_SYNTH;
- default:
- return type;
- }
-}
-
static bool output_set_by_user(void)
{
int j;
output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}
+static struct evsel *find_first_output_type(struct evlist *evlist,
+ unsigned int type)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (output_type(evsel->core.attr.type) == (int)type)
+ return evsel;
+ }
+ return NULL;
+}
+
/*
* verify all user requested events exist and the samples
* have the expected data
struct evsel *evsel;
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
- evsel = perf_session__find_first_evtype(session, attr_type(j));
+ evsel = find_first_output_type(session->evlist, j);
/*
* even if fields is set to 0 (ie., show nothing) event must
}
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
- unsigned int row, bool arrow_down)
+ unsigned int row, int diff, bool arrow_down)
{
- unsigned int end_row;
+ int end_row;
- if (row >= browser->top_idx)
- end_row = row - browser->top_idx;
- else
+ if (diff <= 0)
return;
SLsmg_set_char_set(1);
if (arrow_down) {
+ if (row + diff <= browser->top_idx)
+ return;
+
+ end_row = row + diff - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
- SLsmg_write_char(SLSMG_ULCORN_CHAR);
- ui_browser__gotorc(browser, end_row, column);
- SLsmg_draw_hline(2);
- ui_browser__gotorc(browser, end_row + 1, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
+
+ while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
+ ui_browser__gotorc(browser, end_row, column - 1);
+ SLsmg_draw_vline(1);
+ }
+
+ end_row = (int)(row - browser->top_idx);
+ if (end_row >= 0) {
+ ui_browser__gotorc(browser, end_row, column - 1);
+ SLsmg_write_char(SLSMG_ULCORN_CHAR);
+ ui_browser__gotorc(browser, end_row, column);
+ SLsmg_draw_hline(2);
+ }
} else {
+ if (row < browser->top_idx)
+ return;
+
+ end_row = row - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
ui_browser__gotorc(browser, end_row, column);
void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
u64 start, u64 end);
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
- unsigned int row, bool arrow_down);
+ unsigned int row, int diff, bool arrow_down);
void __ui_browser__show_title(struct ui_browser *browser, const char *title);
void ui_browser__show_title(struct ui_browser *browser, const char *title);
int ui_browser__show(struct ui_browser *browser, const char *title,
ab->selection = al;
}
-static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
+ int diff = 1;
+
+ while (pos && pos->al.offset == -1) {
+ pos = list_prev_entry(pos, al.node);
+ if (!ab->opts->hide_src_code)
+ diff++;
+ }
if (!pos)
- return false;
+ return 0;
if (ins__is_lock(&pos->ins))
name = pos->ops.locked.ins.name;
name = pos->ins.name;
if (!name || !cursor->ins.name)
- return false;
+ return 0;
- return ins__is_fused(ab->arch, name, cursor->ins.name);
+ if (ins__is_fused(ab->arch, name, cursor->ins.name))
+ return diff;
+ return 0;
}
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
struct annotation *notes = symbol__annotation(sym);
u8 pcnt_width = annotation__pcnt_width(notes);
int width;
+ int diff = 0;
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
pcnt_width + 2 + notes->widths.addr + width,
from, to);
- if (is_fused(ab, cursor)) {
+ diff = is_fused(ab, cursor);
+ if (diff > 0) {
ui_browser__mark_fused(browser,
pcnt_width + 3 + notes->widths.addr + width,
- from - 1,
- to > from);
+ from - diff, diff, to > from);
}
}
struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
{
struct btf *btf;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int err = btf__get_from_id(id, &btf);
+#pragma GCC diagnostic pop
return err ? ERR_PTR(err) : btf;
}
al.filtered = 0;
al.sym = NULL;
+ al.srcline = NULL;
if (!cpumode) {
thread__find_cpumode_addr_location(thread, ip, &al);
} else {
/* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
#include <asm/unistd.h>
.text
1:
li r3, -1
blr
+
+
+.macro scv level
+ .long (0x44000001 | (\level) << 5)
+.endm
+
+FUNC_START(getppid_scv_tm_active)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ scv 0
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
+
+FUNC_START(getppid_scv_tm_suspended)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ tsuspend.
+ scv 0
+ tresume.
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
#include "utils.h"
#include "tm.h"
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
+#endif
+
extern int getppid_tm_active(void);
extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
unsigned retries = 0;
#define TEST_DURATION 10 /* seconds */
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
{
int i;
pid_t pid;
for (i = 0; i < TM_RETRIES; i++) {
- if (suspend)
- pid = getppid_tm_suspended();
- else
- pid = getppid_tm_active();
+ if (suspend) {
+ if (scv)
+ pid = getppid_scv_tm_suspended();
+ else
+ pid = getppid_tm_suspended();
+ } else {
+ if (scv)
+ pid = getppid_scv_tm_active();
+ else
+ pid = getppid_tm_active();
+ }
if (pid >= 0)
return pid;
* Test a syscall within a suspended transaction and verify
* that it succeeds.
*/
- FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
/*
* Test a syscall within an active transaction and verify that
* it fails with the correct failure code.
*/
- FAIL_IF(getppid_tm(false) != -1); /* Should fail... */
+ FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */
FAIL_IF(!failure_is_persistent()); /* ...persistently... */
FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+
+ /* Now do it all again with scv if it is available. */
+ if (have_hwcap2(PPC_FEATURE2_SCV)) {
+ FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */
+ FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+ FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+ }
+
gettimeofday(&now, 0);
}