https://btrfs.readthedocs.io
- https://btrfs.wiki.kernel.org
that maintains information about administration tasks, frequently asked
questions, use cases, mount options, comprehensible changelogs, features,
L: linux-btrfs@vger.kernel.org
S: Maintained
W: https://btrfs.readthedocs.io
-W: https://btrfs.wiki.kernel.org/
Q: https://patchwork.kernel.org/project/linux-btrfs/list/
C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
+extern struct pdc_btlb_info btlb_info;
void parisc_setup_cache_timing(void);
#define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_PARISC_MCKINLEY_H
-#define ASM_PARISC_MCKINLEY_H
-
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_mckinley_root;
-
-#endif /*ASM_PARISC_MCKINLEY_H*/
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
-#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot);
+int pdc_btlb_purge_all(void);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
-#endif /* !CONFIG_PA20 */
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret);
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret);
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
struct seq_file;
extern void early_trap_init(void);
extern void collect_boot_cpu_data(void);
+extern void btlb_init_per_cpu(void);
extern int show_cpuinfo (struct seq_file *m, void *v);
/* driver code in driver/parisc */
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
struct ioc ioc[MAX_IOC];
};
+/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
+extern struct sba_device *sba_list;
+
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
-#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
+#define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H
+/*
+ * PA-RISC uses virtually indexed & physically tagged (VIPT) caches
+ * which has strict requirements when two pages to the same physical
+ * address are accessed through different mappings. Read the section
+ * "Address Aliasing" in the arch docs for more detail:
+ * PA-RISC 1.1 (page 3-6):
+ * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
+ * PA-RISC 2.0 (page F-5):
+ * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
+ *
+ * For Linux we allow kernel and userspace to map pages on page size
+ * granularity (SHMLBA) but have to ensure that, if two pages are
+ * mapped to the same physical address, the virtual and physical
+ * addresses modulo SHM_COLOUR are identical.
+ */
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHM_COLOUR 0x00400000 /* shared mappings colouring */
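
/*
 * A minimal sketch of the rule above (the helper name is hypothetical, not
 * part of the patch): two mappings of one physical page are coherent only
 * when the virtual and physical addresses agree modulo SHM_COLOUR.
 */
static inline int shm_colour_ok(unsigned long vaddr, unsigned long paddr)
{
	return ((vaddr ^ paddr) & (SHM_COLOUR - 1)) == 0;
}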
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#elif !defined(CONFIG_64BIT)
+ DEFINE(HUGEPAGE_SIZE, 4*1024*1024);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __ro_after_init;
+struct pdc_btlb_info btlb_info __ro_after_init;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
-#ifndef CONFIG_PA20
- if (pdc_btlb_info(&btlb_info) < 0) {
- memset(&btlb_info, 0, sizeof btlb_info);
- }
-#endif
-
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
+ #define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
- #define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
return retval;
}
-#ifndef CONFIG_PA20
/**
* pdc_btlb_info - Return block TLB information.
* @btlb: The return buffer.
*/
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
- int retval;
+ int retval;
unsigned long flags;
- spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
- memcpy(btlb, pdc_result, sizeof(*btlb));
- spin_unlock_irqrestore(&pdc_lock, flags);
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
- if(retval < 0) {
- btlb->max_size = 0;
- }
- return retval;
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+ memcpy(btlb, pdc_result, sizeof(*btlb));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+	if (retval < 0)
+		btlb->max_size = 0;
+ return retval;
+}
+
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
+ (unsigned long) vpage, physpage, len, entry_info, slot);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
+}
+
+int pdc_btlb_purge_all(void)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
}
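/*
 * A note on the three BTLB wrappers above: each follows the same pattern of
 * bailing out with PDC_BAD_PROC on PA 2.0 machines (whose CPUs have no block
 * TLBs), serializing the firmware call under pdc_lock, and returning the PDC
 * status code.
 */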
/**
int retval;
unsigned long flags;
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result2, mod_path, sizeof(*mod_path));
retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
return retval;
}
-#endif /* !CONFIG_PA20 */
/**
* pdc_lan_station_id - Get the LAN address.
std %dp,0x18(%r10)
#endif
-#ifdef CONFIG_64BIT
- /* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
+#ifdef CONFIG_64BIT
+ /* Get PDCE_PROC for monarch CPU. */
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r10
depd %r10, 31, 32, %r3 /* move to upper word */
tovirt_r1 %r6
mtctl %r6,%cr30 /* restore task thread info */
#endif
-
+
+#ifndef CONFIG_64BIT
+ /* clear all BTLBs */
+ ldi PDC_BLOCK_TLB,%arg0
+ load32 PA(stext_pdc_btlb_ret), %rp
+ ldw MEM_PDC_LO(%r0),%r3
+ bv (%r3)
+ ldi PDC_BTLB_PURGE_ALL,%arg1
+stext_pdc_btlb_ret:
+#endif
+
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
volatile unsigned int lock[1];
};
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
+ btlb_init_per_cpu();
+
return ret;
}
}
/* End of data section */
+ . = ALIGN(PAGE_SIZE);
_edata = .;
/* BSS */
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+#include <asm/asm-offsets.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
parisc_bootmem_free();
}
+static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+ unsigned long entry_info)
+{
+ const int slot_max = btlb_info.fixed_range_info.num_comb;
+ int min_num_pages = btlb_info.min_size;
+ unsigned long size;
+
+ /* map at minimum 4 pages */
+ if (min_num_pages < 4)
+ min_num_pages = 4;
+
+ size = HUGEPAGE_SIZE;
+ while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+ /* starting address must have same alignment as size! */
+ /* if correctly aligned and fits in double size, increase */
+ if (((start & (2 * size - 1)) == 0) &&
+ (end - start) >= (2 * size)) {
+ size <<= 1;
+ continue;
+ }
+ /* if current size alignment is too big, try smaller size */
+ if ((start & (size - 1)) != 0) {
+ size >>= 1;
+ continue;
+ }
+ if ((end - start) >= size) {
+ if ((size >> PAGE_SHIFT) >= min_num_pages)
+ pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, entry_info, *slot);
+ (*slot)++;
+ start += size;
+ continue;
+ }
+		size >>= 1;
+ continue;
+ }
+}
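/*
 * A minimal user-space sketch (editorial illustration, not part of the
 * patch) of the selection walk in alloc_btlb() above, with the slot limit
 * and min_size check omitted; the page and huge-page sizes are assumptions
 * for the example. It prints the naturally aligned power-of-two chunks the
 * walk would map for a given range.
 */
#include <stdio.h>

#define EX_PAGE_SIZE		4096UL
#define EX_HUGEPAGE_SIZE	(4UL * 1024 * 1024)

static void walk(unsigned long start, unsigned long end)
{
	unsigned long size = EX_HUGEPAGE_SIZE;

	while (start < end && size >= EX_PAGE_SIZE) {
		/* grow while start stays aligned and the doubled size fits */
		if ((start & (2 * size - 1)) == 0 && end - start >= 2 * size) {
			size <<= 1;
			continue;
		}
		/* shrink until start is aligned to size */
		if (start & (size - 1)) {
			size >>= 1;
			continue;
		}
		if (end - start >= size) {
			printf("map 0x%08lx-0x%08lx (%lu KiB)\n",
			       start, start + size, size >> 10);
			start += size;
			continue;
		}
		/* remaining tail is shorter than size */
		size >>= 1;
	}
}

int main(void)
{
	walk(0x00011000UL, 0x00400000UL);	/* deliberately unaligned start */
	return 0;
}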
+
+void btlb_init_per_cpu(void)
+{
+ unsigned long s, t, e;
+ int slot;
+
+ /* BTLBs are not available on 64-bit CPUs */
+ if (IS_ENABLED(CONFIG_PA20))
+ return;
+
+	if (pdc_btlb_info(&btlb_info) < 0)
+		memset(&btlb_info, 0, sizeof(btlb_info));
+
+	/* insert BTLBs for code and data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_stext);
+ e = (uintptr_t) dereference_function_descriptor(&_etext);
+ t = (uintptr_t) dereference_function_descriptor(&_sdata);
+ BUG_ON(t != e);
+
+ /* code segments */
+ slot = 0;
+ alloc_btlb(s, e, &slot, 0x13800000);
+
+ /* sanity check */
+ t = (uintptr_t) dereference_function_descriptor(&_edata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+ BUG_ON(t != e);
+
+ /* data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_sdata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+ alloc_btlb(s, e, &slot, 0x11800000);
+}
+
#ifdef CONFIG_PA20
/*
static int __init
parisc_agp_init(void)
{
- extern struct sba_device *sba_list;
-
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;
.shutdown_pre = tpm_class_shutdown,
};
const struct class tpmrm_class = {
- .name = "tmprm",
+ .name = "tpmrm",
};
dev_t tpm_devt;
struct ioc {
struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
u32 pdir_size; /* bytes, function of IOV Space size */
u32 res_hint; /* next available IOVP -
circular search */
BUG_ON(pages_needed == 0);
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
- DBG_RES("%s() size: %d pages_needed %d\n",
+ DBG_RES("%s() size: %zu pages_needed %d\n",
__func__, size, pages_needed);
/*
BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
BUG_ON(pages_mapped > BITS_PER_LONG);
- DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
+ DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
__func__, res_idx, pages_mapped);
#ifdef CCIO_COLLECT_STATS
* index are bits 12:19 of the value returned by LCI.
*/
static void
-ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hints)
{
register unsigned long pa;
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
unsigned long hint = hint_lookup[(int)direction];
BUG_ON(!dev);
pdir_start = &(ioc->pdir_base[idx]);
- DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
- __func__, addr, (long)iovp | offset, size);
+ DBG_RUN("%s() %px -> %#lx size: %zu\n",
+ __func__, addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
return;
}
- DBG_RUN("%s() iovp 0x%lx/%x\n",
+ DBG_RUN("%s() iovp %#lx/%zx\n",
__func__, (long)iova, size);
iova ^= offset; /* clear offset bits */
iova_space_size>>20,
iov_order + PAGE_SHIFT);
- ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
+ ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) {
panic("%s() could not allocate I/O Page Table\n", __func__);
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint,
- void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
+ void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
unsigned long))
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
unsigned int n_mappings = 0;
unsigned long dma_offset = 0, dma_len = 0;
- u64 *pdirp = NULL;
+ __le64 *pdirp = NULL;
/* Horrible hack. For efficiency's sake, dma_sg starts one
* entry below the true start (it is immediately incremented
unsigned long vaddr;
long size;
- DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
- (unsigned long)sg_dma_address(startsg), cnt,
+ DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
+ (unsigned long)sg_dma_address(startsg),
sg_virt(startsg), startsg->length
);
static DEFINE_SPINLOCK(iosapic_lock);
-static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
+static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
{
- __raw_writel(data, addr);
+ __raw_writel((__force u32)data, addr);
}
/*
struct vector_info {
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
struct irt_entry *irte; /* IRT entry */
- u32 __iomem *eoi_addr; /* precalculate EOI reg address */
- u32 eoi_data; /* IA64: ? PA: swapped txn_data */
+ __le32 __iomem *eoi_addr; /* precalculate EOI reg address */
+ __le32 eoi_data; /* IA64: ? PA: swapped txn_data */
int txn_irq; /* virtual IRQ number for processor */
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
u32 txn_data; /* CPU interrupt bit */
#include <linux/module.h>
#include <asm/ropes.h>
-#include <asm/mckinley.h> /* for proc_mckinley_root */
-#include <asm/runway.h> /* for proc_runway_root */
#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#include <asm/pdcpat.h> /* for is_pdc_pat() */
#endif
static struct proc_dir_entry *proc_runway_root __ro_after_init;
-struct proc_dir_entry *proc_mckinley_root __ro_after_init;
+static struct proc_dir_entry *proc_mckinley_root __ro_after_init;
/************************************
** SBA register read and write support
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
/* start printing from lowest pde in rval */
- u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
+ __le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
uint rcnt;
*/
static void
-sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hint)
{
u64 pa; /* physical address */
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
- u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
+ __le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
#ifdef ASSERT_PDIR_SANITY
/* Assert first pdir entry is set.
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
int pide;
ioc = GET_IOC(dev);
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
- DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
+ DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n",
__func__,
ioc->ioc_hpa,
(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
- DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
+ DBG_INIT("%s() IOV base %#lx mask %#0lx\n",
__func__, ioc->ibase, ioc->imask);
/*
if (!IS_PLUTO(sba_dev->dev)) {
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
+ DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->",
__func__, sba_dev->sba_hpa, ioc_ctl);
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
/* flush out the last writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
- DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
+	DBG_INIT("	ioc[%d] ROPE_CFG %#lx ROPE_DBG %#lx\n",
i,
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
);
- DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
+ DBG_INIT(" STATUS_CONTROL %#lx FLUSH_CTRL %#lx\n",
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
);
if (IS_PLUTO(sba_dev->dev)) {
#ifdef ASSERT_PDIR_SANITY
/* Mark first bit busy - ie no IOVA 0 */
sba_dev->ioc[i].res_map[0] = 0x80;
- sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
+ sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL;
#endif
/* Third (and last) part of PIRANHA BUG */
int i;
char *version;
void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *root;
-#endif
+ struct proc_dir_entry *root __maybe_unused;
sba_dump_ranges(sba_addr);
hppa_dma_ops = &sba_ops;
-#ifdef CONFIG_PROC_FS
switch (dev->id.hversion) {
case PLUTO_MCKINLEY_PORT:
if (!proc_mckinley_root)
proc_create_single("sba_iommu", 0, root, sba_proc_info);
proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
-#endif
return 0;
}
tristate "Mellanox BlueField Firmware Boot Control driver"
depends on ARM64
depends on ACPI
+ depends on NET
help
The Mellanox BlueField firmware implements functionality to
request swapping the primary and alternate eMMC boot partition,
config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
- depends on HWMON
- depends on I2C
+ depends on HWMON && I2C
+ depends on ACPI || COMPILE_TEST
select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ { 0x0, "DISABLE" },
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
{ 0x114, "SERR_INJ" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ { 0x0, "DISABLE" },
{ 0xc0, "RXREQ_MSS" },
{ 0xc1, "RXDAT_MSS" },
{ 0xc2, "TXRSP_MSS" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ { 0x0, "DISABLE" },
{ 0x45, "HNF_REQUESTS" },
{ 0x46, "HNF_REJECTS" },
{ 0x47, "ALL_BUSY" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ { 0x0, "DISABLE" },
{ 0x12, "CDN_REQ" },
{ 0x13, "DDN_REQ" },
{ 0x14, "NDN_REQ" },
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg, perfevt, perfctl;
+ uint64_t perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
- MLXBF_PMC_PERFCTL);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
-
- if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
- MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
- return -EFAULT;
-
- /* Check if the counter is enabled */
-
- if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
- MLXBF_PMC_READ_REG_64, &perfctl))
- return -EFAULT;
-
- if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
- return -EINVAL;
-
- /* Set counter in "read" mode */
- perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
} else
return -EINVAL;
- return sprintf(buf, "0x%llx\n", value);
+ return sysfs_emit(buf, "0x%llx\n", value);
}
/* Store function for "counter" sysfs files */
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
- return sprintf(buf, "No event being monitored\n");
+ return sysfs_emit(buf, "No event being monitored\n");
evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
if (!evt_name)
return -EINVAL;
- return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
+ return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
}
/* Store function for "event" sysfs files */
return -EINVAL;
for (i = 0, buf[0] = '\0'; i < size; ++i) {
- len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
- events[i].evt_name);
- if (len > PAGE_SIZE)
+ len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
+ events[i].evt_num, events[i].evt_name);
+ if (len >= PAGE_SIZE)
break;
strcat(buf, e_info);
ret = len;
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
/* Store function for "enable" sysfs files - only for l3cache */
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
+ * @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
+ struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
struct mlxbf_tmfifo *fifo;
};
+/* Check whether vring is in drop mode. */
+#define IS_VRING_DROP(_r) ({ \
+ typeof(_r) (r) = (_r); \
+	(r->desc_head == &r->drop_desc); })
+
+/* A stub length used to drop a maximum-length packet. */
+#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
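+/* GENMASK(15, 0) is 0xffff; the FIFO message header carries its length in a
+ * 16-bit field (hdr.len, read with ntohs() below), so a descriptor of this
+ * stub length can absorb any packet that is being dropped.
+ */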
+
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
-#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
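+/* ETH_HLEN (14) + VLAN_HLEN (4) = 18 bytes of L2 header on top of the MTU. */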
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
+ vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
return len;
}
-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data, sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ sizeof(u64));
+ else
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
+ }
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data,
- len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ else
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ }
vring->cur_len = len;
}
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
- struct vring_desc *desc,
+ struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
+ bool drop_rx = false;
/* Read/Write packet header. */
if (is_rx) {
if (ntohs(hdr.len) >
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
config->mtu) +
- MLXBF_TMFIFO_NET_L2_OVERHEAD)
- return;
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
if (!tm_dev2)
return;
- vring->desc = desc;
+ vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
+
+ if (drop_rx && !IS_VRING_DROP(vring)) {
+ if (vring->desc_head)
+ mlxbf_tmfifo_release_pkt(vring);
+ *desc = &vring->drop_desc;
+ vring->desc_head = *desc;
+ vring->desc = *desc;
+ }
+
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
- vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
- if (!desc)
- return false;
+ if (!desc) {
+			/* Drop the next Rx packet to avoid getting stuck. */
+ if (is_rx) {
+ desc = &vring->drop_desc;
+ vring->desc_head = desc;
+ vring->desc = desc;
+ } else {
+ return false;
+ }
+ }
} else {
desc = vring->desc;
}
/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
- mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
vring->rem_len -= len;
/* Get the next desc on the chain. */
- if (vring->rem_len > 0 &&
+ if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
- /* Done and release the pending packet. */
- mlxbf_tmfifo_release_pending_pkt(vring);
+ /* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
+ if (!IS_VRING_DROP(vring)) {
+ mlxbf_tmfifo_release_pkt(vring);
+ } else {
+ vring->pkt_len = 0;
+ vring->desc_head = NULL;
+ vring->desc = NULL;
+ return false;
+ }
/*
* Make sure the load/store are in order before
/* Release the pending packet. */
if (vring->desc)
- mlxbf_tmfifo_release_pending_pkt(vring);
+ mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
},
{
.callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUS VivoBook E410MA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
continue to be mountable and usable by newer kernels.
For more information, please see the web pages at
- http://btrfs.wiki.kernel.org.
+ https://btrfs.readthedocs.io
To compile this file system support as a module, choose M here. The
module will be called btrfs.
btrfs_mark_buffer_dirty(leaf);
fail:
btrfs_release_path(path);
- /* We didn't update the block group item, need to revert @commit_used. */
- if (ret < 0) {
+ /*
+ * We didn't update the block group item, need to revert commit_used
+ * unless the block group item didn't exist yet - this is to prevent a
+ * race with a concurrent insertion of the block group item, with
+ * insert_block_group_item(), that happened just after we attempted to
+ * update. In that case we would reset commit_used to 0 just after the
+ * insertion set it to a value greater than 0 - if the block group later
+ * ends up with 0 used bytes, we would incorrectly skip its update.
+ */
+ if (ret < 0 && ret != -ENOENT) {
spin_lock(&cache->lock);
cache->commit_used = old_commit_used;
spin_unlock(&cache->lock);
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
+ struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
struct rb_root_cached *root;
struct btrfs_delayed_root *delayed_root;
if (RB_EMPTY_NODE(&delayed_item->rb_node))
return;
- delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+ /* If it's in a rbtree, then we need to have delayed node locked. */
+ lockdep_assert_held(&delayed_node->mutex);
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
BUG_ON(!delayed_root);
if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
- root = &delayed_item->delayed_node->ins_root;
+ root = &delayed_node->ins_root;
else
- root = &delayed_item->delayed_node->del_root;
+ root = &delayed_node->del_root;
rb_erase_cached(&delayed_item->rb_node, root);
RB_CLEAR_NODE(&delayed_item->rb_node);
- delayed_item->delayed_node->count--;
+ delayed_node->count--;
finish_one_item(delayed_root);
}
ret = __btrfs_commit_inode_delayed_items(trans, path,
curr_node);
if (ret) {
- btrfs_release_delayed_node(curr_node);
- curr_node = NULL;
btrfs_abort_transaction(trans, ret);
break;
}
prev_node = curr_node;
curr_node = btrfs_next_delayed_node(curr_node);
+ /*
+ * See the comment below about releasing path before releasing
+ * node. If the commit of delayed items was successful the path
+ * should always be released, but in case of an error, it may
+ * point to locked extent buffers (a leaf at the very least).
+ */
+ ASSERT(path->nodes[0] == NULL);
btrfs_release_delayed_node(prev_node);
}
+ /*
+ * Release the path to avoid a potential deadlock and lockdep splat when
+ * releasing the delayed node, as that requires taking the delayed node's
+ * mutex. If another task starts running delayed items before we take
+ * the mutex, it will first lock the mutex and then it may try to lock
+ * the same btree path (leaf).
+ */
+ btrfs_free_path(path);
+
if (curr_node)
btrfs_release_delayed_node(curr_node);
- btrfs_free_path(path);
trans->block_rsv = block_rsv;
return ret;
btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
-/* Will return 0 or -ENOMEM */
+static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
+ return;
+
+ /*
+ * Adding the new dir index item does not require touching another
+ * leaf, so we can release 1 unit of metadata that was previously
+ * reserved when starting the transaction. This applies only to
+ * the case where we had a transaction start and excludes the
+ * transaction join case (when replaying log trees).
+ */
+ trace_btrfs_space_reservation(fs_info, "transaction",
+ trans->transid, bytes, 0);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
+ ASSERT(trans->bytes_reserved >= bytes);
+ trans->bytes_reserved -= bytes;
+}
+
+/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *dir,
mutex_lock(&delayed_node->mutex);
+ /*
+ * First attempt to insert the delayed item. This is to make the error
+ * handling path simpler in case we fail (-EEXIST). There's no risk of
+ * any other task coming in and running the delayed item before we do
+ * the metadata space reservation below, because we are holding the
+ * delayed node's mutex and that mutex must also be locked before the
+ * node's delayed items can be run.
+ */
+ ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
+ if (unlikely(ret)) {
+ btrfs_err(trans->fs_info,
+"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
+ name_len, name, index, btrfs_root_id(delayed_node->root),
+ delayed_node->inode_id, dir->index_cnt,
+ delayed_node->index_cnt, ret);
+ btrfs_release_delayed_item(delayed_item);
+ btrfs_release_dir_index_item_space(trans);
+ mutex_unlock(&delayed_node->mutex);
+ goto release_node;
+ }
+
if (delayed_node->index_item_leaves == 0 ||
delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
delayed_node->curr_index_batch_size = data_len;
* impossible.
*/
if (WARN_ON(ret)) {
- mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_item(delayed_item);
+ mutex_unlock(&delayed_node->mutex);
goto release_node;
}
delayed_node->index_item_leaves++;
- } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
- const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
-
- /*
- * Adding the new dir index item does not require touching another
- * leaf, so we can release 1 unit of metadata that was previously
- * reserved when starting the transaction. This applies only to
- * the case where we had a transaction start and excludes the
- * transaction join case (when replaying log trees).
- */
- trace_btrfs_space_reservation(fs_info, "transaction",
- trans->transid, bytes, 0);
- btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
- ASSERT(trans->bytes_reserved >= bytes);
- trans->bytes_reserved -= bytes;
- }
-
- ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
- if (unlikely(ret)) {
- btrfs_err(trans->fs_info,
- "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- name_len, name, delayed_node->root->root_key.objectid,
- delayed_node->inode_id, ret);
- BUG();
+ } else {
+ btrfs_release_dir_index_item_space(trans);
}
mutex_unlock(&delayed_node->mutex);
struct folio *folio)
{
struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
+ struct btrfs_subpage_info *spi = fs_info->subpage_info;
struct btrfs_subpage *subpage;
struct extent_buffer *eb;
int cur_bit = 0;
btrfs_assert_tree_write_locked(eb);
return filemap_dirty_folio(mapping, folio);
}
+
+ ASSERT(spi);
subpage = folio_get_private(folio);
- ASSERT(subpage->dirty_bitmap);
- while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
+ for (cur_bit = spi->dirty_offset;
+ cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
+ cur_bit++) {
unsigned long flags;
u64 cur;
- u16 tmp = (1 << cur_bit);
spin_lock_irqsave(&subpage->lock, flags);
- if (!(tmp & subpage->dirty_bitmap)) {
+ if (!test_bit(cur_bit, subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- cur_bit++;
continue;
}
spin_unlock_irqrestore(&subpage->lock, flags);
btrfs_assert_tree_write_locked(eb);
free_extent_buffer(eb);
- cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
+ cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
}
return filemap_dirty_folio(mapping, folio);
}
delta = ktime_get_seconds() - cur->start_time;
if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
- cur->state < TRANS_STATE_COMMIT_START &&
+ cur->state < TRANS_STATE_COMMIT_PREP &&
delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
delay -= msecs_to_jiffies((delta - 1) * 1000);
btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
- btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_start,
- BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
BTRFS_LOCKDEP_TRANS_UNBLOCKED);
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
while (!list_empty(&fs_info->trans_list)) {
t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list);
- if (t->state >= TRANS_STATE_COMMIT_START) {
+ if (t->state >= TRANS_STATE_COMMIT_PREP) {
refcount_inc(&t->use_count);
spin_unlock(&fs_info->trans_lock);
btrfs_wait_for_commit(fs_info, t->transid);
goto out_put;
}
+ /*
+ * We don't need the path anymore, so release it and
+ * avoid deadlocks and lockdep warnings in case
+		 * btrfs_iget() needs to look up the inode from its root
+ * btree and lock the same leaf.
+ */
+ btrfs_release_path(path);
temp_inode = btrfs_iget(sb, key2.objectid, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out_put;
}
- btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
};
enum btrfs_lockdep_trans_states {
- BTRFS_LOCKDEP_TRANS_COMMIT_START,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
BTRFS_LOCKDEP_TRANS_UNBLOCKED,
BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
BTRFS_LOCKDEP_TRANS_COMPLETED,
refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
- ASSERT(trans);
+ ASSERT(trans || BTRFS_FS_ERROR(fs_info));
if (trans) {
if (atomic_dec_and_test(&trans->pending_ordered))
wake_up(&trans->pending_wait);
* | Call btrfs_commit_transaction() on any trans handle attached to
* | transaction N
* V
- * Transaction N [[TRANS_STATE_COMMIT_START]]
+ * Transaction N [[TRANS_STATE_COMMIT_PREP]]
+ * |
+ * | If there are simultaneous calls to btrfs_commit_transaction() one will win
+ * | the race and the rest will wait for the winner to commit the transaction.
+ * |
+ * | The winner will wait for previous running transaction to completely finish
+ * | if there is one.
* |
- * | Will wait for previous running transaction to completely finish if there
- * | is one
+ * Transaction N [[TRANS_STATE_COMMIT_START]]
* |
- * | Then one of the following happes:
+ * | Then one of the following happens:
* | - Wait for all other trans handle holders to release.
* | The btrfs_commit_transaction() caller will do the commit work.
* | - Wait for current transaction to be committed by others.
*/
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
+ [TRANS_STATE_COMMIT_PREP] = 0U,
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
* Wait for the current transaction commit to start and block
* subsequent transaction joins
*/
- btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
wait_event(fs_info->transaction_blocked_wait,
cur_trans->state >= TRANS_STATE_COMMIT_START ||
TRANS_ABORTED(cur_trans));
return;
lockdep_assert_held(&trans->fs_info->trans_lock);
- ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP);
list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
ktime_t interval;
ASSERT(refcount_read(&trans->use_count) == 1);
- btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
}
spin_lock(&fs_info->trans_lock);
- if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+ if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
add_pending_snapshot(trans);
want_state = TRANS_STATE_SUPER_COMMITTED;
btrfs_trans_state_lockdep_release(fs_info,
- BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
ret = btrfs_end_transaction(trans);
wait_for_commit(cur_trans, want_state);
return ret;
}
- cur_trans->state = TRANS_STATE_COMMIT_START;
+ cur_trans->state = TRANS_STATE_COMMIT_PREP;
wake_up(&fs_info->transaction_blocked_wait);
- btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
if (cur_trans->list.prev != &fs_info->trans_list) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
btrfs_put_transaction(prev_trans);
if (ret)
goto lockdep_release;
- } else {
- spin_unlock(&fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
}
} else {
- spin_unlock(&fs_info->trans_lock);
/*
* The previous transaction was aborted and was already removed
* from the list of transactions at fs_info->trans_list. So we
* corrupt state (pointing to trees with unwritten nodes/leafs).
*/
if (BTRFS_FS_ERROR(fs_info)) {
+ spin_unlock(&fs_info->trans_lock);
ret = -EROFS;
goto lockdep_release;
}
}
+ cur_trans->state = TRANS_STATE_COMMIT_START;
+ wake_up(&fs_info->transaction_blocked_wait);
+ spin_unlock(&fs_info->trans_lock);
+
/*
* Get the time spent on the work done by the commit thread and not
* the time spent waiting on a previous commit
goto cleanup_transaction;
lockdep_trans_commit_start_release:
- btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
btrfs_end_transaction(trans);
return ret;
}
enum btrfs_trans_state {
TRANS_STATE_RUNNING,
+ TRANS_STATE_COMMIT_PREP,
TRANS_STATE_COMMIT_START,
TRANS_STATE_COMMIT_DOING,
TRANS_STATE_UNBLOCKED,
if (err)
return err;
- if (inode->i_flags & OVL_COPY_I_FLAGS_MASK) {
+ if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
+ (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
/*
* Copy the fileattr inode flags that are the source of already
* copied i_flags
struct kiocb iocb;
refcount_t ref;
struct kiocb *orig_iocb;
- struct fd fd;
};
static struct kmem_cache *ovl_aio_request_cachep;
static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
{
if (refcount_dec_and_test(&aio_req->ref)) {
- fdput(aio_req->fd);
+ fput(aio_req->iocb.ki_filp);
kmem_cache_free(ovl_aio_request_cachep, aio_req);
}
}
if (!aio_req)
goto out;
- aio_req->fd = real;
real.flags = 0;
aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, real.file);
+ kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
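+	/* get_file() pins real.file; ovl_aio_put() drops it via fput(). */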
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
if (!aio_req)
goto out;
- aio_req->fd = real;
real.flags = 0;
aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, real.file);
+ kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_flags = ifl;
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
/**
* eventfs_set_ef_status_free - set the ef->status to free
+ * @ti: the tracefs_inode of the dentry
 * @dentry: dentry whose status is to be freed
*
* eventfs_set_ef_status_free will be called if no more
* references remain
*/
-void eventfs_set_ef_status_free(struct dentry *dentry)
+void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
{
struct tracefs_inode *ti_parent;
- struct eventfs_file *ef;
+ struct eventfs_inode *ei;
+ struct eventfs_file *ef, *tmp;
+
+ /* The top level events directory may be freed by this */
+ if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
+ LIST_HEAD(ef_del_list);
+
+ mutex_lock(&eventfs_mutex);
+
+ ei = ti->private;
+
+ /* Record all the top level files */
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ lockdep_is_held(&eventfs_mutex)) {
+ list_add_tail(&ef->del_list, &ef_del_list);
+ }
+
+ /* Nothing should access this, but just in case! */
+ ti->private = NULL;
+
+ mutex_unlock(&eventfs_mutex);
+
+ /* Now safely free the top level files and their children */
+ list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+ list_del(&ef->del_list);
+ eventfs_remove(ef);
+ }
+
+ kfree(ei);
+ return;
+ }
mutex_lock(&eventfs_mutex);
+
ti_parent = get_tracefs(dentry->d_parent->d_inode);
if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
goto out;
ei = ti->private;
idx = srcu_read_lock(&eventfs_srcu);
- list_for_each_entry_rcu(ef, &ei->e_top_files, list) {
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ srcu_read_lock_held(&eventfs_srcu)) {
create_dentry(ef, dentry, false);
}
srcu_read_unlock(&eventfs_srcu, idx);
struct tracefs_inode *ti;
struct inode *inode;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (IS_ERR(dentry))
return dentry;
INIT_LIST_HEAD(&ei->e_top_files);
ti = get_tracefs(inode);
- ti->flags |= TRACEFS_EVENT_INODE;
+ ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE;
ti->private = ei;
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
struct eventfs_inode *ei_parent;
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!parent)
return ERR_PTR(-EINVAL);
{
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!ef_parent)
return ERR_PTR(-EINVAL);
struct eventfs_inode *ei;
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return -ENODEV;
+
if (!parent)
return -EINVAL;
{
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return -ENODEV;
+
if (!ef_parent)
return -EINVAL;
void eventfs_remove_events_dir(struct dentry *dentry)
{
struct tracefs_inode *ti;
- struct eventfs_inode *ei;
if (!dentry || !dentry->d_inode)
return;
if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
return;
- ei = ti->private;
d_invalidate(dentry);
dput(dentry);
- kfree(ei);
}
ti = get_tracefs(inode);
if (ti && ti->flags & TRACEFS_EVENT_INODE)
- eventfs_set_ef_status_free(dentry);
+ eventfs_set_ef_status_free(ti, dentry);
iput(inode);
}
*/
struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
{
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
return __create_dir(name, parent, &simple_dir_inode_operations);
}
#define _TRACEFS_INTERNAL_H
enum {
- TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_TOP_INODE = BIT(2),
};
struct tracefs_inode {
struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
struct dentry *eventfs_failed_creating(struct dentry *dentry);
struct dentry *eventfs_end_creating(struct dentry *dentry);
-void eventfs_set_ef_status_free(struct dentry *dentry);
+void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry);
#endif /* _TRACEFS_INTERNAL_H */
#ifdef CONFIG_IA64
#define KSYM_FUNC(name) @fptr(name)
+#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
#else
#define KSYM_FUNC(name) name
#endif
/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
- u16 offset;
u16 len;
+ u16 offset;
#else
- u16 len;
u16 offset;
+ u16 len;
#endif
-};
+} __packed;
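+
+/*
+ * A minimal sketch (not part of the patch): this struct overlays the usual
+ * __data_loc word, which packs a dynamic field as (len << 16) | offset. The
+ * #ifdef above only keeps the two u16 halves lined up with that u32 layout on
+ * both endiannesses, so hypothetical decode helpers stay endian-agnostic:
+ *
+ *	static inline u16 data_loc_offset(u32 loc) { return loc & 0xffff; }
+ *	static inline u16 data_loc_len(u32 loc)    { return loc >> 16; }
+ */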
/*
* The trace entry - the most basic unit of tracing. This is what
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
struct eventfs_file *ef;
- struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
err = -ENOMEM;
goto out_err;
}
+
+ cond_resched();
}
cpus_read_lock();
*/
commit = rb_page_commit(iter_head_page);
smp_rmb();
+
+ /* An event needs to be at least 8 bytes in size */
+ if (iter->head > commit - 8)
+ goto reset;
+
event = __rb_page_index(iter_head_page, iter->head);
length = rb_event_length(event);
init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
tr->d_max_latency = trace_create_file("tracing_max_latency",
TRACE_MODE_WRITE,
- d_tracer, &tr->max_latency,
+ d_tracer, tr,
&tracing_max_lat_fops);
}
#define trace_create_maxlat_file(tr, d_tracer) \
trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
- d_tracer, &tr->max_latency, &tracing_max_lat_fops)
+ d_tracer, tr, &tracing_max_lat_fops)
#endif
return 0;
}
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated with it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(file->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
+
+ return 0;
+}
+
static int tracing_mark_open(struct inode *inode, struct file *filp)
{
stream_open(inode, filp);
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
}
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
}
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
#endif
static const struct file_operations set_tracer_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_pipe_fops = {
return cnt;
}
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+ struct trace_option_dentry *topt = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(topt->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+ struct trace_option_dentry *topt = file->private_data;
+
+ trace_array_put(topt->tr);
+ return 0;
+}
static const struct file_operations trace_options_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_options,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_options,
};
/*
tr, &tracing_mark_fops);
file = __find_event_file(tr, "ftrace", "print");
- if (file && file->dir)
- trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
+ if (file && file->ef)
+ eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
file, &event_trigger_fops);
tr->trace_marker_file = file;
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+int tracing_open_file_tr(struct inode *inode, struct file *filp);
+int tracing_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
static void remove_event_file_dir(struct trace_event_file *file)
{
- struct dentry *dir = file->dir;
- struct dentry *child;
-
- if (dir) {
- spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_child) {
- if (d_really_is_positive(child)) /* probably unneeded */
- d_inode(child)->i_private = NULL;
- }
- spin_unlock(&dir->d_lock);
-
- tracefs_remove(dir);
- }
eventfs_remove(file->ef);
list_del(&file->list);
remove_subsystem(file->system);
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_enable_read,
.write = event_enable_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
};
static const struct file_operations ftrace_event_filter_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_filter_read,
.write = event_filter_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
{
struct event_subsystem *system, *iter;
struct trace_subsystem_dir *dir;
+ struct eventfs_file *ef;
int res;
/* First see if we did not already create this dir */
} else
__get_system(system);
- dir->ef = eventfs_add_subsystem_dir(name, parent);
- if (IS_ERR(dir->ef)) {
+ ef = eventfs_add_subsystem_dir(name, parent);
+ if (IS_ERR(ef)) {
pr_warn("Failed to create system directory %s\n", name);
__put_system(system);
goto out_free;
}
+ dir->ef = ef;
dir->tr = tr;
dir->ref_count = 1;
dir->nr_events = 1;
struct trace_event_call *call = file->event_call;
struct eventfs_file *ef_subsystem = NULL;
struct trace_array *tr = file->tr;
+ struct eventfs_file *ef;
const char *name;
int ret;
return -ENOMEM;
name = trace_event_name(call);
- file->ef = eventfs_add_dir(name, ef_subsystem);
- if (IS_ERR(file->ef)) {
+ ef = eventfs_add_dir(name, ef_subsystem);
+ if (IS_ERR(ef)) {
pr_warn("Could not create tracefs '%s' directory\n", name);
return -1;
}
+ file->ef = ef;
+
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file,
&ftrace_enable_fops);
}
const struct file_operations event_inject_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_inject_read,
.write = event_inject_write,
+ .release = tracing_release_file_tr,
};
break;
default:
- trace_seq_printf(s, print_fmt, name, val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u64, space);
break;
}
}
};
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
-static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
+static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
const int len = strlen(filter_glob);
if (!period) {
parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
parsed->test_glob = NULL;
strcpy(parsed->suite_glob, filter_glob);
- return;
+ return 0;
}
parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+ if (!parsed->test_glob) {
+ kfree(parsed->suite_glob);
+ return -ENOMEM;
+ }
strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
+
+ return 0;
}
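
/*
 * Examples of the split above (illustration only): "list*.test_a" yields
 * suite_glob = "list*" and test_glob = "test_a"; a filter without a period,
 * such as "list*", yields suite_glob = "list*" and test_glob = NULL.
 */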
/* Create a copy of suite with only tests that match test_glob. */
}
copy_start = copy;
- if (filter_glob)
- kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (filter_glob) {
+ *err = kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (*err)
+ goto free_copy;
+ }
/* Parse attribute filters */
if (filters) {
filter_count = kunit_get_filter_count(filters);
parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
if (!parsed_filters) {
- kfree(copy);
- return filtered;
+ *err = -ENOMEM;
+ goto free_parsed_glob;
}
for (j = 0; j < filter_count; j++)
parsed_filters[j] = kunit_next_attr_filter(&filters, err);
if (*err)
- goto err;
+ goto free_parsed_filters;
}
for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
parsed_glob.test_glob);
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
- goto err;
+ goto free_parsed_filters;
}
}
if (filter_count > 0 && parsed_filters != NULL) {
filtered_suite = new_filtered_suite;
if (*err)
- goto err;
+ goto free_parsed_filters;
+
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
- goto err;
+ goto free_parsed_filters;
}
if (!filtered_suite)
break;
filtered.start = copy_start;
filtered.end = copy;
-err:
- if (*err)
- kfree(copy);
+free_parsed_filters:
+ if (filter_count)
+ kfree(parsed_filters);
+free_parsed_glob:
if (filter_glob) {
kfree(parsed_glob.suite_glob);
kfree(parsed_glob.test_glob);
}
- if (filter_count)
- kfree(parsed_filters);
+free_copy:
+ if (*err)
+ kfree(copy);
return filtered;
}
{
int j, filter_count;
struct kunit_attr_filter *parsed_filters;
- char *filters = "speed>slow, module!=example";
+ char filters[] = "speed>slow, module!=example", *filter = filters;
int err = 0;
filter_count = kunit_get_filter_count(filters);
parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
GFP_KERNEL);
for (j = 0; j < filter_count; j++) {
- parsed_filters[j] = kunit_next_attr_filter(&filters, &err);
+ parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
		KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters);
}
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
+ char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
* attribute is unset and thus, the filtering is based on the parent attribute
* of slow.
*/
- got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
+ char filter[] = "module!=dummy";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
- got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start); /* just in case */
.start = subsuite, .end = &subsuite[1],
};
struct kunit_suite_set got;
+ char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
/* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
- got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
switch (val) {
case MODULE_STATE_LIVE:
- kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
+ kunit_module_init(mod);
+ break;
case MODULE_STATE_UNFORMED:
break;
}
*/
s->is_func = (ELF_ST_TYPE(sym->st_info) == STT_FUNC);
+ /*
+	 * For parisc64, symbols prefixed with $$ from the library have the
+	 * symbol type STT_LOPROC. They should be handled as functions too.
+ */
+ if (elf->hdr->e_ident[EI_CLASS] == ELFCLASS64 &&
+ elf->hdr->e_machine == EM_PARISC &&
+ ELF_ST_TYPE(sym->st_info) == STT_LOPROC)
+ s->is_func = true;
+
if (match(secname, PATTERNS(INIT_SECTIONS)))
warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n",
mod->name, name);
instance_set() {
while :; do
- echo 1 > foo/events/sched/sched_switch
+ echo 1 > foo/events/sched/sched_switch/enable
done 2> /dev/null
}
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger trace action with dynamic string param
-# requires: set_event synthetic_events events/sched/sched_process_exec/hist "char name[]' >> synthetic_events":README ping:program
+# requires: set_event synthetic_events events/sched/sched_process_exec/hist "' >> synthetic_events":README ping:program
fail() { #msg
echo $1
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic_events syntax parser errors
-# requires: synthetic_events error_log "char name[]' >> synthetic_events":README
+# requires: synthetic_events error_log "' >> synthetic_events":README
check_error() { # command-with-error-pos-by-^
ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
{
# Make sure tests will time out if utility is available.
if [ -x /usr/bin/timeout ] ; then
- /usr/bin/timeout --foreground "$kselftest_timeout" $1
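+		# The nesting below is deliberate: per timeout(1), --foreground
+		# lets the test use the TTY but does not time out the test's
+		# children, so the inner plain timeout runs $1 in a separate
+		# process group and kills the whole group on expiry.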
+ /usr/bin/timeout --foreground "$kselftest_timeout" \
+ /usr/bin/timeout "$kselftest_timeout" $1
else
$1
fi
print_targets=0
while getopts "p" arg; do
- case $arg in
- p)
+ case $arg in
+ p)
print_targets=1
shift;;
- esac
+ esac
done
if [ $# -eq 0 ]
# Get all TARGETS from selftests Makefile
targets=$(grep -E "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
+# Initially, in LDLIBS related lines, the dep checker needs
+# to ignore lines containing the following strings:
+filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS"
+
# Single test case
if [ $# -eq 2 ]
then
l1_test $test
l2_test $test
l3_test $test
+ l4_test $test
+ l5_test $test
print_results $1 $2
exit $?
# Append space at the end of the list to append more tests.
l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
- grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+ grep -v "$filter" | awk -F: '{print $1}' | uniq)
# Level 2: LDLIBS set dynamically.
#
# Append space at the end of the list to append more tests.
l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
- grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+ grep -v "$filter" | awk -F: '{print $1}' | uniq)
# Level 3
# memfd and others use pkg-config to find mount and fuse libs
# VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
- grep -v "pkg-config" | awk -F: '{print $1}')
+ grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
-#echo $l1_tests
-#echo $l2_1_tests
-#echo $l3_tests
+# Level 4
+# some tests may fall back to default using `|| echo -l<libname>`
+# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS
+# as per level 3 checks.
+# e.g:
+# netfilter/Makefile
+# LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+ grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
+
+# Level 5
+# some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS,
+# which in turn may be defined in a sub-Makefile
+# e.g.:
+# mm/Makefile
+# $(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS)
+l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \
+ awk -F: '{print $1}' | uniq)
+
+#echo l1_tests $l1_tests
+#echo l2_tests $l2_tests
+#echo l3_tests $l3_tests
+#echo l4_tests $l4_tests
+#echo l5_tests $l5_tests
all_tests
print_results $1 $2
for test in $l3_tests; do
l3_test $test
done
+
+ for test in $l4_tests; do
+ l4_test $test
+ done
+
+ for test in $l5_tests; do
+ l5_test $test
+ done
}
# Use same parsing used for l1_tests and pick libraries this time.
l1_test()
{
test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
- grep -v "VAR_LDLIBS" | \
+ grep -v "$filter" | \
sed -e 's/\:/ /' | \
sed -e 's/+/ /' | cut -d "=" -f 2)
check_libs $test $test_libs
}
-# Use same parsing used for l2__tests and pick libraries this time.
+# Use same parsing used for l2_tests and pick libraries this time.
l2_test()
{
test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
- grep -v "VAR_LDLIBS" | \
+ grep -v "$filter" | \
sed -e 's/\:/ /' | sed -e 's/+/ /' | \
cut -d "=" -f 2)
check_libs $test $test_libs
}
+l4_test()
+{
+ test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \
+ grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
+ sed -e 's/.*|| echo //' | sed -e 's/)$//')
+
+ check_libs $test $test_libs
+}
+
+l5_test()
+{
+ tests=$(find $(dirname "$test") -type f -name "*.mk")
+ test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \
+ cut -d "=" -f 2)
+
+ check_libs $test $test_libs
+}
+
check_libs()
{
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
- rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
fi
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
define INSTALL_SINGLE_RULE
$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
- $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
+ $(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
endef
define INSTALL_RULE