scftorture: Forgive memory-allocation failure if KASAN
author: Paul E. McKenney <paulmck@kernel.org>
Tue, 16 May 2023 02:00:10 +0000 (19:00 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 23 Sep 2023 09:11:00 +0000 (11:11 +0200)
[ Upstream commit 013608cd0812bdb21fc26d39ed8fdd2fc76e8b9b ]

Kernels built with CONFIG_KASAN=y quarantine newly freed memory in order
to better detect use-after-free errors.  However, this can exhaust memory
more quickly in allocator-heavy tests, which can result in spurious
scftorture failure.  This commit therefore forgives memory-allocation
failure in kernels built with CONFIG_KASAN=y, but continues counting
the errors for use in detailed test-result analyses.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/scftorture.c

index 5d113aa59e7732ecb285ac746a5eee4fcb111932..83c33ba0ca7e00b148cd6fdae652759df113d2ce 100644 (file)
@@ -171,7 +171,8 @@ static void scf_torture_stats_print(void)
                scfs.n_all_wait += scf_stats_p[i].n_all_wait;
        }
        if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
-           atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
+           atomic_read(&n_mb_out_errs) ||
+           (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
                bangstr = "!!! ";
        pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
                 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
@@ -323,7 +324,8 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
                preempt_disable();
        if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
                scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
-               if (WARN_ON_ONCE(!scfcp)) {
+               if (!scfcp) {
+                       WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
                        atomic_inc(&n_alloc_errs);
                } else {
                        scfcp->scfc_cpu = -1;