return read_int("/sys/kernel/debug/zswap/stored_pages", value);
}
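+/* written_back_pages counts pages written back from zswap system-wide */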
+static int get_zswap_written_back_pages(size_t *value)
+{
+ return read_int("/sys/kernel/debug/zswap/written_back_pages", value);
+}
+
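+/* cg_run() helper: allocate "size" bytes of anonymous memory and touch it */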
+static int allocate_bytes(const char *cgroup, void *arg)
+{
+ size_t size = (size_t)arg;
+ char *mem = (char *)malloc(size);
+
+ if (!mem)
+ return -1;
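+	/* Write to every page so the allocation is actually faulted in */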
+ for (int i = 0; i < size; i += 4095)
+ mem[i] = 'a';
+ free(mem);
+ return 0;
+}
+
+/*
+ * When zswap tries to store a page of a memcg that has hit its zswap limit
+ * (memory.zswap.max), writeback should not be triggered.
+ *
+ * This was fixed by commit 0bdf0efa180a ("zswap: do not shrink if cgroup may
+ * not zswap"). This test needs to be revised once a per-memcg writeback
+ * mechanism is implemented.
+ */
+static int test_no_invasive_cgroup_shrink(const char *root)
+{
+ size_t written_back_before, written_back_after;
+ int ret = KSFT_FAIL;
+ char *test_group;
+
+ /* Set up */
+ test_group = cg_name(root, "no_shrink_test");
+ if (!test_group)
+ goto out;
+ if (cg_create(test_group))
+ goto out;
+ if (cg_write(test_group, "memory.max", "1M"))
+ goto out;
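+	/* Tiny zswap limit so the memcg hits it during the allocation */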
+ if (cg_write(test_group, "memory.zswap.max", "10K"))
+ goto out;
+ if (get_zswap_written_back_pages(&written_back_before))
+ goto out;
+
+ /* Allocate 10x memory.max to push memory into zswap */
+ if (cg_run(test_group, allocate_bytes, (void *)MB(10)))
+ goto out;
+
+ /* Verify that no writeback happened because of the memcg allocation */
+ if (get_zswap_written_back_pages(&written_back_after))
+ goto out;
+ if (written_back_after == written_back_before)
+ ret = KSFT_PASS;
+out:
+ cg_destroy(test_group);
+ free(test_group);
+ return ret;
+}
+
struct no_kmem_bypass_child_args {
	size_t target_alloc_bytes;
	size_t child_allocated;
@@ ... @@
	const char *name;
} tests[] = {
T(test_no_kmem_bypass),
+ T(test_no_invasive_cgroup_shrink),
};
#undef T