 {
 	struct zram_meta *meta = zram->meta;
 	unsigned long handle = meta->table[index].handle;
-	u16 size = meta->table[index].size;
 
 	if (unlikely(!handle)) {
 		/*
@@ ... @@
 		return;
 	}
 
-	if (unlikely(size > max_zpage_size))
-		atomic_dec(&zram->stats.bad_compress);
-
 	zs_free(meta->mem_pool, handle);
 
-	if (size <= PAGE_SIZE / 2)
-		atomic_dec(&zram->stats.good_compress);
-
 	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
 	atomic_dec(&zram->stats.pages_stored);
 }
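After this hunk, zram_free_page only has to roll back the two counters the patch keeps, compr_size and pages_stored. Those two are sufficient to recover the device-wide compression ratio, which is the usual argument for dropping the coarse good/bad buckets. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages; the helper name and example values are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size for the example */

/*
 * Hypothetical helper: device-wide compression ratio from the two
 * counters the patch keeps, i.e. compressed bytes (compr_size) over
 * original bytes (pages_stored full pages). Not kernel code.
 */
static double compression_ratio(uint64_t compr_size, uint64_t pages_stored)
{
	if (pages_stored == 0)
		return 0.0;
	return (double)compr_size / (double)(pages_stored * PAGE_SIZE);
}

int main(void)
{
	/* e.g. 1000 stored pages occupying 1.5 MiB compressed */
	printf("ratio = %.2f\n", compression_ratio(1536 * 1024, 1000));
	return 0;
}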
@@ ... @@
 	if (unlikely(clen > max_zpage_size)) {
-		atomic_inc(&zram->stats.bad_compress);
 		clen = PAGE_SIZE;
 		src = NULL;
 		if (is_partial_io(bvec))
@@ ... @@
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_size);
 	atomic_inc(&zram->stats.pages_stored);
-	if (clen <= PAGE_SIZE / 2)
-		atomic_inc(&zram->stats.good_compress);
-
 out:
 	if (locked)
 		mutex_unlock(&meta->buffer_lock);
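The removed increments also show why the buckets were of limited value: good_compress covered clen <= PAGE_SIZE / 2 and bad_compress covered clen > max_zpage_size, so a page whose compressed size fell between the two thresholds was counted by neither. A small sketch of that classification, assuming zram's usual max_zpage_size of three quarters of PAGE_SIZE (all values illustrative):

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define MAX_ZPAGE_SIZE (PAGE_SIZE / 4 * 3)	/* assumed zram default */

/* 'g' = good_compress bucket, 'b' = bad_compress, '-' = counted by neither */
static char classify(unsigned long clen)
{
	if (clen <= PAGE_SIZE / 2)
		return 'g';
	if (clen > MAX_ZPAGE_SIZE)
		return 'b';
	return '-';
}

int main(void)
{
	/* 2048 -> g, 2500 -> - (the unaccounted 50..75% band), 3500 -> b */
	printf("%c %c %c\n", classify(2048), classify(2500), classify(3500));
	return 0;
}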
@@ ... @@
 	atomic64_t notify_free;	/* no. of swap slot free notifications */
 	atomic_t pages_zero;	/* no. of zero filled pages */
 	atomic_t pages_stored;	/* no. of pages currently stored */
-	atomic_t good_compress;	/* % of pages with compression ratio<=50% */
-	atomic_t bad_compress;	/* % of pages with compression ratio>=75% */
 };
 
 struct zram_meta {
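For reference, a trimmed userspace approximation of the stats block as it looks after this hunk, with C11 atomics standing in for the kernel's atomic_t/atomic64_t and the two update paths the patch leaves behind; a sketch under those substitutions, not the kernel structure itself:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Post-patch stats, approximated with C11 atomics; the kernel uses
 * atomic_t/atomic64_t with atomic_inc()/atomic64_add() instead. */
struct zram_stats_sketch {
	atomic_uint_fast64_t compr_size;	/* compressed size of stored pages */
	atomic_uint_fast64_t notify_free;	/* swap slot free notifications */
	atomic_uint_fast32_t pages_zero;	/* zero-filled pages */
	atomic_uint_fast32_t pages_stored;	/* pages currently stored */
};

/* write-path bookkeeping, mirroring the hunk above "out:" */
static void account_store(struct zram_stats_sketch *s, uint64_t clen)
{
	atomic_fetch_add(&s->compr_size, clen);
	atomic_fetch_add(&s->pages_stored, 1);
}

/* free-path bookkeeping, mirroring zram_free_page after the patch */
static void account_free(struct zram_stats_sketch *s, uint64_t size)
{
	atomic_fetch_sub(&s->compr_size, size);
	atomic_fetch_sub(&s->pages_stored, 1);
}

int main(void)
{
	struct zram_stats_sketch s = {0};

	account_store(&s, 1024);
	account_store(&s, 3072);
	account_free(&s, 1024);
	printf("pages_stored=%llu compr_size=%llu\n",
	       (unsigned long long)atomic_load(&s.pages_stored),
	       (unsigned long long)atomic_load(&s.compr_size));
	return 0;
}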