/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return sprintf(b, "%llu\n",					\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
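
/*
 * For example, ZRAM_ATTR_RO(num_reads) expands to a
 * zram_attr_num_reads_show() helper that prints zram->stats.num_reads,
 * plus a matching read-only dev_attr_num_reads. Each such attribute is
 * listed in zram_disk_attrs[] below and surfaces as
 * /sys/block/zram<id>/num_reads.
 */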
static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return sprintf(buf, "%u\n", val);
}
static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return sprintf(buf, "%d\n", val);
}
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);

	if (kstrtoint(buf, 0, &num))
		return -EINVAL;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		/* the message reports failure, so test for !success */
		if (!zcomp_set_max_streams(zram->comp, num))
			pr_info("Cannot change max compression streams\n");
	}

	zram->max_comp_streams = num;
	up_write(&zram->init_lock);

	return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}
/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
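
/*
 * A partial I/O (bv_len < PAGE_SIZE, e.g. a 512-byte filesystem access)
 * cannot be served directly: zram stores whole compressed pages, so reads
 * decompress into a bounce buffer and writes must read-modify-write the
 * full page (see zram_bvec_read()/zram_bvec_write() below).
 */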
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
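
/*
 * Example, assuming the usual ZRAM_LOGICAL_BLOCK_SIZE of 4096 (so
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a bio starting at sector 9 is
 * rejected as unaligned (9 & 7 != 0), while one starting at sector 16
 * with bi_size 4096 passes all of the checks above.
 */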
static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	return NULL;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
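
/*
 * Example, with 4KiB pages: a 2048-byte bvec at offset 2048 ends exactly
 * on a page boundary, so the next segment starts at index + 1, offset 0.
 */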
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
/* NOTE: caller must hold meta->tb_lock for writing */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		/* user_mem is already unmapped (and NULL) for partial IO */
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	return ret;
}
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);
	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);
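
/*
 * All of these attributes, including the ZRAM_ATTR_RO stats below, are
 * exposed under /sys/block/zram<id>/ once the disk is added.
 */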
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};
static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}
module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
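
/*
 * Typical usage from userspace (see Documentation/blockdev/zram.txt):
 *
 *	modprobe zram num_devices=1
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */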
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");