2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 * Based on jffs2 zlib code:
19 * Copyright © 2001-2007 Red Hat, Inc.
20 * Created by David Woodhouse <dwmw2@infradead.org>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/zlib.h>
26 #include <linux/zutil.h>
27 #include <linux/vmalloc.h>
28 #include <linux/init.h>
29 #include <linux/err.h>
30 #include <linux/sched.h>
31 #include <linux/pagemap.h>
32 #include <linux/bio.h>
33 #include "compression.h"
35 /* Plan: call deflate() with avail_in == *sourcelen,
36 avail_out = *dstlen - 12 and flush == Z_FINISH.
37 If it doesn't manage to finish, call it again with
38 avail_in == 0 and avail_out set to the remaining 12
39 bytes for it to clean up.
40 Q: Is 12 bytes sufficient?
/* output slack reserved so the final Z_FINISH flush always fits (see plan above) */
42 #define STREAM_END_SPACE 12

/* NOTE(review): this line appears to be the tail of a `struct workspace`
 * definition whose opening lines are not visible in this extract —
 * confirm against the full file. */
48 struct list_head list;

/* Pool of reusable zlib workspaces: an idle list guarded by
 * workspace_lock, a live-allocation counter, and a waitqueue used to
 * throttle allocators (capped near num_online_cpus(), see
 * find_zlib_workspace()). */
51 static LIST_HEAD(idle_workspace);
52 static DEFINE_SPINLOCK(workspace_lock);
53 static unsigned long num_workspace;
54 static atomic_t alloc_workspace = ATOMIC_INIT(0);
55 static DECLARE_WAIT_QUEUE_HEAD(workspace_wait);
58 * this finds an available zlib workspace or allocates a new one
59 * NULL or an ERR_PTR is returned if things go bad.
/*
 * find_zlib_workspace - grab an idle zlib workspace or allocate a new one.
 *
 * A workspace bundles deflate scratch memory, inflate scratch memory and
 * a one-page bounce buffer.  The number of live workspaces is throttled
 * against num_online_cpus(); excess callers sleep on workspace_wait.
 * NOTE(review): several lines (gotos, returns, error-branch bodies) are
 * missing from this extract, so the exact return/unwind paths cannot be
 * confirmed here — per the comment above, NULL or an ERR_PTR on failure.
 */
61 static struct workspace *find_zlib_workspace(void)
63 struct workspace *workspace;
65 int cpus = num_online_cpus();
/* fast path: pop a workspace off the idle list */
68 spin_lock(&workspace_lock);
69 if (!list_empty(&idle_workspace)) {
70 workspace = list_entry(idle_workspace.next, struct workspace,
72 list_del(&workspace->list);
74 spin_unlock(&workspace_lock);
/* too many workspaces already allocated: sleep until one is freed
 * or the idle list gains an entry, then retry (retry jump itself is
 * not visible in this extract) */
78 if (atomic_read(&alloc_workspace) > cpus) {
81 spin_unlock(&workspace_lock);
82 prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
83 if (atomic_read(&alloc_workspace) > cpus && !num_workspace)
85 finish_wait(&workspace_wait, &wait);
/* slow path: account for the new workspace, then allocate it */
88 atomic_inc(&alloc_workspace);
89 spin_unlock(&workspace_lock);
91 workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
/* deflate and inflate each need their own vmalloc'd scratch area */
97 workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
98 if (!workspace->def_strm.workspace) {
102 workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
103 if (!workspace->inf_strm.workspace) {
/* one page used as a decompression bounce buffer */
107 workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
108 if (!workspace->buf) {
/* error unwind: free whatever was allocated, drop the allocation
 * count and wake anyone throttled above */
115 vfree(workspace->inf_strm.workspace);
117 vfree(workspace->def_strm.workspace);
120 atomic_dec(&alloc_workspace);
121 wake_up(&workspace_wait);
126 * put a workspace struct back on the list or free it if we have enough
127 * idle ones sitting around
/*
 * free_workspace - return a workspace to the idle pool, or destroy it.
 *
 * If fewer than num_online_cpus() workspaces are idle, the workspace is
 * appended to idle_workspace for reuse; otherwise its scratch areas and
 * buffer are freed and the live-allocation count is dropped.  Waiters
 * on workspace_wait are woken on both paths.
 */
129 static int free_workspace(struct workspace *workspace)
131 spin_lock(&workspace_lock);
/* pool has room: recycle the workspace */
132 if (num_workspace < num_online_cpus()) {
133 list_add_tail(&workspace->list, &idle_workspace);
135 spin_unlock(&workspace_lock);
136 if (waitqueue_active(&workspace_wait))
137 wake_up(&workspace_wait);
/* pool is full enough: tear this workspace down instead */
140 spin_unlock(&workspace_lock);
141 vfree(workspace->def_strm.workspace);
142 vfree(workspace->inf_strm.workspace);
143 kfree(workspace->buf);
146 atomic_dec(&alloc_workspace);
147 if (waitqueue_active(&workspace_wait))
148 wake_up(&workspace_wait);
153 * cleanup function for module exit
/*
 * free_workspaces - module-exit cleanup: destroy every workspace still
 * sitting on the idle list, releasing its scratch memory and buffer and
 * dropping the allocation count for each one.
 */
155 static void free_workspaces(void)
157 struct workspace *workspace;
158 while (!list_empty(&idle_workspace)) {
159 workspace = list_entry(idle_workspace.next, struct workspace,
161 list_del(&workspace->list);
162 vfree(workspace->def_strm.workspace);
163 vfree(workspace->inf_strm.workspace);
164 kfree(workspace->buf);
166 atomic_dec(&alloc_workspace);
171 * given an address space and start/len, compress the bytes.
173 * pages are allocated to hold the compressed result and stored
176 * out_pages is used to return the number of pages allocated. There
177 * may be pages allocated even if we return an error
179 * total_in is used to return the number of bytes actually read. It
180 * may be smaller then len if we had to exit early because we
181 * ran out of room in the pages array or because we cross the
184 * total_out is used to return the total number of compressed bytes
186 * max_out tells us the max number of bytes that we're allowed to
/* (parameters/returns are documented in the comment block above; this
 * extract is missing a number of lines — gotos, closing braces, some
 * error-branch bodies — so comments below hedge where the code is not
 * visible) */
189 int btrfs_zlib_compress_pages(struct address_space *mapping,
190 u64 start, unsigned long len,
192 unsigned long nr_dest_pages,
193 unsigned long *out_pages,
194 unsigned long *total_in,
195 unsigned long *total_out,
196 unsigned long max_out)
199 struct workspace *workspace;
203 struct page *in_page = NULL;
204 struct page *out_page = NULL;
205 unsigned long bytes_left;
211 workspace = find_zlib_workspace();
212 if (IS_ERR(workspace))
/* compression level 3: btrfs' fixed speed/ratio tradeoff */
215 if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
216 printk(KERN_WARNING "deflateInit failed\n");
221 workspace->def_strm.total_in = 0;
222 workspace->def_strm.total_out = 0;
/* map the first input page from the page cache and allocate/map the
 * first output page */
224 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
225 data_in = kmap(in_page);
227 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
228 if (out_page == NULL) {
232 cpage_out = kmap(out_page);
236 workspace->def_strm.next_in = data_in;
237 workspace->def_strm.next_out = cpage_out;
238 workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
239 workspace->def_strm.avail_in = min(len, PAGE_CACHE_SIZE);
/* main loop: feed deflate one input page at a time, Z_SYNC_FLUSH so
 * output is pushed out as we go */
241 while (workspace->def_strm.total_in < len) {
242 ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH);
244 printk(KERN_DEBUG "btrfs deflate in loop returned %d\n",
246 zlib_deflateEnd(&workspace->def_strm);
251 /* we're making it bigger, give up */
252 if (workspace->def_strm.total_in > 8192 &&
253 workspace->def_strm.total_in <
254 workspace->def_strm.total_out) {
258 /* we need another page for writing out. Test this
259 * before the total_in so we will pull in a new page for
260 * the stream end if required
262 if (workspace->def_strm.avail_out == 0) {
/* out of destination pages: stop (break/goto not visible here) */
264 if (nr_pages == nr_dest_pages) {
269 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
270 if (out_page == NULL) {
274 cpage_out = kmap(out_page);
275 pages[nr_pages] = out_page;
277 workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
278 workspace->def_strm.next_out = cpage_out;
/* done reading: fall through to the stream-end handling below */
281 if (workspace->def_strm.total_in >= len)
284 /* we've read in a full page, get a new one */
285 if (workspace->def_strm.avail_in == 0) {
/* exceeded caller's output budget: abandon compression */
286 if (workspace->def_strm.total_out > max_out)
289 bytes_left = len - workspace->def_strm.total_in;
291 page_cache_release(in_page);
293 start += PAGE_CACHE_SIZE;
294 in_page = find_get_page(mapping,
295 start >> PAGE_CACHE_SHIFT);
296 data_in = kmap(in_page);
297 workspace->def_strm.avail_in = min(bytes_left,
299 workspace->def_strm.next_in = data_in;
/* no more input: let deflate emit the stream trailer */
302 workspace->def_strm.avail_in = 0;
303 ret = zlib_deflate(&workspace->def_strm, Z_FINISH);
304 zlib_deflateEnd(&workspace->def_strm);
306 if (ret != Z_STREAM_END) {
/* compressed result is not smaller than the input: presumably treated
 * as failure — the branch body is not visible in this extract */
311 if (workspace->def_strm.total_out >= workspace->def_strm.total_in) {
317 *total_out = workspace->def_strm.total_out;
318 *total_in = workspace->def_strm.total_in;
/* exit path: report pages used, release the last mapped input page,
 * return the workspace to the pool */
320 *out_pages = nr_pages;
326 page_cache_release(in_page);
328 free_workspace(workspace);
333 * pages_in is an array of pages with compressed data.
335 * disk_start is the starting logical offset of this array in the file
337 * bvec is a bio_vec of pages from the file that we want to decompress into
339 * vcnt is the count of pages in the biovec
341 * srclen is the number of bytes in pages_in
343 * The basic idea is that we have a bio that was created by readpages.
344 * The pages in the bio are for the uncompressed data, and they may not
345 * be contiguous. They all correspond to the range of bytes covered by
346 * the compressed extent.
/* (overall contract is documented in the comment block above; this
 * extract is missing lines — gotos, closing braces, some increments —
 * so comments below hedge where the code is not visible) */
348 int btrfs_zlib_decompress_biovec(struct page **pages_in,
350 struct bio_vec *bvec,
355 int wbits = MAX_WBITS;
356 struct workspace *workspace;
358 size_t total_out = 0;
359 unsigned long page_bytes_left;
360 unsigned long page_in_index = 0;
361 unsigned long page_out_index = 0;
362 struct page *page_out;
363 unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
365 unsigned long buf_start;
366 unsigned long buf_offset;
368 unsigned long working_bytes;
369 unsigned long pg_offset;
370 unsigned long start_byte;
371 unsigned long current_buf_start;
374 workspace = find_zlib_workspace();
375 if (IS_ERR(workspace))
/* wire the inflate stream to the first compressed page and the
 * workspace bounce buffer */
378 data_in = kmap(pages_in[page_in_index]);
379 workspace->inf_strm.next_in = data_in;
380 workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
381 workspace->inf_strm.total_in = 0;
383 workspace->inf_strm.total_out = 0;
384 workspace->inf_strm.next_out = workspace->buf;
385 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
386 page_out = bvec[page_out_index].bv_page;
387 page_bytes_left = PAGE_CACHE_SIZE;
390 /* If it's deflate, and it's got no preset dictionary, then
391 we can tell zlib to skip the adler32 check. */
392 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
393 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
394 !(((data_in[0]<<8) + data_in[1]) % 31)) {
/* negative windowBits = raw deflate: skip the 2-byte zlib header
 * ourselves and the trailing checksum in zlib */
396 wbits = -((data_in[0] >> 4) + 8);
397 workspace->inf_strm.next_in += 2;
398 workspace->inf_strm.avail_in -= 2;
401 if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
402 printk(KERN_WARNING "inflateInit failed\n");
/* main loop: inflate into the bounce buffer, then scatter the bytes
 * into the biovec pages */
406 while (workspace->inf_strm.total_in < srclen) {
407 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
408 if (ret != Z_OK && ret != Z_STREAM_END)
411 * buf start is the byte offset we're of the start of
412 * our workspace buffer
414 buf_start = total_out;
416 /* total_out is the last byte of the workspace buffer */
417 total_out = workspace->inf_strm.total_out;
419 working_bytes = total_out - buf_start;
422 * start byte is the first byte of the page we're currently
423 * copying into relative to the start of the compressed data.
425 start_byte = page_offset(page_out) - disk_start;
427 if (working_bytes == 0) {
428 /* we didn't make progress in this inflate
431 if (ret != Z_STREAM_END)
436 /* we haven't yet hit data corresponding to this page */
437 if (total_out <= start_byte)
441 * the start of the data we care about is offset into
442 * the middle of our working buffer
444 if (total_out > start_byte && buf_start < start_byte) {
445 buf_offset = start_byte - buf_start;
446 working_bytes -= buf_offset;
450 current_buf_start = buf_start;
452 /* copy bytes from the working buffer into the pages */
453 while (working_bytes > 0) {
454 bytes = min(PAGE_CACHE_SIZE - pg_offset,
455 PAGE_CACHE_SIZE - buf_offset);
456 bytes = min(bytes, working_bytes);
457 kaddr = kmap_atomic(page_out, KM_USER0);
458 memcpy(kaddr + pg_offset, workspace->buf + buf_offset,
460 kunmap_atomic(kaddr, KM_USER0);
461 flush_dcache_page(page_out);
464 page_bytes_left -= bytes;
466 working_bytes -= bytes;
467 current_buf_start += bytes;
469 /* check if we need to pick another page */
470 if (page_bytes_left == 0) {
/* ran past the biovec: done (exit path not visible here) */
472 if (page_out_index >= vcnt) {
477 page_out = bvec[page_out_index].bv_page;
479 page_bytes_left = PAGE_CACHE_SIZE;
480 start_byte = page_offset(page_out) - disk_start;
483 * make sure our new page is covered by this
486 if (total_out <= start_byte)
489 /* the next page in the biovec might not
490 * be adjacent to the last page, but it
491 * might still be found inside this working
492 * buffer. bump our offset pointer
494 if (total_out > start_byte &&
495 current_buf_start < start_byte) {
496 buf_offset = start_byte - buf_start;
497 working_bytes = total_out - start_byte;
498 current_buf_start = buf_start +
/* bounce buffer drained: rewind it for the next inflate call */
504 workspace->inf_strm.next_out = workspace->buf;
505 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
/* current compressed page consumed: map the next one */
507 if (workspace->inf_strm.avail_in == 0) {
509 kunmap(pages_in[page_in_index]);
511 if (page_in_index >= total_pages_in) {
515 data_in = kmap(pages_in[page_in_index]);
516 workspace->inf_strm.next_in = data_in;
517 tmp = srclen - workspace->inf_strm.total_in;
518 workspace->inf_strm.avail_in = min(tmp,
/* loop ended without a clean stream end: error (handling not
 * visible in this extract) */
522 if (ret != Z_STREAM_END)
/* teardown: end the inflate stream, unmap the last input page,
 * recycle the workspace */
527 zlib_inflateEnd(&workspace->inf_strm);
529 kunmap(pages_in[page_in_index]);
531 free_workspace(workspace);
536 * a less complex decompression routine. Our compressed data fits in a
537 * single page, and we want to read a single page out of it.
538 * start_byte tells us the offset into the compressed data we're interested in
/* (single-page variant described in the comment block above; this
 * extract is missing lines — gotos, closing braces, some updates —
 * so comments below hedge where the code is not visible) */
540 int btrfs_zlib_decompress(unsigned char *data_in,
541 struct page *dest_page,
542 unsigned long start_byte,
543 size_t srclen, size_t destlen)
546 int wbits = MAX_WBITS;
547 struct workspace *workspace;
548 unsigned long bytes_left = destlen;
549 unsigned long total_out = 0;
/* only a single destination page is supported */
552 if (destlen > PAGE_CACHE_SIZE)
555 workspace = find_zlib_workspace();
556 if (IS_ERR(workspace))
/* compressed input is already mapped by the caller; decompress via
 * the workspace bounce buffer */
559 workspace->inf_strm.next_in = data_in;
560 workspace->inf_strm.avail_in = srclen;
561 workspace->inf_strm.total_in = 0;
563 workspace->inf_strm.next_out = workspace->buf;
564 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
565 workspace->inf_strm.total_out = 0;
566 /* If it's deflate, and it's got no preset dictionary, then
567 we can tell zlib to skip the adler32 check. */
568 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
569 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
570 !(((data_in[0]<<8) + data_in[1]) % 31)) {
/* negative windowBits = raw deflate: skip the 2-byte zlib header */
572 wbits = -((data_in[0] >> 4) + 8);
573 workspace->inf_strm.next_in += 2;
574 workspace->inf_strm.avail_in -= 2;
577 if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
578 printk(KERN_WARNING "inflateInit failed\n");
/* inflate until the requested destlen bytes have been produced,
 * skipping output that precedes start_byte */
583 while (bytes_left > 0) {
584 unsigned long buf_start;
585 unsigned long buf_offset;
587 unsigned long pg_offset = 0;
589 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
590 if (ret != Z_OK && ret != Z_STREAM_END)
593 buf_start = total_out;
594 total_out = workspace->inf_strm.total_out;
/* no forward progress this iteration */
596 if (total_out == buf_start) {
/* still before the byte range we care about: keep inflating */
601 if (total_out <= start_byte)
/* target range starts inside this buffer-full */
604 if (total_out > start_byte && buf_start < start_byte)
605 buf_offset = start_byte - buf_start;
609 bytes = min(PAGE_CACHE_SIZE - pg_offset,
610 PAGE_CACHE_SIZE - buf_offset);
611 bytes = min(bytes, bytes_left);
613 kaddr = kmap_atomic(dest_page, KM_USER0);
614 memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
615 kunmap_atomic(kaddr, KM_USER0);
/* rewind the bounce buffer for the next inflate call */
620 workspace->inf_strm.next_out = workspace->buf;
621 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
/* stream ended before we produced everything asked for: error
 * (handling not visible in this extract) */
624 if (ret != Z_STREAM_END && bytes_left != 0)
629 zlib_inflateEnd(&workspace->inf_strm);
631 free_workspace(workspace);
635 void btrfs_zlib_exit(void)