2 * Copyright (c) 2007-2012, Novell Inc.
4 * This program is licensed under the BSD license, read LICENSE.BSD
5 * for further information
11 * Paging and compression functions for the vertical repository data.
12 * Vertical data is grouped by key, normal data is grouped by solvable.
13 * This makes searching for a string in vertical data fast as there's
14 * no need to skip over data of keys we're not interested in.
16 * The vertical data is split into pages, each page is compressed with a fast
17 * compression algorithm. These pages are read in on demand, not recently used
18 * pages automatically get dropped.
21 #define _XOPEN_SOURCE 500
23 #include <sys/types.h>
38 #define BLOCK_SIZE (65536*1)
39 #if BLOCK_SIZE <= 65536
46 The format is tailored for fast decompression (i.e. only byte based),
47 and skewed to ASCII content (highest bit often not set):
50 - self-describing ASCII character hex L
51 b 100lllll <l+1 bytes>
52 - literal run of length l+1
54 - back ref of length l+2, at offset -(o+1) (o < 1 << 10)
56 - back ref of length l+2+8, at offset -(o+1) (o < 1 << 8)
58 - back ref of length l+3, at offset -(o+1) (o < 1 << 16)
59 f1 1111llll <8l> <8o> <8o>
60 - back ref, length l+19 (l < 1<<12), offset -(o+1) (o < 1<<16)
61 f2 11110lll <8l> <8o> <8o>
62 - back ref, length l+19 (l < 1<<11), offset -(o+1) (o < 1<<16)
63 g 11111lll <8l> <8o> <8o> <8o>
64 - back ref, length l+5 (l < 1<<11), offset -(o+1) (o < 1<<24)
66 Generally for a literal of length L we need L+1 bytes, hence it is
67 better to encode also very short backrefs (2 chars) as backrefs if
68 their offset is small, as that only needs two bytes. Except if we
69 already have a literal run, in that case it's better to append there,
70 instead of breaking it for a backref. So given a potential backref
71 at offset O, length L the strategy is as follows:
73 L < 2 : encode as 1-literal
74 L == 2, O > 1024 : encode as 1-literal
75 L == 2, have already literals: encode as 1-literal
77 L >= 2, L <= 9, O < 1024 : encode as c
78 L >= 10, L <= 41, O < 256 : encode as d
79 else we have either O >= 1024, or L >= 42:
80 L < 3 : encode as 1-literal
81 L >= 3, L <= 18, O < 65536 : encode as e
82 L >= 19, L <= 4095+18, O < 65536 : encode as f
83 else we have either L >= 4096+18 or O >= 65536.
84 O >= 65536: encode as 1-literal, too bad
85 (with the current block size this can't happen)
86 L >= 4096+18, so reduce to 4095+18 : encode as f
/*
 * compress_buf - compress IN (IN_LEN bytes) into OUT (capacity OUT_LEN)
 * using the byte-oriented LZ scheme documented in the header comment
 * (self-describing ASCII bytes, literal runs, and several back-reference
 * encodings chosen by match length/offset).
 * NOTE(review): this extract is missing interior lines (the overflow
 * returns and some statements are not visible); the code below is left
 * byte-identical, only comments were added.
 */
91 compress_buf(const unsigned char *in, unsigned int in_len,
92 unsigned char *out, unsigned int out_len)
94 unsigned int oo = 0; /* out-offset */
95 unsigned int io = 0; /* in-offset */
/* Hash chains over 3-byte prefixes: htab maps hash -> most recent
   position, hnext chains older positions with the same hash. */
98 Ref hnext[BLOCK_SIZE];
99 unsigned int litofs = 0;
100 memset(htab, -1, sizeof (htab));
101 memset(hnext, -1, sizeof (hnext));
/* Main loop: at least three input bytes remain, so a 3-byte hash exists. */
102 while (io + 2 < in_len)
104 /* Search for a match of the string starting at IN, we have at
105 least three characters. */
106 unsigned int hval = in[io] | in[io + 1] << 8 | in[io + 2] << 16;
107 unsigned int try, mlen, mofs, tries;
108 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
109 hval = hval & (HS - 1);
111 hnext[io] = htab[hval];
/* First pass: find any 2-byte match among up to 12 chain candidates. */
116 for (tries = 0; try != -1 && tries < 12; tries++)
119 && in[try] == in[io] && in[try + 1] == in[io + 1])
122 mofs = (io - try) - 1;
/* Second pass: try to improve on the current best match. */
127 for (; try != -1 && tries < 12; tries++)
129 /* assert(mlen >= 2); */
130 /* assert(io + mlen < in_len); */
131 /* Try a match starting from [io] with the strings at [try].
132 That's only sensible if TRY actually is before IO (can happen
133 with uninit hash table). If we have a previous match already
134 we're only going to take the new one if it's longer, hence
135 check the potentially last character. */
136 if (try < io && in[try + mlen] == in[io + mlen])
138 unsigned int this_len, this_ofs;
139 if (memcmp(in + try, in + io, mlen))
142 /* Now try extending the match by more characters. */
144 io + this_len < in_len
145 && in[try + this_len] == in[io + this_len]; this_len++)
149 for (testi = 0; testi < this_len; testi++)
150 assert(in[try + testi] == in[io + testi]);
152 this_ofs = (io - try) - 1;
153 /*if (this_ofs > 65535)
156 assert(this_len >= 2);
157 assert(this_len >= mlen);
158 assert(this_len > mlen || (this_len == mlen && this_ofs > mofs));
160 mlen = this_len, mofs = this_ofs;
161 /* If our match extends up to the end of input, no next
162 match can become better. This is not just an
163 optimization, it establishes a loop invariant
164 (io + mlen < in_len). */
165 if (io + mlen >= in_len)
170 /*if (io - try - 1 >= 65536)
177 /*fprintf(stderr, "%d %d\n", mlen, mofs);*/
/* Apply the encoding strategy from the header comment: very short or
   far matches are demoted to literals, overlong matches are clamped. */
178 if (mlen == 2 && (litofs || mofs >= 1024))
180 /*else if (mofs >= 65536)
182 else if (mofs >= 65536)
184 if (mlen >= 2048 + 5)
191 /*else if (mlen >= 4096 + 19)
193 else if (mlen >= 2048 + 19)
195 /* Skip this match if the next character would deliver a better one,
196 but only do this if we have the chance to really extend the
197 length (i.e. our current length isn't yet the (conservative)
199 if (mlen && mlen < (2048 + 5) && io + 3 < in_len)
202 in[io + 1] | in[io + 2] << 8 | in[io + 3] << 16;
204 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
205 hval = hval & (HS - 1);
208 && in[try] == in[io + 1] && in[try + 1] == in[io + 2])
210 unsigned int this_len;
213 io + 1 + this_len < in_len
214 && in[try + this_len] == in[io + 1 + this_len];
217 if (this_len >= mlen)
/* Flush any pending literal run before emitting the back reference. */
234 litlen = io - litofs;
235 /* fprintf(stderr, "lit: %d\n", litlen); */
238 unsigned int easy_sz;
239 /* Emit everything we can as self-describers. As soon as
240 we hit a byte we can't emit as such we're going to emit
241 a length descriptor anyway, so we can as well include
242 bytes < 0x80 which might follow afterwards in that run. */
244 easy_sz < litlen && in[litofs + easy_sz] < 0x80;
249 if (oo + easy_sz >= out_len)
251 memcpy(out + oo, in + litofs, easy_sz);
260 if (oo + 1 + litlen >= out_len)
262 out[oo++] = 0x80 | (litlen - 1);
264 out[oo++] = in[litofs++];
269 /* Literal length > 32, so chunk it. */
270 if (oo + 1 + 32 >= out_len)
272 out[oo++] = 0x80 | 31;
273 memcpy(out + oo, in + litofs, 32);
282 /* fprintf(stderr, "ref: %d @ %d\n", mlen, mofs); */
/* Emit the back reference in the smallest applicable encoding
   (c, d, g, e, then f -- see the format table above). */
284 if (mlen >= 2 && mlen <= 9 && mofs < 1024)
286 if (oo + 2 >= out_len)
288 out[oo++] = 0xa0 | ((mofs & 0x300) >> 5) | (mlen - 2);
289 out[oo++] = mofs & 0xff;
291 else if (mlen >= 10 && mlen <= 41 && mofs < 256)
293 if (oo + 2 >= out_len)
295 out[oo++] = 0xc0 | (mlen - 10);
298 else if (mofs >= 65536)
300 assert(mlen >= 5 && mlen < 2048 + 5);
301 if (oo + 5 >= out_len)
303 out[oo++] = 0xf8 | ((mlen - 5) >> 8);
304 out[oo++] = (mlen - 5) & 0xff;
305 out[oo++] = mofs & 0xff;
306 out[oo++] = (mofs >> 8) & 0xff;
307 out[oo++] = mofs >> 16;
309 else if (mlen >= 3 && mlen <= 18)
311 assert(mofs < 65536);
312 if (oo + 3 >= out_len)
314 out[oo++] = 0xe0 | (mlen - 3);
315 out[oo++] = mofs & 0xff;
316 out[oo++] = mofs >> 8;
320 assert(mlen >= 19 && mlen <= 4095 + 19 && mofs < 65536);
321 if (oo + 4 >= out_len)
323 out[oo++] = 0xf0 | ((mlen - 19) >> 8);
324 out[oo++] = (mlen - 19) & 0xff;
325 out[oo++] = mofs & 0xff;
326 out[oo++] = mofs >> 8;
328 /* Insert the hashes for the compressed run [io..io+mlen-1].
329 For [io] we have it already done at the start of the loop.
330 So it's from [io+1..io+mlen-1], and we need three chars per
331 hash, so the accessed characters will be [io+1..io+mlen-1+2],
332 ergo io+mlen+1 < in_len. */
340 in[io] | in[io + 1] << 8 | in[io + 2] << 16;
341 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
342 hval = hval & (HS - 1);
343 hnext[io] = htab[hval];
350 /* We might have some characters left. */
351 if (io < in_len && !litofs)
358 litlen = io - litofs;
359 /* fprintf(stderr, "lit: %d\n", litlen); */
362 unsigned int easy_sz;
363 /* Emit everything we can as self-describers. As soon as we hit a
364 byte we can't emit as such we're going to emit a length
365 descriptor anyway, so we can as well include bytes < 0x80 which
366 might follow afterwards in that run. */
367 for (easy_sz = 0; easy_sz < litlen && in[litofs + easy_sz] < 0x80;
372 if (oo + easy_sz >= out_len)
374 memcpy(out + oo, in + litofs, easy_sz)
383 if (oo + 1 + litlen >= out_len)
385 out[oo++] = 0x80 | (litlen - 1);
387 out[oo++] = in[litofs++];
392 /* Literal length > 32, so chunk it. */
393 if (oo + 1 + 32 >= out_len)
395 out[oo++] = 0x80 | 31;
396 memcpy(out + oo, in + litofs, 32);
/*
 * unchecked_decompress_buf - decompress IN (IN_LEN bytes) into OUT.
 * "Unchecked" means the output buffer is not bounds-checked against
 * OUT_LEN; callers must guarantee the decompressed size fits (pages
 * always expand to REPOPAGE_BLOBSIZE).  Returns the number of bytes
 * produced (out - orig_out).
 * NOTE(review): interior lines are missing from this extract (e.g. the
 * offset negation before the copy ladders); code left byte-identical.
 */
407 unchecked_decompress_buf(const unsigned char *in, unsigned int in_len,
409 unsigned int out_len __attribute__((unused)))
411 unsigned char *orig_out = out;
412 const unsigned char *in_end = in + in_len;
/* Dispatch on the tag byte; see the format table in the file header. */
415 unsigned int first = *in++;
420 /* This default case can't happen, but GCCs VRP is not strong
421 enough to see this, so make this explicitly not fall to
422 the end of the switch, so that we don't have to initialize
430 /* fprintf (stderr, "lit: 1\n"); */
434 /* b 100lllll <l+1 bytes> */
436 unsigned int l = first & 31;
437 /* fprintf (stderr, "lit: %d\n", l); */
444 /* c 101oolll <8o> */
446 o = first & (3 << 3);
447 o = (o << 5) | *in++;
448 first = (first & 7) + 2;
452 /* d 110lllll <8o> */
455 first = (first & 31) + 10;
459 /* e 1110llll <8o> <8o> */
461 o = in[0] | (in[1] << 8);
468 /* f1 1111llll <8o> <8o> <8l> */
469 /* f2 11110lll <8o> <8o> <8l> */
470 /* g 11111lll <8o> <8o> <8o> <8l> */
475 first = (((first - 8) << 8) | in[0]) + 5;
476 o = in[1] | (in[2] << 8) | (in[3] << 16);
481 first = ((first << 8) | in[0]) + 19;
482 o = in[1] | (in[2] << 8);
488 /* fprintf(stderr, "ref: %d @ %d\n", first, o); */
492 /* We know that first will not be zero, and this loop structure is
493 better optimizable. */
/* Unrolled byte-by-byte back-reference copy (Duff's-device style);
   byte copies are required because source and destination overlap
   for small offsets.  NOTE(review): `o` is presumably negated before
   this point in the full source -- confirm. */
503 case 18: *out = *(out + o); out++;
504 case 17: *out = *(out + o); out++;
505 case 16: *out = *(out + o); out++;
506 case 15: *out = *(out + o); out++;
507 case 14: *out = *(out + o); out++;
508 case 13: *out = *(out + o); out++;
509 case 12: *out = *(out + o); out++;
510 case 11: *out = *(out + o); out++;
511 case 10: *out = *(out + o); out++;
512 case 9: *out = *(out + o); out++;
513 case 8: *out = *(out + o); out++;
514 case 7: *out = *(out + o); out++;
515 case 6: *out = *(out + o); out++;
516 case 5: *out = *(out + o); out++;
517 case 4: *out = *(out + o); out++;
518 case 3: *out = *(out + o); out++;
519 case 2: *out = *(out + o); out++;
520 case 1: *out = *(out + o); out++;
528 case 0: *out = *(out + o); out++;
529 case 15: *out = *(out + o); out++;
530 case 14: *out = *(out + o); out++;
531 case 13: *out = *(out + o); out++;
532 case 12: *out = *(out + o); out++;
533 case 11: *out = *(out + o); out++;
534 case 10: *out = *(out + o); out++;
535 case 9: *out = *(out + o); out++;
536 case 8: *out = *(out + o); out++;
537 case 7: *out = *(out + o); out++;
538 case 6: *out = *(out + o); out++;
539 case 5: *out = *(out + o); out++;
540 case 4: *out = *(out + o); out++;
541 case 3: *out = *(out + o); out++;
542 case 2: *out = *(out + o); out++;
543 case 1: *out = *(out + o); out++;
/* Signed cast so the loop terminates when the remaining length
   underflows past zero. */
545 while ((int)(first -= 16) > 0);
551 return out - orig_out;
554 /**********************************************************************/
/* Initialize STORE to a known-empty state before first use.
   NOTE(review): only the memset is visible in this extract; the full
   body presumably also marks the backing fd as closed -- confirm. */
556 void repopagestore_init(Repopagestore *store)
558 memset(store, 0, sizeof(*store));
/* Release all resources owned by STORE: the page blob buffer, the
   page tables, and the backing file descriptor (if one was set up by
   repopagestore_read_or_setup_pages).  solv_free returns NULL, so the
   pointers are reset as they are freed. */
562 void repopagestore_free(Repopagestore *store)
564 store->blob_store = solv_free(store->blob_store);
565 store->file_pages = solv_free(store->file_pages);
566 store->mapped_at = solv_free(store->mapped_at);
567 store->mapped = solv_free(store->mapped);
568 if (store->pagefd != -1)
569 close(store->pagefd);
574 /**********************************************************************/
/*
 * repopagestore_load_page_range - page-fault handler for the store.
 * Ensures pages PSTART..PEND (inclusive) are resident and laid out
 * consecutively in blob_store, reading/decompressing from the backing
 * file as needed.  Returns a pointer to PSTART's mapping, or 0 when
 * the pages are not resident and there is no backing file.
 * Data structures: mapped_at[pnum] = byte offset of page pnum in
 * blob_store (-1 if unmapped); mapped[slot] = page number held by that
 * blob_store slot (-1 if free).
 */
577 repopagestore_load_page_range(Repopagestore *store, unsigned int pstart, unsigned int pend)
579 /* Make sure all pages from PSTART to PEND (inclusive) are loaded,
580 and are consecutive. Return a pointer to the mapping of PSTART. */
581 unsigned char buf[REPOPAGE_BLOBSIZE];
582 unsigned int i, best, pnum;
586 /* Quick check in case the requested page is already mapped */
587 if (store->mapped_at[pstart] != -1)
588 return store->blob_store + store->mapped_at[pstart];
592 /* Quick check in case all pages are already mapped and consecutive. */
593 for (pnum = pstart; pnum <= pend; pnum++)
594 if (store->mapped_at[pnum] == -1
596 && store->mapped_at[pnum]
597 != store->mapped_at[pnum-1] + REPOPAGE_BLOBSIZE)
600 return store->blob_store + store->mapped_at[pstart];
603 if (store->pagefd == -1 || !store->file_pages)
604 return 0; /* no backing file */
607 fprintf(stderr, "PAGE: want %d pages starting at %d\n", pend - pstart + 1, pstart);
610 /* Ensure that we can map the numbers of pages we need at all. */
611 if (pend - pstart + 1 > store->nmapped)
613 unsigned int oldcan = store->nmapped;
614 store->nmapped = pend - pstart + 1;
615 if (store->nmapped < 4)
617 store->mapped = solv_realloc2(store->mapped, store->nmapped, sizeof(store->mapped[0]));
618 for (i = oldcan; i < store->nmapped; i++)
619 store->mapped[i] = -1;
620 store->blob_store = solv_realloc2(store->blob_store, store->nmapped, REPOPAGE_BLOBSIZE);
622 fprintf(stderr, "PAGE: can map %d pages\n", store->nmapped);
/* Pick the starting slot BEST so that already-resident boundary pages
   stay in place where possible. */
626 if (store->mapped_at[pstart] != -1)
628 /* assume forward search */
629 best = store->mapped_at[pstart] / REPOPAGE_BLOBSIZE;
630 if (best + (pend - pstart) >= store->nmapped)
633 else if (store->mapped_at[pend] != -1)
635 /* assume backward search */
636 best = store->mapped_at[pend] / REPOPAGE_BLOBSIZE;
637 if (best < pend - pstart)
638 best = store->nmapped - 1;
639 best -= pend - pstart;
643 /* choose some "random" location to avoid thrashing */
644 best = (pstart + store->rr_counter++) % (store->nmapped - pend + pstart);
647 /* So we want to map our pages from [best] to [best+pend-pstart].
648 Use a very simple strategy, which doesn't make the best use of
649 our resources, but works. Throw away all pages in that range
650 (even ours) then copy around ours or read them in. */
651 for (i = best, pnum = pstart; pnum <= pend; i++, pnum++)
653 unsigned int pnum_mapped_at;
654 unsigned int oldpnum = store->mapped[i];
658 continue; /* already have the correct page */
659 /* Evict this page. */
661 fprintf(stderr, "PAGE: evict page %d from %d\n", oldpnum, i);
663 store->mapped[i] = -1;
664 store->mapped_at[oldpnum] = -1;
666 /* check if we can copy the correct content (before it gets evicted) */
667 pnum_mapped_at = store->mapped_at[pnum];
668 if (pnum_mapped_at != -1 && pnum_mapped_at != i * REPOPAGE_BLOBSIZE)
670 void *dest = store->blob_store + i * REPOPAGE_BLOBSIZE;
672 fprintf(stderr, "PAGECOPY: %d from %d to %d\n", pnum, pnum_mapped_at / REPOPAGE_BLOBSIZE, i);
674 memcpy(dest, store->blob_store + pnum_mapped_at, REPOPAGE_BLOBSIZE);
675 store->mapped[pnum_mapped_at / REPOPAGE_BLOBSIZE] = -1;
676 store->mapped[i] = pnum;
677 store->mapped_at[pnum] = i * REPOPAGE_BLOBSIZE;
681 /* Everything is free now. Read in or copy the pages we want. */
682 for (i = best, pnum = pstart; pnum <= pend; i++, pnum++)
684 void *dest = store->blob_store + i * REPOPAGE_BLOBSIZE;
685 if (store->mapped_at[pnum] != -1)
687 unsigned int pnum_mapped_at = store->mapped_at[pnum];
688 if (pnum_mapped_at != i * REPOPAGE_BLOBSIZE)
691 fprintf(stderr, "PAGECOPY: %d from %d to %d\n", pnum, pnum_mapped_at / REPOPAGE_BLOBSIZE, i);
693 /* Still mapped somewhere else, so just copy it from there. */
694 memcpy(dest, store->blob_store + pnum_mapped_at, REPOPAGE_BLOBSIZE);
695 store->mapped[pnum_mapped_at / REPOPAGE_BLOBSIZE] = -1;
/* Not resident: read the page from the backing file.  page_size
   encodes length*2 + compressed-flag; NOTE(review): the extract does
   not show the `in_len >>= 1` that presumably recovers the real
   length before the pread -- confirm against the full source. */
700 Attrblobpage *p = store->file_pages + pnum;
701 unsigned int in_len = p->page_size;
702 unsigned int compressed = in_len & 1;
705 fprintf(stderr, "PAGEIN: %d to %d", pnum, i);
707 if (pread(store->pagefd, compressed ? buf : dest, in_len, store->file_offset + p->page_offset) != in_len)
709 perror("mapping pread");
714 unsigned int out_len;
715 out_len = unchecked_decompress_buf(buf, in_len, dest, REPOPAGE_BLOBSIZE);
/* Only the last page of the store may decompress short. */
716 if (out_len != REPOPAGE_BLOBSIZE && pnum < store->num_pages - 1)
719 fprintf(stderr, "can't decompress\n");
724 fprintf(stderr, " (expand %d to %d)", in_len, out_len);
728 fprintf(stderr, "\n");
731 store->mapped_at[pnum] = i * REPOPAGE_BLOBSIZE;
732 store->mapped[i] = pnum;
734 return store->blob_store + best * REPOPAGE_BLOBSIZE;
/* Public wrapper around compress_buf: compress PAGE (LEN bytes) into
   CPAGE (capacity MAX) and return the compressed size. */
738 repopagestore_compress_page(unsigned char *page, unsigned int len, unsigned char *cpage, unsigned int max)
740 return compress_buf(page, len, cpage, max);
743 #define SOLV_ERROR_EOF 3
744 #define SOLV_ERROR_CORRUPT 6
/* Read a 32-bit integer from the stream, one byte per iteration.
   NOTE(review): the body is mostly missing from this extract; byte
   order and EOF handling cannot be determined here -- presumably
   big-endian, matching the solv file format.  Confirm. */
746 static inline unsigned int
752 for (i = 0; i < 4; i++)
762 /* Try to either setup on-demand paging (using FP as backing
763 file), or in case that doesn't work (FP not seekable) slurps in
764 all pages and deactivates paging. */
/* Returns 0 on success, SOLV_ERROR_EOF on truncated input or
   SOLV_ERROR_CORRUPT on bad page data / unsupported page size. */
766 repopagestore_read_or_setup_pages(Repopagestore *store, FILE *fp, unsigned int pagesz, unsigned int blobsz)
770 unsigned int can_seek;
771 unsigned int cur_page_ofs;
772 unsigned char buf[REPOPAGE_BLOBSIZE];
774 if (pagesz != REPOPAGE_BLOBSIZE)
776 /* We could handle this by slurping in everything. */
777 return SOLV_ERROR_CORRUPT;
/* A failing ftell means the stream is not seekable, which forces the
   slurp-everything path below. */
780 if ((store->file_offset = ftell(fp)) < 0)
784 store->pagefd = dup(fileno(fp));
785 if (store->pagefd == -1)
/* Keep the dup'ed fd from leaking into child processes. */
788 fcntl(store->pagefd, F_SETFD, FD_CLOEXEC);
791 fprintf(stderr, "can %sseek\n", can_seek ? "" : "NOT ");
793 npages = (blobsz + REPOPAGE_BLOBSIZE - 1) / REPOPAGE_BLOBSIZE;
795 store->num_pages = npages;
796 store->mapped_at = solv_malloc2(npages, sizeof(*store->mapped_at));
798 /* If we can't seek on our input we have to slurp in everything.
799 * Otherwise set up file_pages containing offset/length of the
802 store->file_pages = solv_malloc2(npages, sizeof(*store->file_pages));
804 store->blob_store = solv_malloc2(npages, REPOPAGE_BLOBSIZE);
806 for (i = 0; i < npages; i++)
/* Per-page header: length with the compressed flag in the low bit.
   NOTE(review): the `in_len >>= 1` that strips the flag is not
   visible in this extract -- confirm against the full source. */
808 unsigned int in_len = read_u32(fp);
809 unsigned int compressed = in_len & 1;
812 fprintf(stderr, "page %d: len %d (%scompressed)\n",
813 i, in_len, compressed ? "" : "not ");
/* Seekable path: record offset/size only, skip over the data. */
817 Attrblobpage *p = store->file_pages + i;
819 store->mapped_at[i] = -1; /* not mapped yet */
820 p->page_offset = cur_page_ofs;
821 p->page_size = in_len * 2 + compressed;
822 if (fseek(fp, in_len, SEEK_CUR) < 0)
824 /* We can't fall back to non-seeking behaviour as we already
825 read over some data pages without storing them away. */
826 close(store->pagefd);
828 return SOLV_ERROR_EOF;
830 cur_page_ofs += in_len;
/* Non-seekable path: read (and decompress) every page right away. */
834 unsigned int out_len;
835 void *dest = store->blob_store + i * REPOPAGE_BLOBSIZE;
836 store->mapped_at[i] = i * REPOPAGE_BLOBSIZE;
837 /* We can't seek, so suck everything in. */
838 if (fread(compressed ? buf : dest, in_len, 1, fp) != 1)
841 return SOLV_ERROR_EOF;
845 out_len = unchecked_decompress_buf(buf, in_len, dest, REPOPAGE_BLOBSIZE);
/* Only the final page may be short. */
846 if (out_len != REPOPAGE_BLOBSIZE && i < npages - 1)
848 return SOLV_ERROR_CORRUPT;
/* Turn off on-demand paging by forcing every page of the store into
   memory at once (one load of the full 0..num_pages-1 range). */
857 repopagestore_disable_paging(Repopagestore *store)
859 if (store->num_pages)
860 repopagestore_load_page_range(store, 0, store->num_pages - 1);
/*
 * transfer_file - stream FROM to TO block by block (test/tool helper).
 * With COMPRESS nonzero each block is compressed and written as
 * <size><data>; otherwise <size><data> records are read back and
 * decompressed.  Errors are reported via perror.
 * NOTE(review): sizes are written in host byte order via fwrite of the
 * raw unsigned int, so the output is not portable across endianness.
 */
866 transfer_file(FILE * from, FILE * to, int compress)
868 unsigned char inb[BLOCK_SIZE];
869 unsigned char outb[BLOCK_SIZE];
870 while (!feof (from) && !ferror (from))
872 unsigned int in_len, out_len;
875 in_len = fread(inb, 1, BLOCK_SIZE, from);
878 unsigned char *b = outb;
879 out_len = compress_buf(inb, in_len, outb, sizeof (outb));
/* A zero result from compress_buf means "did not fit"; fall back to
   storing the block uncompressed. */
881 b = inb, out_len = in_len;
882 if (fwrite(&out_len, sizeof (out_len), 1, to) != 1)
884 perror("write size");
887 if (fwrite(b, out_len, 1, to) != 1)
889 perror("write data");
/* Decompression direction: read <size>, then <data>, then expand. */
896 if (fread(&in_len, sizeof(in_len), 1, from) != 1)
900 perror("can't read size");
903 if (fread(inb, in_len, 1, from) != 1)
905 perror("can't read data");
909 unchecked_decompress_buf(inb, in_len, outb, sizeof(outb));
910 if (fwrite(outb, out_len, 1, to) != 1)
912 perror("can't write output");
919 /* Just for benchmarking purposes. */
/* Naive byte-copy loop used as a baseline in benchmark(); body not
   visible in this extract. */
921 dumb_memcpy(void *dest, const void *src, unsigned int len)
/*
 * benchmark - measure throughput (MB/s) of dumb_memcpy, compress_buf
 * and unchecked_decompress_buf on one block read from FROM.
 * Each phase first calibrates an iteration count that runs for about
 * a quarter second, then times 10 loops of that count with clock().
 * Results go to stderr.
 */
930 benchmark(FILE * from)
932 unsigned char inb[BLOCK_SIZE];
933 unsigned char outb[BLOCK_SIZE];
934 unsigned int in_len = fread(inb, 1, BLOCK_SIZE, from);
935 unsigned int out_len;
938 perror("can't read from input");
942 unsigned int calib_loop;
943 unsigned int per_loop;
/* Phase 1: memcpy baseline -- calibrate, then measure. */
952 while ((clock() - start) < CLOCKS_PER_SEC / 4)
955 for (i = 0; i < calib_loop; i++)
956 dumb_memcpy(outb, inb, in_len);
957 per_loop += calib_loop;
960 fprintf(stderr, "memcpy:\nCalibrated to %d iterations per loop\n",
964 for (i = 0; i < 10; i++)
965 for (j = 0; j < per_loop; j++)
966 dumb_memcpy(outb, inb, in_len);
968 seconds = (end - start) / (float) CLOCKS_PER_SEC;
969 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
970 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* Phase 2: compression throughput. */
976 while ((clock() - start) < CLOCKS_PER_SEC / 4)
979 for (i = 0; i < calib_loop; i++)
980 compress_buf(inb, in_len, outb, sizeof(outb));
981 per_loop += calib_loop;
984 fprintf(stderr, "compression:\nCalibrated to %d iterations per loop\n",
988 for (i = 0; i < 10; i++)
989 for (j = 0; j < per_loop; j++)
990 compress_buf(inb, in_len, outb, sizeof(outb));
992 seconds = (end - start) / (float) CLOCKS_PER_SEC;
993 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
994 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* Phase 3: decompression throughput on the block just compressed. */
996 out_len = compress_buf(inb, in_len, outb, sizeof(outb));
1001 while ((clock() - start) < CLOCKS_PER_SEC / 4)
1004 for (i = 0; i < calib_loop; i++)
1005 unchecked_decompress_buf(outb, out_len, inb, sizeof(inb));
1006 per_loop += calib_loop;
1009 fprintf(stderr, "decompression:\nCalibrated to %d iterations per loop\n",
1013 for (i = 0; i < 10; i++)
1014 for (j = 0; j < per_loop; j++)
1015 unchecked_decompress_buf(outb, out_len, inb, sizeof(inb));
1017 seconds = (end - start) / (float) CLOCKS_PER_SEC;
1018 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
1019 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* Test-tool entry point: "-d" selects decompression, "-b" presumably
   runs benchmark() (branch body not visible in this extract); the
   default pipes stdin to stdout through transfer_file. */
1023 main(int argc, char *argv[])
1026 if (argc > 1 && !strcmp(argv[1], "-d"))
1028 if (argc > 1 && !strcmp(argv[1], "-b"))
1031 transfer_file(stdin, stdout, compress);