2 * Copyright (c) 2007-2012, Novell Inc.
4 * This program is licensed under the BSD license, read LICENSE.BSD
5 * for further information
11 * Paging and compression functions for the vertical repository data.
12 * Vertical data is grouped by key, normal data is grouped by solvable.
13 * This makes searching for a string in vertical data fast as there's
14 * no need to skip over data if keys we're not interested in.
16 * The vertical data is split into pages, each page is compressed with a fast
17 * compression algorithm. These pages are read in on demand, not recently used
18 * pages automatically get dropped.
21 #define _XOPEN_SOURCE 500
23 #include <sys/types.h>
/* Size of one compression block. It must not exceed 65536 so that 16-bit
   back-reference offsets can reach the whole block.
   NOTE(review): the body of the #if below (presumably selecting the width
   of the Ref hash-chain type) is not visible in this extract -- confirm in
   the full source. */
44 #define BLOCK_SIZE (65536*1)
45 #if BLOCK_SIZE <= 65536
52 The format is tailored for fast decompression (i.e. only byte based),
53 and skewed to ASCII content (highest bit often not set):
56 - self-describing ASCII character hex L
57 b 100lllll <l+1 bytes>
58 - literal run of length l+1
60 - back ref of length l+2, at offset -(o+1) (o < 1 << 10)
62 - back ref of length l+2+8, at offset -(o+1) (o < 1 << 8)
64 - back ref of length l+3, at offset -(o+1) (o < 1 << 16)
65 f1 1111llll <8l> <8o> <8o>
66 - back ref, length l+19 (l < 1<<12), offset -(o+1) (o < 1<<16)
67 f2 11110lll <8l> <8o> <8o>
68 - back ref, length l+19 (l < 1<<11), offset -(o+1) (o < 1<<16)
69 g 11111lll <8l> <8o> <8o> <8o>
70 - back ref, length l+5 (l < 1<<11), offset -(o+1) (o < 1<<24)
72 Generally for a literal of length L we need L+1 bytes, hence it is
73 better to encode also very short backrefs (2 chars) as backrefs if
74 their offset is small, as that only needs two bytes. Except if we
75 already have a literal run, in that case it's better to append there,
76 instead of breaking it for a backref. So given a potential backref
77 at offset O, length L the strategy is as follows:
79 L < 2 : encode as 1-literal
80 L == 2, O > 1024 : encode as 1-literal
81 L == 2, have already literals: encode as 1-literal
83 L >= 2, L <= 9, O < 1024 : encode as c
84 L >= 10, L <= 41, O < 256 : encode as d
85 else we have either O >= 1024, or L >= 42:
86 L < 3 : encode as 1-literal
87 L >= 3, L <= 18, O < 65536 : encode as e
88 L >= 19, L <= 4095+18, O < 65536 : encode as f
89 else we have either L >= 4096+18 or O >= 65536.
90 O >= 65536: encode as 1-literal, too bad
91 (with the current block size this can't happen)
92 L >= 4096+18, so reduce to 4095+18 : encode as f
/*
 * compress_buf: compress IN (in_len bytes) into OUT (capacity out_len
 * bytes) using the byte-oriented LZ format described in the comment
 * above, returning the number of output bytes produced.
 *
 * NOTE(review): this view is a line-sampled extract (each line carries
 * its original line number and many lines are missing), so braces and
 * some statements are absent here; the code is kept byte-identical and
 * only comments were added. The out-of-space handling on the capacity
 * checks below is not visible -- confirm against the full source.
 */
97 compress_buf(const unsigned char *in, unsigned int in_len,
98 unsigned char *out, unsigned int out_len)
100 unsigned int oo = 0; /* out-offset */
101 unsigned int io = 0; /* in-offset */
/* Hash chains over 3-byte prefixes: htab[h] holds the most recent
   position with hash h, hnext[pos] the previous one. (The htab
   declaration itself is on a line not visible in this extract.) */
104 Ref hnext[BLOCK_SIZE];
105 unsigned int litofs = 0;
106 memset(htab, -1, sizeof (htab));
107 memset(hnext, -1, sizeof (hnext));
/* Main loop: runs while at least 3 input bytes remain (enough to hash). */
108 while (io + 2 < in_len)
110 /* Search for a match of the string starting at IN, we have at
111 least three characters. */
112 unsigned int hval = in[io] | in[io + 1] << 8 | in[io + 2] << 16;
113 unsigned int try, mlen, mofs, tries;
114 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
115 hval = hval & (HS - 1);
117 hnext[io] = htab[hval];
/* First pass over the chain: find an initial 2-byte match among up to
   12 candidates. */
122 for (tries = 0; try != -1 && tries < 12; tries++)
125 && in[try] == in[io] && in[try + 1] == in[io + 1])
128 mofs = (io - try) - 1;
/* Second pass: try to replace the current best match with a longer one. */
133 for (; try != -1 && tries < 12; tries++)
135 /* assert(mlen >= 2); */
136 /* assert(io + mlen < in_len); */
137 /* Try a match starting from [io] with the strings at [try].
138 That's only sensible if TRY actually is before IO (can happen
139 with uninit hash table). If we have a previous match already
140 we're only going to take the new one if it's longer, hence
141 check the potentially last character. */
142 if (try < io && in[try + mlen] == in[io + mlen])
144 unsigned int this_len, this_ofs;
145 if (memcmp(in + try, in + io, mlen))
148 /* Now try extending the match by more characters. */
150 io + this_len < in_len
151 && in[try + this_len] == in[io + this_len]; this_len++)
155 for (testi = 0; testi < this_len; testi++)
156 assert(in[try + testi] == in[io + testi]);
158 this_ofs = (io - try) - 1;
159 /*if (this_ofs > 65535)
162 assert(this_len >= 2);
163 assert(this_len >= mlen);
164 assert(this_len > mlen || (this_len == mlen && this_ofs > mofs));
166 mlen = this_len, mofs = this_ofs;
167 /* If our match extends up to the end of input, no next
168 match can become better. This is not just an
169 optimization, it establishes a loop invariant
170 (io + mlen < in_len). */
171 if (io + mlen >= in_len)
176 /*if (io - try - 1 >= 65536)
183 /*fprintf(stderr, "%d %d\n", mlen, mofs);*/
/* Drop matches that would encode no better than literals; see the
   strategy table in the header comment above. */
184 if (mlen == 2 && (litofs || mofs >= 1024))
186 /*else if (mofs >= 65536)
188 else if (mofs >= 65536)
/* Far offsets (>= 64k) must use encoding 'g'; clamp length to its max. */
190 if (mlen >= 2048 + 5)
197 /*else if (mlen >= 4096 + 19)
199 else if (mlen >= 2048 + 19)
201 /* Skip this match if the next character would deliver a better one,
202 but only do this if we have the chance to really extend the
203 length (i.e. our current length isn't yet the (conservative)
/* Lazy matching: peek at the match that starts one byte later. */
205 if (mlen && mlen < (2048 + 5) && io + 3 < in_len)
208 in[io + 1] | in[io + 2] << 8 | in[io + 3] << 16;
210 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
211 hval = hval & (HS - 1);
214 && in[try] == in[io + 1] && in[try + 1] == in[io + 2])
216 unsigned int this_len;
219 io + 1 + this_len < in_len
220 && in[try + this_len] == in[io + 1 + this_len];
223 if (this_len >= mlen)
/* Flush any pending literal run before emitting the back reference. */
240 litlen = io - litofs;
241 /* fprintf(stderr, "lit: %d\n", litlen); */
244 unsigned int easy_sz;
245 /* Emit everything we can as self-describers. As soon as
246 we hit a byte we can't emit as such we're going to emit
247 a length descriptor anyway, so we can as well include
248 bytes < 0x80 which might follow afterwards in that run. */
250 easy_sz < litlen && in[litofs + easy_sz] < 0x80;
255 if (oo + easy_sz >= out_len)
257 memcpy(out + oo, in + litofs, easy_sz);
266 if (oo + 1 + litlen >= out_len)
268 out[oo++] = 0x80 | (litlen - 1);
270 out[oo++] = in[litofs++];
275 /* Literal length > 32, so chunk it. */
276 if (oo + 1 + 32 >= out_len)
278 out[oo++] = 0x80 | 31;
279 memcpy(out + oo, in + litofs, 32);
288 /* fprintf(stderr, "ref: %d @ %d\n", mlen, mofs); */
/* Emit the back reference with the smallest applicable encoding
   (c, d, g, e, f -- see format comment); each branch first checks
   remaining output capacity. */
290 if (mlen >= 2 && mlen <= 9 && mofs < 1024)
292 if (oo + 2 >= out_len)
294 out[oo++] = 0xa0 | ((mofs & 0x300) >> 5) | (mlen - 2);
295 out[oo++] = mofs & 0xff;
297 else if (mlen >= 10 && mlen <= 41 && mofs < 256)
299 if (oo + 2 >= out_len)
301 out[oo++] = 0xc0 | (mlen - 10);
304 else if (mofs >= 65536)
306 assert(mlen >= 5 && mlen < 2048 + 5);
307 if (oo + 5 >= out_len)
309 out[oo++] = 0xf8 | ((mlen - 5) >> 8);
310 out[oo++] = (mlen - 5) & 0xff;
311 out[oo++] = mofs & 0xff;
312 out[oo++] = (mofs >> 8) & 0xff;
313 out[oo++] = mofs >> 16;
315 else if (mlen >= 3 && mlen <= 18)
317 assert(mofs < 65536);
318 if (oo + 3 >= out_len)
320 out[oo++] = 0xe0 | (mlen - 3);
321 out[oo++] = mofs & 0xff;
322 out[oo++] = mofs >> 8;
326 assert(mlen >= 19 && mlen <= 4095 + 19 && mofs < 65536);
327 if (oo + 4 >= out_len)
329 out[oo++] = 0xf0 | ((mlen - 19) >> 8);
330 out[oo++] = (mlen - 19) & 0xff;
331 out[oo++] = mofs & 0xff;
332 out[oo++] = mofs >> 8;
334 /* Insert the hashes for the compressed run [io..io+mlen-1].
335 For [io] we have it already done at the start of the loop.
336 So it's from [io+1..io+mlen-1], and we need three chars per
337 hash, so the accessed characters will be [io+1..io+mlen-1+2],
338 ergo io+mlen+1 < in_len. */
346 in[io] | in[io + 1] << 8 | in[io + 2] << 16;
347 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
348 hval = hval & (HS - 1);
349 hnext[io] = htab[hval];
356 /* We might have some characters left. */
357 if (io < in_len && !litofs)
/* Final flush of the trailing literal run (same emission logic as the
   in-loop flush above). */
364 litlen = io - litofs;
365 /* fprintf(stderr, "lit: %d\n", litlen); */
368 unsigned int easy_sz;
369 /* Emit everything we can as self-describers. As soon as we hit a
370 byte we can't emit as such we're going to emit a length
371 descriptor anyway, so we can as well include bytes < 0x80 which
372 might follow afterwards in that run. */
373 for (easy_sz = 0; easy_sz < litlen && in[litofs + easy_sz] < 0x80;
378 if (oo + easy_sz >= out_len)
380 memcpy(out + oo, in + litofs, easy_sz);
389 if (oo + 1 + litlen >= out_len)
391 out[oo++] = 0x80 | (litlen - 1);
393 out[oo++] = in[litofs++];
398 /* Literal length > 32, so chunk it. */
399 if (oo + 1 + 32 >= out_len)
401 out[oo++] = 0x80 | 31;
402 memcpy(out + oo, in + litofs, 32);
/*
 * unchecked_decompress_buf: decompress IN (in_len bytes) into OUT and
 * return the number of bytes produced. "Unchecked" because OUT is not
 * bounds-checked against out_len (the parameter is explicitly unused);
 * the caller must guarantee the decompressed data fits.
 *
 * NOTE(review): line-sampled extract -- code kept byte-identical, only
 * comments changed/added.
 */
413 unchecked_decompress_buf(const unsigned char *in, unsigned int in_len,
415 unsigned int out_len __attribute__((unused)))
417 unsigned char *orig_out = out;
418 const unsigned char *in_end = in + in_len;
/* Main loop: read one tag byte and dispatch on its high bits
   (see the format comment at the top of the file). */
421 unsigned int first = *in++;
426 /* This default case can't happen, but GCCs VRP is not strong
427 enough to see this, so make this explicitly not fall to
428 the end of the switch, so that we don't have to initialize
436 /* fprintf (stderr, "lit: 1\n"); */
440 /* b 100lllll <l+1 bytes> */
442 unsigned int l = first & 31;
443 /* fprintf (stderr, "lit: %d\n", l); */
450 /* c 101oolll <8o> */
452 o = first & (3 << 3);
453 o = (o << 5) | *in++;
454 first = (first & 7) + 2;
458 /* d 110lllll <8o> */
461 first = (first & 31) + 10;
465 /* e 1110llll <8o> <8o> */
467 o = in[0] | (in[1] << 8);
474 /* f1 1111llll <8l> <8o> <8o> */
475 /* f2 11110lll <8l> <8o> <8o> */
476 /* g 11111lll <8l> <8o> <8o> <8o> */
/* (the length byte in[0] is read before the offset bytes, matching the
   format table in the file header) */
481 first = (((first - 8) << 8) | in[0]) + 5;
482 o = in[1] | (in[2] << 8) | (in[3] << 16);
487 first = ((first << 8) | in[0]) + 19;
488 o = in[1] | (in[2] << 8);
494 /* fprintf(stderr, "ref: %d @ %d\n", first, o); */
498 /* We know that first will not be zero, and this loop structure is
499 better optimizable. */
/* Unrolled (Duff's-device style) byte copy with intentional fallthrough
   on every case. By this point 'o' has been adjusted (on lines not
   visible in this extract) so that out + o points back into already
   produced output -- confirm against the full source. The copy must be
   byte-wise because source and destination may overlap. */
509 case 18: *out = *(out + o); out++;
510 case 17: *out = *(out + o); out++;
511 case 16: *out = *(out + o); out++;
512 case 15: *out = *(out + o); out++;
513 case 14: *out = *(out + o); out++;
514 case 13: *out = *(out + o); out++;
515 case 12: *out = *(out + o); out++;
516 case 11: *out = *(out + o); out++;
517 case 10: *out = *(out + o); out++;
518 case 9: *out = *(out + o); out++;
519 case 8: *out = *(out + o); out++;
520 case 7: *out = *(out + o); out++;
521 case 6: *out = *(out + o); out++;
522 case 5: *out = *(out + o); out++;
523 case 4: *out = *(out + o); out++;
524 case 3: *out = *(out + o); out++;
525 case 2: *out = *(out + o); out++;
526 case 1: *out = *(out + o); out++;
/* Second unrolled block for references longer than the first block
   handles, repeated 16 bytes at a time until 'first' is exhausted. */
534 case 0: *out = *(out + o); out++;
535 case 15: *out = *(out + o); out++;
536 case 14: *out = *(out + o); out++;
537 case 13: *out = *(out + o); out++;
538 case 12: *out = *(out + o); out++;
539 case 11: *out = *(out + o); out++;
540 case 10: *out = *(out + o); out++;
541 case 9: *out = *(out + o); out++;
542 case 8: *out = *(out + o); out++;
543 case 7: *out = *(out + o); out++;
544 case 6: *out = *(out + o); out++;
545 case 5: *out = *(out + o); out++;
546 case 4: *out = *(out + o); out++;
547 case 3: *out = *(out + o); out++;
548 case 2: *out = *(out + o); out++;
549 case 1: *out = *(out + o); out++;
551 while ((int)(first -= 16) > 0);
/* Number of decompressed bytes written. */
557 return out - orig_out;
560 /**********************************************************************/
/*
 * repopagestore_init: zero-initialize a page store before first use.
 * NOTE(review): only the memset is visible in this extract; the full
 * source presumably also sets pagefd to -1 (repopagestore_free() tests
 * for -1, and a zeroed pagefd would alias fd 0) -- confirm.
 */
562 void repopagestore_init(Repopagestore *store)
564 memset(store, 0, sizeof(*store));
/*
 * repopagestore_free: release everything owned by the store -- the blob
 * memory, the on-disk page table and the mapping bookkeeping arrays --
 * and close the backing page file descriptor if one was opened
 * (pagefd == -1 means "no backing file").
 */
568 void repopagestore_free(Repopagestore *store)
570 store->blob_store = solv_free(store->blob_store);
571 store->file_pages = solv_free(store->file_pages);
572 store->mapped_at = solv_free(store->mapped_at);
573 store->mapped = solv_free(store->mapped);
574 if (store->pagefd != -1)
575 close(store->pagefd);
580 /**********************************************************************/
/*
 * repopagestore_load_page_range: ensure pages PSTART..PEND (inclusive)
 * are decompressed into blob_store as one consecutive run, and return a
 * pointer to PSTART's mapping (or 0 when paging has no backing file).
 *
 * Bookkeeping invariants used throughout:
 *   mapped_at[pnum] = byte offset of page pnum inside blob_store,
 *                     or -1 when the page is not resident;
 *   mapped[slot]    = page number held by blob-store slot, or -1.
 *
 * NOTE(review): line-sampled extract -- code kept byte-identical, only
 * comments added.
 */
583 repopagestore_load_page_range(Repopagestore *store, unsigned int pstart, unsigned int pend)
585 /* Make sure all pages from PSTART to PEND (inclusive) are loaded,
586 and are consecutive. Return a pointer to the mapping of PSTART. */
587 unsigned char buf[REPOPAGE_BLOBSIZE];
588 unsigned int i, best, pnum;
592 /* Quick check in case the requested page is already mapped */
593 if (store->mapped_at[pstart] != -1)
594 return store->blob_store + store->mapped_at[pstart];
598 /* Quick check in case all pages are already mapped and consecutive. */
599 for (pnum = pstart; pnum <= pend; pnum++)
600 if (store->mapped_at[pnum] == -1
602 && store->mapped_at[pnum]
603 != store->mapped_at[pnum-1] + REPOPAGE_BLOBSIZE))
606 return store->blob_store + store->mapped_at[pstart];
609 if (store->pagefd == -1 || !store->file_pages)
610 return 0; /* no backing file */
613 fprintf(stderr, "PAGE: want %d pages starting at %d\n", pend - pstart + 1, pstart);
616 /* Ensure that we can map the numbers of pages we need at all. */
617 if (pend - pstart + 1 > store->nmapped)
619 unsigned int oldcan = store->nmapped;
620 store->nmapped = pend - pstart + 1;
621 if (store->nmapped < 4)
623 store->mapped = solv_realloc2(store->mapped, store->nmapped, sizeof(store->mapped[0]));
/* Newly grown slots start empty. */
624 for (i = oldcan; i < store->nmapped; i++)
625 store->mapped[i] = -1;
626 store->blob_store = solv_realloc2(store->blob_store, store->nmapped, REPOPAGE_BLOBSIZE);
628 fprintf(stderr, "PAGE: can map %d pages\n", store->nmapped);
/* Pick the starting slot 'best' so that already-resident end pages stay
   where they are when possible. */
632 if (store->mapped_at[pstart] != -1)
634 /* assume forward search */
635 best = store->mapped_at[pstart] / REPOPAGE_BLOBSIZE;
636 if (best + (pend - pstart) >= store->nmapped)
639 else if (store->mapped_at[pend] != -1)
641 /* assume backward search */
642 best = store->mapped_at[pend] / REPOPAGE_BLOBSIZE;
643 if (best < pend - pstart)
644 best = store->nmapped - 1;
645 best -= pend - pstart;
649 /* choose some "random" location to avoid thrashing */
650 best = (pstart + store->rr_counter++) % (store->nmapped - pend + pstart);
653 /* So we want to map our pages from [best] to [best+pend-pstart].
654 Use a very simple strategy, which doesn't make the best use of
655 our resources, but works. Throw away all pages in that range
656 (even ours) then copy around ours or read them in. */
657 for (i = best, pnum = pstart; pnum <= pend; i++, pnum++)
659 unsigned int pnum_mapped_at;
660 unsigned int oldpnum = store->mapped[i];
664 continue; /* already have the correct page */
665 /* Evict this page. */
667 fprintf(stderr, "PAGE: evict page %d from %d\n", oldpnum, i);
669 store->mapped[i] = -1;
670 store->mapped_at[oldpnum] = -1;
672 /* check if we can copy the correct content (before it gets evicted) */
673 pnum_mapped_at = store->mapped_at[pnum];
674 if (pnum_mapped_at != -1 && pnum_mapped_at != i * REPOPAGE_BLOBSIZE)
676 void *dest = store->blob_store + i * REPOPAGE_BLOBSIZE;
678 fprintf(stderr, "PAGECOPY: %d from %d to %d\n", pnum, pnum_mapped_at / REPOPAGE_BLOBSIZE, i);
680 memcpy(dest, store->blob_store + pnum_mapped_at, REPOPAGE_BLOBSIZE);
681 store->mapped[pnum_mapped_at / REPOPAGE_BLOBSIZE] = -1;
682 store->mapped[i] = pnum;
683 store->mapped_at[pnum] = i * REPOPAGE_BLOBSIZE;
687 /* Everything is free now. Read in or copy the pages we want. */
688 for (i = best, pnum = pstart; pnum <= pend; i++, pnum++)
690 void *dest = store->blob_store + i * REPOPAGE_BLOBSIZE;
691 if (store->mapped_at[pnum] != -1)
693 unsigned int pnum_mapped_at = store->mapped_at[pnum];
694 if (pnum_mapped_at != i * REPOPAGE_BLOBSIZE)
697 fprintf(stderr, "PAGECOPY: %d from %d to %d\n", pnum, pnum_mapped_at / REPOPAGE_BLOBSIZE, i);
699 /* Still mapped somewhere else, so just copy it from there. */
700 memcpy(dest, store->blob_store + pnum_mapped_at, REPOPAGE_BLOBSIZE);
701 store->mapped[pnum_mapped_at / REPOPAGE_BLOBSIZE] = -1;
/* Not resident: read the page from the backing file. page_size encodes
   the on-disk length with the compressed flag in bit 0 (see the setup in
   repopagestore_read_or_setup_pages). Compressed pages are read into the
   scratch buffer first, then expanded into the blob slot. */
706 Attrblobpage *p = store->file_pages + pnum;
707 unsigned int in_len = p->page_size;
708 unsigned int compressed = in_len & 1;
711 fprintf(stderr, "PAGEIN: %d to %d", pnum, i);
/* POSIX path: positioned read, leaves the shared fd offset untouched. */
714 if (pread(store->pagefd, compressed ? buf : dest, in_len, store->file_offset + p->page_offset) != in_len)
716 perror("mapping pread");
/* Windows path: overlapped ReadFile emulates pread. */
721 OVERLAPPED ovlp = {0};
722 ovlp.Offset = store->file_offset + p->page_offset;
723 if (!ReadFile((HANDLE) _get_osfhandle(store->pagefd), compressed ? buf : dest, in_len, &read_len, &ovlp) || read_len != in_len)
725 perror("mapping ReadFile");
731 unsigned int out_len;
732 out_len = unchecked_decompress_buf(buf, in_len, dest, REPOPAGE_BLOBSIZE);
/* Every page except the last must expand to a full blob page. */
733 if (out_len != REPOPAGE_BLOBSIZE && pnum < store->num_pages - 1)
736 fprintf(stderr, "can't decompress\n");
741 fprintf(stderr, " (expand %d to %d)", in_len, out_len);
745 fprintf(stderr, "\n");
748 store->mapped_at[pnum] = i * REPOPAGE_BLOBSIZE;
749 store->mapped[i] = pnum;
751 return store->blob_store + best * REPOPAGE_BLOBSIZE;
/*
 * repopagestore_compress_page: compress one page (LEN bytes at PAGE)
 * into CPAGE (capacity MAX bytes); thin wrapper around compress_buf(),
 * returning its byte count.
 */
755 repopagestore_compress_page(unsigned char *page, unsigned int len, unsigned char *cpage, unsigned int max)
757 return compress_buf(page, len, cpage, max);
/* Error codes returned by repopagestore_read_or_setup_pages() below. */
760 #define SOLV_ERROR_EOF 3
761 #define SOLV_ERROR_CORRUPT 6
/* Helper reading a 32-bit value byte-by-byte from a stream -- presumably
   read_u32(), which is called in repopagestore_read_or_setup_pages()
   below. Only the declaration line and the 4-iteration loop are visible
   in this extract; byte order cannot be confirmed from here. */
763 static inline unsigned int
769 for (i = 0; i < 4; i++)
779 /* Try to either setup on-demand paging (using FP as backing
780 file), or in case that doesn't work (FP not seekable) slurps in
781 all pages and deactivates paging. */
/*
 * Returns 0 on success, SOLV_ERROR_CORRUPT on a bad page size or
 * truncated decompression, SOLV_ERROR_EOF on short reads/seeks.
 * NOTE(review): line-sampled extract -- code kept byte-identical, only
 * comments added.
 */
783 repopagestore_read_or_setup_pages(Repopagestore *store, FILE *fp, unsigned int pagesz, unsigned int blobsz)
787 unsigned int can_seek;
788 unsigned int cur_page_ofs;
789 unsigned char buf[REPOPAGE_BLOBSIZE];
/* Only the compiled-in page size is supported. */
791 if (pagesz != REPOPAGE_BLOBSIZE)
793 /* We could handle this by slurping in everything. */
794 return SOLV_ERROR_CORRUPT;
/* ftell failure implies an unseekable stream (e.g. a pipe). */
797 if ((store->file_offset = ftell(fp)) < 0)
/* Keep our own fd on the backing file so the caller may close fp. */
801 store->pagefd = dup(fileno(fp));
802 if (store->pagefd == -1)
805 solv_setcloexec(store->pagefd, 1);
808 fprintf(stderr, "can %sseek\n", can_seek ? "" : "NOT ");
810 npages = (blobsz + REPOPAGE_BLOBSIZE - 1) / REPOPAGE_BLOBSIZE;
812 store->num_pages = npages;
813 store->mapped_at = solv_malloc2(npages, sizeof(*store->mapped_at));
815 /* If we can't seek on our input we have to slurp in everything.
816 * Otherwise set up file_pages containing offset/length of the
819 store->file_pages = solv_malloc2(npages, sizeof(*store->file_pages));
821 store->blob_store = solv_malloc2(npages, REPOPAGE_BLOBSIZE);
/* Walk the page headers: each page is a u32 length (bit 0 = compressed
   flag) followed by the page data. */
823 for (i = 0; i < npages; i++)
825 unsigned int in_len = read_u32(fp);
826 unsigned int compressed = in_len & 1;
829 fprintf(stderr, "page %d: len %d (%scompressed)\n",
830 i, in_len, compressed ? "" : "not ");
/* Seekable: record offset/size for on-demand loading and skip the data. */
834 Attrblobpage *p = store->file_pages + i;
836 store->mapped_at[i] = -1; /* not mapped yet */
837 p->page_offset = cur_page_ofs;
/* page_size keeps the compressed flag in bit 0 (decoded again in
   repopagestore_load_page_range). */
838 p->page_size = in_len * 2 + compressed;
839 if (fseek(fp, in_len, SEEK_CUR) < 0)
841 /* We can't fall back to non-seeking behaviour as we already
842 read over some data pages without storing them away. */
843 close(store->pagefd);
845 return SOLV_ERROR_EOF;
847 cur_page_ofs += in_len;
851 unsigned int out_len;
852 void *dest = store->blob_store + i * REPOPAGE_BLOBSIZE;
853 store->mapped_at[i] = i * REPOPAGE_BLOBSIZE;
854 /* We can't seek, so suck everything in. */
855 if (fread(compressed ? buf : dest, in_len, 1, fp) != 1)
858 return SOLV_ERROR_EOF;
862 out_len = unchecked_decompress_buf(buf, in_len, dest, REPOPAGE_BLOBSIZE);
/* Every page except the last must decompress to a full blob page. */
863 if (out_len != REPOPAGE_BLOBSIZE && i < npages - 1)
865 return SOLV_ERROR_CORRUPT;
/*
 * repopagestore_disable_paging: load every page into memory at once so
 * subsequent accesses never need the backing file.
 */
874 repopagestore_disable_paging(Repopagestore *store)
876 if (store->num_pages)
877 repopagestore_load_page_range(store, 0, store->num_pages - 1);
/*
 * transfer_file: stream FROM to TO one BLOCK_SIZE chunk at a time.
 * With compress != 0 each chunk is compressed and written as
 * <u32 length><data>; otherwise chunks in that framing are read back
 * and decompressed. I/O errors are reported via perror (the
 * exit/termination lines are not visible in this extract).
 * NOTE(review): the length word is written in host byte order via
 * fwrite(&out_len, ...), so the framing is not endian-portable.
 */
883 transfer_file(FILE * from, FILE * to, int compress)
885 unsigned char inb[BLOCK_SIZE];
886 unsigned char outb[BLOCK_SIZE];
887 while (!feof (from) && !ferror (from))
889 unsigned int in_len, out_len;
/* Compression direction: read a raw chunk, compress, frame it. */
892 in_len = fread(inb, 1, BLOCK_SIZE, from);
895 unsigned char *b = outb;
896 out_len = compress_buf(inb, in_len, outb, sizeof (outb));
/* (fallback path: emit the chunk uncompressed) */
898 b = inb, out_len = in_len;
899 if (fwrite(&out_len, sizeof (out_len), 1, to) != 1)
901 perror("write size");
904 if (fwrite(b, out_len, 1, to) != 1)
906 perror("write data");
/* Decompression direction: read the length word, then the payload. */
913 if (fread(&in_len, sizeof(in_len), 1, from) != 1)
917 perror("can't read size");
920 if (fread(inb, in_len, 1, from) != 1)
922 perror("can't read data");
926 unchecked_decompress_buf(inb, in_len, outb, sizeof(outb));
927 if (fwrite(outb, out_len, 1, to) != 1)
929 perror("can't write output");
936 /* Just for benchmarking purposes. */
/* dumb_memcpy: naive copy used as a throughput baseline by benchmark();
   the loop body is not visible in this extract. */
938 dumb_memcpy(void *dest, const void *src, unsigned int len)
/*
 * benchmark: measure throughput (MB/s) of dumb_memcpy, compress_buf and
 * unchecked_decompress_buf on one block read from FROM.
 * Method for each phase: calibrate how many iterations fit into a
 * quarter second of clock() time, then time 10 runs of that many
 * iterations and print the result to stderr.
 * NOTE(review): line-sampled extract -- code kept byte-identical, only
 * comments added.
 */
947 benchmark(FILE * from)
949 unsigned char inb[BLOCK_SIZE];
950 unsigned char outb[BLOCK_SIZE];
951 unsigned int in_len = fread(inb, 1, BLOCK_SIZE, from);
952 unsigned int out_len;
955 perror("can't read from input");
959 unsigned int calib_loop;
960 unsigned int per_loop;
/* Phase 1: memcpy baseline. Calibration loop. */
969 while ((clock() - start) < CLOCKS_PER_SEC / 4)
972 for (i = 0; i < calib_loop; i++)
973 dumb_memcpy(outb, inb, in_len);
974 per_loop += calib_loop;
977 fprintf(stderr, "memcpy:\nCalibrated to %d iterations per loop\n",
/* Timed run: 10 x per_loop iterations. */
981 for (i = 0; i < 10; i++)
982 for (j = 0; j < per_loop; j++)
983 dumb_memcpy(outb, inb, in_len);
985 seconds = (end - start) / (float) CLOCKS_PER_SEC;
986 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
987 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* Phase 2: compression. */
993 while ((clock() - start) < CLOCKS_PER_SEC / 4)
996 for (i = 0; i < calib_loop; i++)
997 compress_buf(inb, in_len, outb, sizeof(outb));
998 per_loop += calib_loop;
1001 fprintf(stderr, "compression:\nCalibrated to %d iterations per loop\n",
1005 for (i = 0; i < 10; i++)
1006 for (j = 0; j < per_loop; j++)
1007 compress_buf(inb, in_len, outb, sizeof(outb));
1009 seconds = (end - start) / (float) CLOCKS_PER_SEC;
1010 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
1011 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* Phase 3: decompression of the block produced by one compress pass. */
1013 out_len = compress_buf(inb, in_len, outb, sizeof(outb));
1018 while ((clock() - start) < CLOCKS_PER_SEC / 4)
1021 for (i = 0; i < calib_loop; i++)
1022 unchecked_decompress_buf(outb, out_len, inb, sizeof(inb));
1023 per_loop += calib_loop;
1026 fprintf(stderr, "decompression:\nCalibrated to %d iterations per loop\n",
1030 for (i = 0; i < 10; i++)
1031 for (j = 0; j < per_loop; j++)
1032 unchecked_decompress_buf(outb, out_len, inb, sizeof(inb));
1034 seconds = (end - start) / (float) CLOCKS_PER_SEC;
1035 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
1036 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/*
 * main: command-line driver. "-d" selects the decompression direction
 * and "-b" runs the benchmark; the default is compressing stdin to
 * stdout via transfer_file(). (The assignments performed inside the two
 * option branches are on lines not visible in this extract -- presumably
 * clearing the compress flag resp. dispatching to benchmark().)
 */
1040 main(int argc, char *argv[])
1043 if (argc > 1 && !strcmp(argv[1], "-d"))
1045 if (argc > 1 && !strcmp(argv[1], "-b"))
1048 transfer_file(stdin, stdout, compress);