2 * Copyright (c) 2007-2008, Novell Inc.
4 * This program is licensed under the BSD license, read LICENSE.BSD
5 * for further information
11 * Paging and compression functions for the vertical repository data.
12 * Vertical data is grouped by key, normal data is grouped by solvable.
13 * This makes searching for a string in vertical data fast as there's
14 * no need to skip over data of keys we're not interested in.
16 * The vertical data is split into pages, each page is compressed with a fast
17 * compression algorithm. These pages are read in on demand, not recently used
18 * pages automatically get dropped.
21 #define _XOPEN_SOURCE 500
23 #include <sys/types.h>
37 #define BLOCK_SIZE (65536*1)
38 #if BLOCK_SIZE <= 65536
39 typedef __uint16_t Ref;
41 typedef __uint32_t Ref;
45 The format is tailored for fast decompression (i.e. only byte based),
46 and skewed to ASCII content (highest bit often not set):
49 - self-describing ASCII character hex L
50 b 100lllll <l+1 bytes>
51 - literal run of length l+1
53 - back ref of length l+2, at offset -(o+1) (o < 1 << 10)
55 - back ref of length l+2+8, at offset -(o+1) (o < 1 << 8)
57 - back ref of length l+3, at offset -(o+1) (o < 1 << 16)
58 f1 1111llll <8l> <8o> <8o>
59 - back ref, length l+19 (l < 1<<12), offset -(o+1) (o < 1<<16)
60 f2 11110lll <8l> <8o> <8o>
61 - back ref, length l+19 (l < 1<<11), offset -(o+1) (o < 1<<16)
62 g 11111lll <8l> <8o> <8o> <8o>
63 - back ref, length l+5 (l < 1<<11), offset -(o+1) (o < 1<<24)
65 Generally for a literal of length L we need L+1 bytes, hence it is
66 better to encode also very short backrefs (2 chars) as backrefs if
67 their offset is small, as that only needs two bytes. Except if we
68 already have a literal run, in that case it's better to append there,
69 instead of breaking it for a backref. So given a potential backref
70 at offset O, length L the strategy is as follows:
72 L < 2 : encode as 1-literal
73 L == 2, O > 1024 : encode as 1-literal
74 L == 2, have already literals: encode as 1-literal
76 L >= 2, L <= 9, O < 1024 : encode as c
77 L >= 10, L <= 41, O < 256 : encode as d
78 else we have either O >= 1024, or L >= 42:
79 L < 3 : encode as 1-literal
80 L >= 3, L <= 18, O < 65536 : encode as e
81 L >= 19, L <= 4095+18, O < 65536 : encode as f
82 else we have either L >= 4096+18 or O >= 65536.
83 O >= 65536: encode as 1-literal, too bad
84 (with the current block size this can't happen)
85 L >= 4096+18, so reduce to 4095+18 : encode as f
/* NOTE(review): this file is an incomplete extraction -- many statements are
   missing and every line carries a stray leading line number, so this block
   does not compile as shown.  Comments below describe only what the visible
   fragments establish; consult the complete original before editing. */
/* compress_buf: LZ77-style compressor for one block of vertical page data.
   IN/IN_LEN is the raw input, OUT/OUT_LEN the destination buffer.  Matches
   are found through a chained hash table (htab/hnext) keyed on 3-byte
   prefixes; output is the byte-oriented literal/back-reference format
   described in the file-header comment. */
90 compress_buf(const unsigned char *in, unsigned int in_len,
91 unsigned char *out, unsigned int out_len)
93 unsigned int oo = 0; //out-offset
94 unsigned int io = 0; //in-offset
/* Hash chains: htab[h] is the most recent position whose 3-byte prefix
   hashed to h, hnext[] links older positions.  -1 terminates a chain. */
97 Ref hnext[BLOCK_SIZE];
98 memset(htab, -1, sizeof (htab));
99 memset(hnext, -1, sizeof (hnext));
100 unsigned int litofs = 0;
101 while (io + 2 < in_len)
103 /* Search for a match of the string starting at IN, we have at
104 least three characters. */
105 unsigned int hval = in[io] | in[io + 1] << 8 | in[io + 2] << 16;
106 unsigned int try, mlen, mofs, tries;
107 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
108 hval = hval & (HS - 1);
110 hnext[io] = htab[hval];
/* Walk the hash chain (at most 12 candidates) looking for the longest
   match; (mlen, mofs) track the best match found so far. */
115 for (tries = 0; try != -1 && tries < 12; tries++)
118 && in[try] == in[io] && in[try + 1] == in[io + 1])
121 mofs = (io - try) - 1;
126 for (; try != -1 && tries < 12; tries++)
129 //assert(io + mlen < in_len);
130 /* Try a match starting from [io] with the strings at [try].
131 That's only sensible if TRY actually is before IO (can happen
132 with uninit hash table). If we have a previous match already
133 we're only going to take the new one if it's longer, hence
134 check the potentially last character. */
135 if (try < io && in[try + mlen] == in[io + mlen])
137 unsigned int this_len, this_ofs;
138 if (memcmp(in + try, in + io, mlen))
141 /* Now try extending the match by more characters. */
143 io + this_len < in_len
144 && in[try + this_len] == in[io + this_len]; this_len++)
148 for (testi = 0; testi < this_len; testi++)
149 assert(in[try + testi] == in[io + testi]);
151 this_ofs = (io - try) - 1;
152 /*if (this_ofs > 65535)
155 assert(this_len >= 2);
156 assert(this_len >= mlen);
157 assert(this_len > mlen || (this_len == mlen && this_ofs > mofs));
159 mlen = this_len, mofs = this_ofs;
160 /* If our match extends up to the end of input, no next
161 match can become better. This is not just an
162 optimization, it establishes a loop invariant
163 (io + mlen < in_len). */
164 if (io + mlen >= in_len)
169 /*if (io - try - 1 >= 65536)
176 /*fprintf(stderr, "%d %d\n", mlen, mofs);*/
/* Reject matches that would encode worse than literals, per the
   strategy table in the file-header comment. */
177 if (mlen == 2 && (litofs || mofs >= 1024))
179 /*else if (mofs >= 65536)
181 else if (mofs >= 65536)
183 if (mlen >= 2048 + 5)
190 /*else if (mlen >= 4096 + 19)
192 else if (mlen >= 2048 + 19)
194 /* Skip this match if the next character would deliver a better one,
195 but only do this if we have the chance to really extend the
196 length (i.e. our current length isn't yet the (conservative)
198 if (mlen && mlen < (2048 + 5) && io + 3 < in_len)
201 in[io + 1] | in[io + 2] << 8 | in[io + 3] << 16;
203 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
204 hval = hval & (HS - 1);
207 && in[try] == in[io + 1] && in[try + 1] == in[io + 2])
209 unsigned int this_len;
212 io + 1 + this_len < in_len
213 && in[try + this_len] == in[io + 1 + this_len];
216 if (this_len >= mlen)
/* Flush any pending literal run before emitting a back-reference. */
232 unsigned litlen = io - litofs;
233 //fprintf(stderr, "lit: %d\n", litlen);
236 unsigned int easy_sz;
237 /* Emit everything we can as self-describers. As soon as
238 we hit a byte we can't emit as such we're going to emit
239 a length descriptor anyway, so we can as well include
240 bytes < 0x80 which might follow afterwards in that run. */
242 easy_sz < litlen && in[litofs + easy_sz] < 0x80;
247 if (oo + easy_sz >= out_len)
249 memcpy(out + oo, in + litofs, easy_sz);
258 if (oo + 1 + litlen >= out_len)
260 out[oo++] = 0x80 | (litlen - 1);
262 out[oo++] = in[litofs++];
267 /* Literal length > 32, so chunk it. */
268 if (oo + 1 + 32 >= out_len)
270 out[oo++] = 0x80 | 31;
271 memcpy(out + oo, in + litofs, 32);
/* Emit the back-reference, choosing the shortest encoding (forms c, d,
   e, f, g from the file-header comment) that fits (mlen, mofs).
   Every emit path first checks remaining output space (oo vs out_len). */
280 //fprintf(stderr, "ref: %d @ %d\n", mlen, mofs);
282 if (mlen >= 2 && mlen <= 9 && mofs < 1024)
284 if (oo + 2 >= out_len)
286 out[oo++] = 0xa0 | ((mofs & 0x300) >> 5) | (mlen - 2);
287 out[oo++] = mofs & 0xff;
289 else if (mlen >= 10 && mlen <= 41 && mofs < 256)
291 if (oo + 2 >= out_len)
293 out[oo++] = 0xc0 | (mlen - 10);
296 else if (mofs >= 65536)
298 assert(mlen >= 5 && mlen < 2048 + 5);
299 if (oo + 5 >= out_len)
301 out[oo++] = 0xf8 | ((mlen - 5) >> 8);
302 out[oo++] = (mlen - 5) & 0xff;
303 out[oo++] = mofs & 0xff;
304 out[oo++] = (mofs >> 8) & 0xff;
305 out[oo++] = mofs >> 16;
307 else if (mlen >= 3 && mlen <= 18)
309 assert(mofs < 65536);
310 if (oo + 3 >= out_len)
312 out[oo++] = 0xe0 | (mlen - 3);
313 out[oo++] = mofs & 0xff;
314 out[oo++] = mofs >> 8;
318 assert(mlen >= 19 && mlen <= 4095 + 19 && mofs < 65536);
319 if (oo + 4 >= out_len)
321 out[oo++] = 0xf0 | ((mlen - 19) >> 8);
322 out[oo++] = (mlen - 19) & 0xff;
323 out[oo++] = mofs & 0xff;
324 out[oo++] = mofs >> 8;
326 /* Insert the hashes for the compressed run [io..io+mlen-1].
327 For [io] we have it already done at the start of the loop.
328 So it's from [io+1..io+mlen-1], and we need three chars per
329 hash, so the accessed characters will be [io+1..io+mlen-1+2],
330 ergo io+mlen+1 < in_len. */
338 in[io] | in[io + 1] << 8 | in[io + 2] << 16;
339 hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
340 hval = hval & (HS - 1);
341 hnext[io] = htab[hval];
348 /* We might have some characters left. */
349 if (io < in_len && !litofs)
355 unsigned litlen = io - litofs;
356 //fprintf(stderr, "lit: %d\n", litlen);
359 unsigned int easy_sz;
360 /* Emit everything we can as self-describers. As soon as we hit a
361 byte we can't emit as such we're going to emit a length
362 descriptor anyway, so we can as well include bytes < 0x80 which
363 might follow afterwards in that run. */
364 for (easy_sz = 0; easy_sz < litlen && in[litofs + easy_sz] < 0x80;
369 if (oo + easy_sz >= out_len)
371 memcpy(out + oo, in + litofs, easy_sz);
380 if (oo + 1 + litlen >= out_len)
382 out[oo++] = 0x80 | (litlen - 1);
384 out[oo++] = in[litofs++];
389 /* Literal length > 32, so chunk it. */
390 if (oo + 1 + 32 >= out_len)
392 out[oo++] = 0x80 | 31;
393 memcpy(out + oo, in + litofs, 32);
/* NOTE(review): incomplete extraction -- lines missing, stray number
   prefixes; does not compile as shown.  See the complete original. */
/* unchecked_decompress_buf: expand IN/IN_LEN into OUT, returning the number
   of bytes produced (out - orig_out).  "Unchecked" -- no bounds validation of
   back-reference offsets against the output produced so far is visible here,
   so callers must only feed data produced by compress_buf. */
405 unchecked_decompress_buf(const unsigned char *in, unsigned int in_len,
407 unsigned int out_len __attribute__((unused)))
409 unsigned char *orig_out = out;
410 const unsigned char *in_end = in + in_len;
/* Main loop: decode one tag byte, dispatch on its high bits to a literal
   or one of the back-reference forms (see file-header format table). */
413 unsigned int first = *in++;
418 /* This default case can't happen, but GCCs VRP is not strong
419 enough to see this, so make this explicitly not fall to
420 the end of the switch, so that we don't have to initialize
428 //fprintf (stderr, "lit: 1\n");
432 //b 100lllll <l+1 bytes>
434 unsigned int l = first & 31;
435 //fprintf (stderr, "lit: %d\n", l);
444 o = first & (3 << 3);
445 o = (o << 5) | *in++;
446 first = (first & 7) + 2;
453 first = (first & 31) + 10;
457 // e 1110llll <8o> <8o>
459 o = in[0] | (in[1] << 8);
466 //f1 1111llll <8o> <8o> <8l>
467 //f2 11110lll <8o> <8o> <8l>
468 // g 11111lll <8o> <8o> <8o> <8l>
473 first = (((first - 8) << 8) | in[0]) + 5;
474 o = in[1] | (in[2] << 8) | (in[3] << 16);
479 first = ((first << 8) | in[0]) + 19;
480 o = in[1] | (in[2] << 8);
486 //fprintf(stderr, "ref: %d @ %d\n", first, o);
490 /* We know that first will not be zero, and this loop structure is
491 better optimizable. */
/* Unrolled byte-at-a-time back-reference copy (Duff's-device style
   switch into the unrolled body); byte-wise copy is required because
   source and destination ranges may overlap for small offsets. */
501 case 18: *out = *(out + o); out++;
502 case 17: *out = *(out + o); out++;
503 case 16: *out = *(out + o); out++;
504 case 15: *out = *(out + o); out++;
505 case 14: *out = *(out + o); out++;
506 case 13: *out = *(out + o); out++;
507 case 12: *out = *(out + o); out++;
508 case 11: *out = *(out + o); out++;
509 case 10: *out = *(out + o); out++;
510 case 9: *out = *(out + o); out++;
511 case 8: *out = *(out + o); out++;
512 case 7: *out = *(out + o); out++;
513 case 6: *out = *(out + o); out++;
514 case 5: *out = *(out + o); out++;
515 case 4: *out = *(out + o); out++;
516 case 3: *out = *(out + o); out++;
517 case 2: *out = *(out + o); out++;
518 case 1: *out = *(out + o); out++;
526 case 0: *out = *(out + o); out++;
527 case 15: *out = *(out + o); out++;
528 case 14: *out = *(out + o); out++;
529 case 13: *out = *(out + o); out++;
530 case 12: *out = *(out + o); out++;
531 case 11: *out = *(out + o); out++;
532 case 10: *out = *(out + o); out++;
533 case 9: *out = *(out + o); out++;
534 case 8: *out = *(out + o); out++;
535 case 7: *out = *(out + o); out++;
536 case 6: *out = *(out + o); out++;
537 case 5: *out = *(out + o); out++;
538 case 4: *out = *(out + o); out++;
539 case 3: *out = *(out + o); out++;
540 case 2: *out = *(out + o); out++;
541 case 1: *out = *(out + o); out++;
543 while ((int)(first -= 16) > 0);
549 return out - orig_out;
552 /**********************************************************************/
554 void repopagestore_init(Repopagestore *store)
556 memset(store, 0, sizeof(*store));
560 void repopagestore_free(Repopagestore *store)
562 sat_free(store->blob_store);
563 sat_free(store->pages);
564 sat_free(store->mapped);
565 if (store->pagefd != -1)
566 close(store->pagefd);
570 /**********************************************************************/
/* NOTE(review): incomplete extraction -- lines missing, stray number
   prefixes; does not compile as shown.  Consult the complete original
   before editing.  Comments added below only restate what the visible
   fragments establish. */
/* Ensure pages PSTART..PEND (inclusive) are loaded into the blob store at
   consecutive addresses and return a pointer to PSTART's mapping.  Uses a
   cost-based eviction scheme over ncanmap in-memory page slots, reading
   missing pages from pagefd via pread() and decompressing odd-sized
   (compressed-flagged) pages on the way in. */
573 repopagestore_load_page_range(Repopagestore *store, unsigned int pstart, unsigned int pend)
575 /* Make sure all pages from PSTART to PEND (inclusive) are loaded,
576 and are consecutive. Return a pointer to the mapping of PSTART. */
577 unsigned char buf[BLOB_PAGESIZE];
580 /* Quick check in case all pages are there already and consecutive. */
581 for (i = pstart; i <= pend; i++)
582 if (store->pages[i].mapped_at == -1
584 && store->pages[i].mapped_at
585 != store->pages[i-1].mapped_at + BLOB_PAGESIZE))
588 return store->blob_store + store->pages[pstart].mapped_at;
/* pagefd == -1: no backing file (paging disabled / everything slurped). */
590 if (store->pagefd == -1)
593 /* Ensure that we can map the numbers of pages we need at all. */
594 if (pend - pstart + 1 > store->ncanmap)
596 unsigned int oldcan = store->ncanmap;
597 store->ncanmap = pend - pstart + 1;
598 if (store->ncanmap < 4)
600 store->mapped = sat_realloc2(store->mapped, store->ncanmap, sizeof(store->mapped[0]));
601 memset(store->mapped + oldcan, 0, (store->ncanmap - oldcan) * sizeof (store->mapped[0]));
602 store->blob_store = sat_realloc2(store->blob_store, store->ncanmap, BLOB_PAGESIZE);
604 fprintf(stderr, "PAGE: can map %d pages\n", store->ncanmap);
608 /* Now search for "cheap" space in our store. Space is cheap if it's either
609 free (very cheap) or contains pages we search for anyway. */
611 /* Setup cost array. */
612 unsigned int cost[store->ncanmap];
613 for (i = 0; i < store->ncanmap; i++)
615 unsigned int pnum = store->mapped[i];
621 Attrblobpage *p = store->pages + pnum;
622 assert(p->mapped_at != -1);
623 if (pnum >= pstart && pnum <= pend)
630 /* And search for cheapest space. */
631 unsigned int best_cost = -1;
632 unsigned int best = 0;
633 unsigned int same_cost = 0;
634 for (i = 0; i + pend - pstart < store->ncanmap; i++)
636 unsigned int c = cost[i];
638 for (j = 0; j < pend - pstart + 1; j++)
641 best_cost = c, best = i;
642 else if (c == best_cost)
644 /* A null cost won't become better. */
648 /* If all places have the same cost we would thrash on slot 0. Avoid
649 this by doing a round-robin strategy in this case. */
650 if (same_cost == store->ncanmap - pend + pstart - 1)
651 best = store->rr_counter++ % (store->ncanmap - pend + pstart)
653 /* So we want to map our pages from [best] to [best+pend-pstart].
654 Use a very simple strategy, which doesn't make the best use of
655 our resources, but works. Throw away all pages in that range
656 (even ours) then copy around ours (in case they were outside the
657 range) or read them in. */
658 for (i = best; i < best + pend - pstart + 1; i++)
660 unsigned int pnum = store->mapped[i];
662 /* If this page is exactly at the right place already,
663 no need to evict it. */
664 && pnum != pstart + i - best)
666 /* Evict this page. */
668 fprintf(stderr, "PAGE: evict page %d from %d\n", pnum, i);
671 store->mapped[i] = 0;
672 store->pages[pnum].mapped_at = -1;
676 /* Everything is free now. Read in the pages we want. */
677 for (i = pstart; i <= pend; i++)
679 Attrblobpage *p = store->pages + i;
680 unsigned int pnum = i - pstart + best;
681 void *dest = store->blob_store + pnum * BLOB_PAGESIZE;
682 if (p->mapped_at != -1)
684 if (p->mapped_at != pnum * BLOB_PAGESIZE)
687 fprintf(stderr, "PAGECOPY: %d to %d\n", i, pnum);
689 /* Still mapped somewhere else, so just copy it from there. */
690 memcpy(dest, store->blob_store + p->mapped_at, BLOB_PAGESIZE);
691 store->mapped[p->mapped_at / BLOB_PAGESIZE] = 0;
/* file_size's low bit doubles as the "compressed" flag (see the writer
   in repopagestore_read_or_setup_pages). */
696 unsigned int in_len = p->file_size;
697 unsigned int compressed = in_len & 1;
700 fprintf(stderr, "PAGEIN: %d to %d", i, pnum);
702 if (pread(store->pagefd, compressed ? buf : dest, in_len, p->file_offset) != in_len)
704 perror("mapping pread");
709 unsigned int out_len;
710 out_len = unchecked_decompress_buf(buf, in_len,
711 dest, BLOB_PAGESIZE);
712 if (out_len != BLOB_PAGESIZE && i < store->num_pages - 1)
715 fprintf(stderr, "can't decompress\n");
720 fprintf(stderr, " (expand %d to %d)", in_len, out_len);
724 fprintf(stderr, "\n");
/* mapped[] stores page-number + 1 so that 0 can mean "slot free". */
727 p->mapped_at = pnum * BLOB_PAGESIZE;
728 store->mapped[pnum] = i + 1;
730 return store->blob_store + best * BLOB_PAGESIZE;
/* Compress LEN bytes at PAGE into CPAGE, which can hold at most MAX bytes.
   Thin public wrapper around the static compress_buf() above; returns
   whatever compress_buf() reports (the size of the compressed data). */
unsigned int
repopagestore_compress_page(unsigned char *page, unsigned int len, unsigned char *cpage, unsigned int max)
{
  return compress_buf(page, len, cpage, max);
}
739 #define SOLV_ERROR_EOF 3
740 #define SOLV_ERROR_CORRUPT 6
/* Read a 32 bit unsigned integer from FP.
   NOTE(review): byte order reconstructed as big-endian (network order), the
   convention used by the solv file format -- confirm against the complete
   original.  Returns 0 on EOF, which is indistinguishable from a stored
   zero; callers detect truncation via later reads/ferror. */
static inline unsigned int
read_u32(FILE *fp)
{
  int c, i;
  unsigned int x = 0;

  for (i = 0; i < 4; i++)
    {
      c = getc(fp);
      if (c == EOF)
        return 0;
      x = (x << 8) | (unsigned int)c;
    }
  return x;
}
/* NOTE(review): incomplete extraction -- lines missing, stray number
   prefixes; does not compile as shown.  Consult the complete original. */
758 /* Try to either setup on-demand paging (using FP as backing
759 file), or in case that doesn't work (FP not seekable) slurps in
760 all pages and deactivates paging. */
/* Returns 0 on success, SOLV_ERROR_EOF on short reads/seek failure, or
   SOLV_ERROR_CORRUPT on page-size mismatch / undersized decompression. */
762 repopagestore_read_or_setup_pages(Repopagestore *store, FILE *fp, unsigned int pagesz, unsigned int blobsz)
766 unsigned int can_seek;
768 unsigned char buf[BLOB_PAGESIZE];
770 if (pagesz != BLOB_PAGESIZE)
772 /* We could handle this by slurping in everything. */
773 return SOLV_ERROR_CORRUPT;
776 if ((cur_file_ofs = ftell(fp)) < 0)
/* Keep our own fd so the caller may close FP; don't leak it to children. */
780 store->pagefd = dup(fileno(fp));
781 if (store->pagefd == -1)
784 fcntl(store->pagefd, F_SETFD, FD_CLOEXEC);
787 fprintf(stderr, "can %sseek\n", can_seek ? "" : "NOT ");
789 npages = (blobsz + BLOB_PAGESIZE - 1) / BLOB_PAGESIZE;
791 store->num_pages = npages;
792 store->pages = sat_malloc2(npages, sizeof(store->pages[0]));
794 /* If we can't seek on our input we have to slurp in everything. */
796 store->blob_store = sat_malloc2(npages, BLOB_PAGESIZE);
797 for (i = 0; i < npages; i++)
/* Each on-disk page is prefixed with a u32 whose low bit flags
   compression; the length proper is the remaining bits. */
799 unsigned int in_len = read_u32(fp);
800 unsigned int compressed = in_len & 1;
801 Attrblobpage *p = store->pages + i;
804 fprintf(stderr, "page %d: len %d (%scompressed)\n",
805 i, in_len, compressed ? "" : "not ");
/* Seekable input: just record offset/size and skip the payload;
   the page is read in lazily by repopagestore_load_page_range(). */
811 p->file_offset = cur_file_ofs;
812 p->file_size = in_len * 2 + compressed;
813 if (fseek(fp, in_len, SEEK_CUR) < 0)
815 /* We can't fall back to non-seeking behaviour as we already
816 read over some data pages without storing them away. */
817 close(store->pagefd);
819 return SOLV_ERROR_EOF;
821 cur_file_ofs += in_len;
825 unsigned int out_len;
826 void *dest = store->blob_store + i * BLOB_PAGESIZE;
827 p->mapped_at = i * BLOB_PAGESIZE;
830 /* We can't seek, so suck everything in. */
831 if (fread(compressed ? buf : dest, in_len, 1, fp) != 1)
834 return SOLV_ERROR_EOF;
838 out_len = unchecked_decompress_buf(buf, in_len, dest, BLOB_PAGESIZE);
839 if (out_len != BLOB_PAGESIZE && i < npages - 1)
841 return SOLV_ERROR_CORRUPT;
850 repopagestore_disable_paging(Repopagestore *store)
852 if (store->num_pages)
853 repopagestore_load_page_range(store, 0, store->num_pages - 1);
/* NOTE(review): incomplete extraction -- lines missing, stray number
   prefixes; does not compile as shown.  Consult the complete original. */
/* Standalone-tool helper: stream FROM to TO in BLOCK_SIZE chunks.
   With COMPRESS nonzero each chunk is written as <raw u32 length><data>,
   compressing when compress_buf() helps; otherwise the same framing is
   read back and expanded.  NOTE(review): the length word is written with
   host endianness/size (fwrite of out_len), so the framed stream is not
   portable across architectures -- acceptable for a local test tool. */
859 transfer_file(FILE * from, FILE * to, int compress)
861 unsigned char inb[BLOCK_SIZE];
862 unsigned char outb[BLOCK_SIZE];
863 while (!feof (from) && !ferror (from))
865 unsigned int in_len, out_len;
868 in_len = fread(inb, 1, BLOCK_SIZE, from);
871 unsigned char *b = outb;
872 out_len = compress_buf(inb, in_len, outb, sizeof (outb));
/* Fall back to storing the chunk uncompressed. */
874 b = inb, out_len = in_len;
875 if (fwrite(&out_len, sizeof (out_len), 1, to) != 1)
877 perror("write size");
880 if (fwrite(b, out_len, 1, to) != 1)
882 perror("write data");
889 if (fread(&in_len, sizeof(in_len), 1, from) != 1)
893 perror("can't read size");
896 if (fread(inb, in_len, 1, from) != 1)
898 perror("can't read data");
902 unchecked_decompress_buf(inb, in_len, outb, sizeof(outb));
903 if (fwrite(outb, out_len, 1, to) != 1)
905 perror("can't write output");
/* Just for benchmarking purposes: a deliberately naive byte-by-byte copy
   used by benchmark() as a throughput baseline against compress_buf()/
   unchecked_decompress_buf().  Copies LEN bytes from SRC to DEST; the
   regions must not overlap.  (Body reconstructed -- the original body is
   missing from this dump; any correct byte copy preserves the benchmark's
   meaning.) */
static void
dumb_memcpy(void *dest, const void *src, unsigned int len)
{
  char *d = dest;
  const char *s = src;

  while (len--)
    *d++ = *s++;
}
/* NOTE(review): incomplete extraction -- lines missing, stray number
   prefixes; does not compile as shown.  Consult the complete original. */
/* Standalone-tool benchmark: read one BLOCK_SIZE chunk from FROM, then
   measure MB/s of (1) dumb_memcpy as a baseline, (2) compress_buf and
   (3) unchecked_decompress_buf.  Each phase first calibrates an iteration
   count against CLOCKS_PER_SEC/4 of wall clock, then times 10 runs of
   per_loop iterations and prints the throughput to stderr. */
923 benchmark(FILE * from)
925 unsigned char inb[BLOCK_SIZE];
926 unsigned char outb[BLOCK_SIZE];
927 unsigned int in_len = fread(inb, 1, BLOCK_SIZE, from);
928 unsigned int out_len;
931 perror("can't read from input");
935 unsigned int calib_loop;
936 unsigned int per_loop;
/* --- phase 1: memcpy baseline --- */
945 while ((clock() - start) < CLOCKS_PER_SEC / 4)
948 for (i = 0; i < calib_loop; i++)
949 dumb_memcpy(outb, inb, in_len);
950 per_loop += calib_loop;
953 fprintf(stderr, "memcpy:\nCalibrated to %d iterations per loop\n",
957 for (i = 0; i < 10; i++)
958 for (j = 0; j < per_loop; j++)
959 dumb_memcpy(outb, inb, in_len);
961 seconds = (end - start) / (float) CLOCKS_PER_SEC;
962 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
963 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* --- phase 2: compression --- */
969 while ((clock() - start) < CLOCKS_PER_SEC / 4)
972 for (i = 0; i < calib_loop; i++)
973 compress_buf(inb, in_len, outb, sizeof(outb));
974 per_loop += calib_loop;
977 fprintf(stderr, "compression:\nCalibrated to %d iterations per loop\n",
981 for (i = 0; i < 10; i++)
982 for (j = 0; j < per_loop; j++)
983 compress_buf(inb, in_len, outb, sizeof(outb));
985 seconds = (end - start) / (float) CLOCKS_PER_SEC;
986 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
987 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* --- phase 3: decompression of the data compressed above --- */
989 out_len = compress_buf(inb, in_len, outb, sizeof(outb));
994 while ((clock() - start) < CLOCKS_PER_SEC / 4)
997 for (i = 0; i < calib_loop; i++)
998 unchecked_decompress_buf(outb, out_len, inb, sizeof(inb));
999 per_loop += calib_loop;
1002 fprintf(stderr, "decompression:\nCalibrated to %d iterations per loop\n",
1006 for (i = 0; i < 10; i++)
1007 for (j = 0; j < per_loop; j++)
1008 unchecked_decompress_buf(outb, out_len, inb, sizeof(inb));
1010 seconds = (end - start) / (float) CLOCKS_PER_SEC;
1011 fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
1012 ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
/* NOTE(review): incomplete extraction -- lines missing, stray number
   prefixes; does not compile as shown.  Consult the complete original. */
/* Standalone-tool entry point: filter stdin to stdout.
   "-d" selects decompression, "-b" runs benchmark(); the default is
   compression via transfer_file(). */
1016 main(int argc, char *argv[])
1019 if (argc > 1 && !strcmp(argv[1], "-d"))
1021 if (argc > 1 && !strcmp(argv[1], "-b"))
1024 transfer_file(stdin, stdout, compress);