X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=src%2Frepo_solv.c;h=5858d4fc6c31f1518d157c859cc934c09f642897;hb=e679b515eddb3dd340fb25620de0160211f40fdc;hp=8dfcc7607caf57f1ab4550e3fe8689135a723b79;hpb=709d8403945de1ac569f14a89b953b900c2bded3;p=platform%2Fupstream%2Flibsolv.git diff --git a/src/repo_solv.c b/src/repo_solv.c index 8dfcc76..5858d4f 100644 --- a/src/repo_solv.c +++ b/src/repo_solv.c @@ -7,13 +7,9 @@ /* * repo_solv.c - * - * Read the binary dump of a Repo and create a Repo * from it - * - * See - * Repo *pool_addrepo_solv(Pool *pool, FILE *fp) - * below - * + * + * Add a repo in solv format + * */ @@ -27,9 +23,12 @@ #include "util.h" #include "repopack.h" +#include "repopage.h" + +#include "poolid_private.h" /* WHATPROVIDES_BLOCK */ #define INTERESTED_START SOLVABLE_NAME -#define INTERESTED_END SOLVABLE_FRESHENS +#define INTERESTED_END SOLVABLE_ENHANCES #define SOLV_ERROR_NOT_SOLV 1 #define SOLV_ERROR_UNSUPPORTED 2 @@ -38,10 +37,11 @@ #define SOLV_ERROR_OVERFLOW 5 #define SOLV_ERROR_CORRUPT 6 -static Pool *mypool; /* for pool_debug... */ -/*-----------------------------------------------------------------*/ -/* .solv read functions */ + +/******************************************************************************* + * functions to extract data from a file handle + */ /* * read u32 @@ -60,8 +60,7 @@ read_u32(Repodata *data) c = getc(data->fp); if (c == EOF) { - pool_debug(mypool, SAT_ERROR, "unexpected EOF\n"); - data->error = SOLV_ERROR_EOF; + data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF"); return 0; } x = (x << 8) | c; @@ -84,8 +83,7 @@ read_u8(Repodata *data) c = getc(data->fp); if (c == EOF) { - pool_debug(mypool, SAT_ERROR, "unexpected EOF\n"); - data->error = SOLV_ERROR_EOF; + data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF"); return 0; } return c; @@ -109,40 +107,31 @@ read_id(Repodata *data, Id max) c = getc(data->fp); if (c == EOF) { - pool_debug(mypool, SAT_ERROR, "unexpected EOF\n"); - data->error = SOLV_ERROR_EOF; + data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF"); return 0; } if (!(c & 128)) { x = (x << 7) | c; - if (max && x >= max) + if (max && x >= (unsigned int)max) { - pool_debug(mypool, SAT_ERROR, "read_id: id too large (%u/%u)\n", x, max); - data->error = SOLV_ERROR_ID_RANGE; + data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "read_id: id too large (%u/%u)", x, max); return 0; } return x; } x = (x << 7) ^ c ^ 128; } - pool_debug(mypool, SAT_ERROR, "read_id: id too long\n"); - data->error = SOLV_ERROR_CORRUPT; + data->error = pool_error(data->repo->pool, SOLV_ERROR_CORRUPT, "read_id: id too long"); return 0; } -/* - * read array of Ids - */ - -#if 0 static Id * -read_rel_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end, Id marker) +read_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end) { unsigned int x = 0; int c; - Id old = 0; if (data->error) return 0; @@ -151,8 +140,7 @@ read_rel_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end, Id marker) c = getc(data->fp); if (c == EOF) { - pool_debug(mypool, SAT_ERROR, "unexpected EOF\n"); - data->error = SOLV_ERROR_EOF; + data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF"); return 0; } if ((c & 128) != 0) @@ -161,38 +149,16 @@ read_rel_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end, Id marker) continue; } x = (x << 6) | (c & 63); - if (x == 0) + if (max && x >= (unsigned int)max) { - /* marker hack */ - if (store == end) - { - 
pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n"); - data->error = SOLV_ERROR_OVERFLOW; - return 0; - } - if (c != 0x40) - { - *store++ = 0; - return store; - } - *store++ = marker; /* do not map! */ - old = 0; - x = 0; - continue; - } - x = (x - 1) + old; - old = x; - if (max && x >= max) - { - pool_debug(mypool, SAT_ERROR, "read_rel_idarray: id too large (%u/%u)\n", x, max); - data->error = SOLV_ERROR_ID_RANGE; + data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "read_idarray: id too large (%u/%u)", x, max); return 0; } if (map) x = map[x]; if (store == end) { - pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n"); + data->error = pool_error(data->repo->pool, SOLV_ERROR_OVERFLOW, "read_idarray: array overflow"); return 0; } *store++ = x; @@ -202,8 +168,7 @@ read_rel_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end, Id marker) return store; if (store == end) { - pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n"); - data->error = SOLV_ERROR_OVERFLOW; + data->error = pool_error(data->repo->pool, SOLV_ERROR_OVERFLOW, "read_idarray: array overflow"); return 0; } *store++ = 0; @@ -212,25 +177,32 @@ read_rel_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end, Id marker) x = 0; } } -#endif + + +/******************************************************************************* + * functions to extract data from memory + */ + +/* + * read array of Ids + */ static inline unsigned char * -data_read_id_max(unsigned char *dp, Id *ret, Id *map, int max, int *error) +data_read_id_max(unsigned char *dp, Id *ret, Id *map, int max, Repodata *data) { Id x; dp = data_read_id(dp, &x); - if (max && x >= max) + if (x < 0 || (max && x >= max)) { - pool_debug(mypool, SAT_ERROR, "data_read_idarray: id too large (%u/%u)\n", x, max); - *error = SOLV_ERROR_ID_RANGE; + data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_id_max: id too large (%u/%u)", x, max); x = 0; } *ret = map ? map[x] : x; return dp; } -unsigned char * -data_read_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *error) +static unsigned char * +data_read_idarray(unsigned char *dp, Id **storep, Id *map, int max, Repodata *data) { Id *store = *storep; unsigned int x = 0; @@ -245,13 +217,13 @@ data_read_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *error) continue; } x = (x << 6) | (c & 63); - if (max && x >= max) + if (max && x >= (unsigned int)max) { - pool_debug(mypool, SAT_ERROR, "data_read_idarray: id too large (%u/%u)\n", x, max); - *error = SOLV_ERROR_ID_RANGE; + data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_idarray: id too large (%u/%u)", x, max); + data->error = SOLV_ERROR_ID_RANGE; break; } - *store++ = x; + *store++ = map ? 
map[x] : x; if ((c & 64) == 0) break; x = 0; @@ -261,8 +233,8 @@ data_read_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *error) return dp; } -unsigned char * -data_read_rel_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *error, Id marker) +static unsigned char * +data_read_rel_idarray(unsigned char *dp, Id **storep, Id *map, int max, Repodata *data, Id marker) { Id *store = *storep; Id old = 0; @@ -289,10 +261,9 @@ data_read_rel_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *err } x = old + (x - 1); old = x; - if (max && x >= max) + if (max && x >= (unsigned int)max) { - pool_debug(mypool, SAT_ERROR, "data_read_rel_idarray: id too large (%u/%u)\n", x, max); - *error = SOLV_ERROR_ID_RANGE; + data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_rel_idarray: id too large (%u/%u)", x, max); break; } *store++ = map ? map[x] : x; @@ -306,305 +277,82 @@ data_read_rel_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *err } -static Id * -read_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end) -{ - unsigned int x = 0; - int c; - if (data->error) - return 0; - for (;;) - { - c = getc(data->fp); - if (c == EOF) - { - pool_debug(mypool, SAT_ERROR, "unexpected EOF\n"); - data->error = SOLV_ERROR_EOF; - return 0; - } - if ((c & 128) != 0) - { - x = (x << 7) ^ c ^ 128; - continue; - } - x = (x << 6) | (c & 63); - if (max && x >= max) - { - pool_debug(mypool, SAT_ERROR, "read_idarray: id too large (%u/%u)\n", x, max); - data->error = SOLV_ERROR_ID_RANGE; - return 0; - } - if (map) - x = map[x]; - if (store == end) - { - pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n"); - return 0; - } - *store++ = x; - if ((c & 64) == 0) - { - if (x == 0) /* already have trailing zero? */ - return store; - if (store == end) - { - pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n"); - data->error = SOLV_ERROR_OVERFLOW; - return 0; - } - *store++ = 0; - return store; - } - x = 0; - } -} -static void -read_str(Repodata *data, char **inbuf, unsigned *len) -{ - unsigned char *buf = (unsigned char*)*inbuf; - if (!buf) - { - buf = sat_malloc(1024); - *len = 1024; - } - int c; - unsigned ofs = 0; - while((c = getc(data->fp)) != 0) - { - if (c == EOF) - { - pool_debug (mypool, SAT_ERROR, "unexpected EOF\n"); - data->error = SOLV_ERROR_EOF; - return; - } - /* Plus 1 as we also want to add the 0. */ - if (ofs + 1 >= *len) - { - *len += 256; - /* Don't realloc on the inbuf, it might be on the stack. 
*/ - if (buf == (unsigned char*)*inbuf) - { - buf = sat_malloc(*len); - memcpy(buf, *inbuf, *len - 256); - } - else - buf = sat_realloc(buf, *len); - } - buf[ofs++] = c; - } - buf[ofs++] = 0; - *inbuf = (char*)buf; -} +/******************************************************************************* + * functions to add data to our incore memory space + */ + +#define INCORE_ADD_CHUNK 8192 +#define DATA_READ_CHUNK 8192 static void -skip_item(Repodata *data, unsigned type, unsigned numid, unsigned numrel) +incore_add_id(Repodata *data, Id sx) { - switch (type) + unsigned int x = (unsigned int)sx; + unsigned char *dp; + /* make sure we have at least 5 bytes free */ + if (data->incoredatafree < 5) { - case REPOKEY_TYPE_VOID: - case REPOKEY_TYPE_CONSTANT: - case REPOKEY_TYPE_CONSTANTID: - break; - case REPOKEY_TYPE_ID: - read_id(data, numid + numrel); /* just check Id */ - break; - case REPOKEY_TYPE_DIR: - read_id(data, numid + data->dirpool.ndirs); /* just check Id */ - break; - case REPOKEY_TYPE_NUM: - read_id(data, 0); - break; - case REPOKEY_TYPE_U32: - read_u32(data); - break; - case REPOKEY_TYPE_STR: - while (read_u8(data) != 0) - ; - break; - case REPOKEY_TYPE_IDARRAY: - case REPOKEY_TYPE_REL_IDARRAY: - while ((read_u8(data) & 0xc0) != 0) - ; - break; - case REPOKEY_TYPE_DIRNUMNUMARRAY: - for (;;) - { - read_id(data, numid + data->dirpool.ndirs); /* just check Id */ - read_id(data, 0); - if (!(read_id(data, 0) & 0x40)) - break; - } - break; - case REPOKEY_TYPE_DIRSTRARRAY: - for (;;) - { - Id id = read_id(data, 0); - while (read_u8(data) != 0) - ; - if (!(id & 0x40)) - break; - } - break; - default: - pool_debug(mypool, SAT_ERROR, "unknown type %d\n", type); - data->error = SOLV_ERROR_CORRUPT; - break; + data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK); + data->incoredatafree = INCORE_ADD_CHUNK; } -} - -static int -key_cmp (const void *pa, const void *pb) -{ - Repokey *a = (Repokey *)pa; - Repokey *b = (Repokey *)pb; - return a->name - b->name; -} - -static void repodata_load_solv(Repodata *data); - -static void -parse_external_repodata(Repodata *maindata, Id *keyp, Repokey *keys, Id *idmap, unsigned numid, unsigned numrel) -{ - Repo *repo = maindata->repo; - Id key, id; - Id *ida, *ide; - Repodata *data; - int i, n; - - repo->repodata = sat_realloc2(repo->repodata, repo->nrepodata + 1, sizeof (*data)); - data = repo->repodata + repo->nrepodata++; - memset(data, 0, sizeof(*data)); - data->repo = repo; - data->pagefd = -1; - data->state = REPODATA_STUB; - data->loadcallback = repodata_load_solv; - - while ((key = *keyp++) != 0) + dp = data->incoredata + data->incoredatalen; + if (x >= (1 << 14)) { - id = keys[key].name; - switch (keys[key].type) - { - case REPOKEY_TYPE_IDARRAY: - if (id != REPODATA_KEYS) - { - skip_item(maindata, REPOKEY_TYPE_IDARRAY, numid, numrel); - break; - } - /* read_idarray writes a terminating 0, that's why the + 1 */ - ida = sat_calloc(keys[key].size + 1, sizeof(Id)); - ide = read_idarray(maindata, numid, idmap, ida, ida + keys[key].size + 1); - n = ide - ida - 1; - if (n & 1) - { - pool_debug (mypool, SAT_ERROR, "invalid attribute data\n"); - maindata->error = SOLV_ERROR_CORRUPT; - return; - } - data->nkeys = 1 + (n >> 1); - data->keys = sat_malloc2(data->nkeys, sizeof(data->keys[0])); - memset(data->keys, 0, sizeof(Repokey)); - for (i = 1, ide = ida; i < data->nkeys; i++) - { - data->keys[i].name = *ide++; - data->keys[i].type = *ide++; - data->keys[i].size = 0; - data->keys[i].storage = 0; - } - sat_free(ida); - if 
(data->nkeys > 2) - qsort(data->keys + 1, data->nkeys - 1, sizeof(data->keys[0]), key_cmp); - break; - case REPOKEY_TYPE_STR: - if (id != REPODATA_LOCATION) - skip_item(maindata, REPOKEY_TYPE_STR, numid, numrel); - else - { - char buf[1024]; - unsigned len = sizeof(buf); - char *filename = buf; - read_str(maindata, &filename, &len); - data->location = strdup(filename); - if (filename != buf) - free(filename); - } - break; - default: - skip_item(maindata, keys[key].type, numid, numrel); - break; - } + if (x >= (1 << 28)) + *dp++ = (x >> 28) | 128; + if (x >= (1 << 21)) + *dp++ = (x >> 21) | 128; + *dp++ = (x >> 14) | 128; } + if (x >= (1 << 7)) + *dp++ = (x >> 7) | 128; + *dp++ = x & 127; + data->incoredatafree -= dp - (data->incoredata + data->incoredatalen); + data->incoredatalen = dp - data->incoredata; } static void -parse_info_repodata(Repodata *maindata, Id *keyp, Repokey *keys, Id *idmap, unsigned numid, unsigned numrel) +incore_add_sizek(Repodata *data, unsigned int sx) { - Id key, id; - Id *ida; - while ((key = *keyp++) != 0) + if (sx < (1 << 22)) + incore_add_id(data, (Id)(sx << 10)); + else { - id = keys[key].name; - if (id == REPODATA_ADDEDFILEPROVIDES && keys[key].type == REPOKEY_TYPE_REL_IDARRAY) + if ((sx >> 25) != 0) { - Id old = 0; - /* + 1 just in case */ - ida = sat_calloc(keys[key].size + 1, sizeof(Id)); - read_idarray(maindata, 0, 0, ida, ida + keys[key].size + 1); - maindata->addedfileprovides = ida; - for (; *ida; ida++) - { - old += *ida - 1; - if (old >= numid) - { - *ida = 0; - break; - } - *ida = idmap ? idmap[old] : old; - } - continue; + incore_add_id(data, (Id)(sx >> 25)); + data->incoredata[data->incoredatalen - 1] |= 128; } - skip_item(maindata, keys[key].type, numid, numrel); + incore_add_id(data, (Id)((sx << 10) | 0x80000000)); + data->incoredata[data->incoredatalen - 5] = (sx >> 18) | 128; } } -/*-----------------------------------------------------------------*/ - - -static void -skip_schema(Repodata *data, Id *keyp, Repokey *keys, unsigned int numid, unsigned int numrel) -{ - Id key; - while ((key = *keyp++) != 0) - skip_item(data, keys[key].type, numid, numrel); -} - -/*-----------------------------------------------------------------*/ - static void -incore_add_id(Repodata *data, Id x) +incore_add_ideof(Repodata *data, Id sx, int eof) { + unsigned int x = (unsigned int)sx; unsigned char *dp; /* make sure we have at least 5 bytes free */ if (data->incoredatafree < 5) { - data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024); - data->incoredatafree = 1024; + data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK); + data->incoredatafree = INCORE_ADD_CHUNK; } dp = data->incoredata + data->incoredatalen; - if (x < 0) - abort(); - if (x >= (1 << 14)) + if (x >= (1 << 13)) { - if (x >= (1 << 28)) - *dp++ = (x >> 28) | 128; - if (x >= (1 << 21)) - *dp++ = (x >> 21) | 128; - *dp++ = (x >> 14) | 128; + if (x >= (1 << 27)) + *dp++ = (x >> 27) | 128; + if (x >= (1 << 20)) + *dp++ = (x >> 20) | 128; + *dp++ = (x >> 13) | 128; } - if (x >= (1 << 7)) - *dp++ = (x >> 7) | 128; - *dp++ = x & 127; + if (x >= (1 << 6)) + *dp++ = (x >> 6) | 128; + *dp++ = eof ? 
(x & 63) : (x & 63) | 64; data->incoredatafree -= dp - (data->incoredata + data->incoredatalen); data->incoredatalen = dp - data->incoredata; } @@ -612,10 +360,10 @@ incore_add_id(Repodata *data, Id x) static void incore_add_blob(Repodata *data, unsigned char *buf, int len) { - if (data->incoredatafree < len) + if (data->incoredatafree < (unsigned int)len) { - data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024 + len); - data->incoredatafree = 1024 + len; + data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK + len); + data->incoredatafree = INCORE_ADD_CHUNK + len; } memcpy(data->incoredata + data->incoredatalen, buf, len); data->incoredatafree -= len; @@ -633,21 +381,19 @@ incore_map_idarray(Repodata *data, unsigned char *dp, Id *map, Id max) Id id; int eof; dp = data_read_ideof(dp, &id, &eof); - if (max && id >= max) + if (id < 0 || (max && id >= max)) { - pool_debug(mypool, SAT_ERROR, "incore_map_idarray: id too large (%u/%u)\n", id, max); - data->error = SOLV_ERROR_ID_RANGE; + data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "incore_map_idarray: id too large (%u/%u)", id, max); break; } id = map[id]; - if (id >= 64) - id = (id & 63) | ((id & ~63) << 1); - incore_add_id(data, eof ? id : id | 64); + incore_add_ideof(data, id, eof); if (eof) break; } } +#if 0 static void incore_add_u32(Repodata *data, unsigned int x) { @@ -655,8 +401,8 @@ incore_add_u32(Repodata *data, unsigned int x) /* make sure we have at least 4 bytes free */ if (data->incoredatafree < 4) { - data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024); - data->incoredatafree = 1024; + data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK); + data->incoredatafree = INCORE_ADD_CHUNK; } dp = data->incoredata + data->incoredatalen; *dp++ = x >> 24; @@ -667,7 +413,6 @@ incore_add_u32(Repodata *data, unsigned int x) data->incoredatalen += 4; } -#if 0 static void incore_add_u8(Repodata *data, unsigned int x) { @@ -675,7 +420,7 @@ incore_add_u8(Repodata *data, unsigned int x) /* make sure we have at least 1 byte free */ if (data->incoredatafree < 1) { - data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024); + data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + 1024); data->incoredatafree = 1024; } dp = data->incoredata + data->incoredatalen; @@ -686,22 +431,21 @@ incore_add_u8(Repodata *data, unsigned int x) #endif - -// ---------------------------------------------- - +/******************************************************************************* + * our main function + */ /* - * read repo from .solv file - * and add it to pool + * read repo from .solv file and add it to pool */ -static int -repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) +int +repo_add_solv(Repo *repo, FILE *fp, int flags) { Pool *pool = repo->pool; int i, l; - unsigned int numid, numrel, numdir, numsolv; - unsigned int numkeys, numschemata, numinfo; + int numid, numrel, numdir, numsolv; + int numkeys, numschemata; Offset sizeid; Offset *str; /* map Id -> Offset into string space */ @@ -709,11 +453,10 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) char *sp; /* pointer into string space */ Id *idmap; /* map of repo Ids to pool Ids */ Id id, type; - unsigned int hashmask, h; - int hh; - Id *hashtbl; + Hashval hashmask, h, hh; + Hashtable hashtbl; Id name, evr, did; - int flags; + int relflags; Reldep *ran; unsigned int size_idarray; Id *idarraydatap, 
*idarraydataend; @@ -723,139 +466,161 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) unsigned int solvversion; Repokey *keys; Id *schemadata, *schemadatap, *schemadataend; - Id *schemata, key; - int have_xdata; - unsigned oldnrepodata; + Id *schemata, key, *keyp; + int nentries; + int have_incoredata; int maxsize, allsize; - unsigned char *buf, *dp, *dps; - int left; + unsigned char *buf, *bufend, *dp, *dps; + Id stack[3 * 5]; + int keydepth; + int needchunk; /* need a new chunk of data */ + unsigned int now; + int oldnstrings = pool->ss.nstrings; + int oldnrels = pool->nrels; struct _Stringpool *spool; + Repodata *parent = 0; Repodata data; + int extendstart = 0, extendend = 0; /* set in case we're extending */ + + now = solv_timems(0); + + if ((flags & REPO_USE_LOADING) != 0) + { + /* this is a stub replace operation */ + flags |= REPO_EXTEND_SOLVABLES; + /* use REPO_REUSE_REPODATA hack so that the old repodata is kept */ + parent = repo_add_repodata(repo, flags | REPO_REUSE_REPODATA); + extendstart = parent->start; + extendend = parent->end; + } + else if (flags & REPO_EXTEND_SOLVABLES) + { + /* extend all solvables of this repo */ + extendstart = repo->start; + extendend = repo->end; + } + memset(&data, 0, sizeof(data)); data.repo = repo; data.fp = fp; - data.pagefd = -1; - - mypool = pool; + repopagestore_init(&data.store); if (read_u32(&data) != ('S' << 24 | 'O' << 16 | 'L' << 8 | 'V')) - { - pool_debug(pool, SAT_ERROR, "not a SOLV file\n"); - return SOLV_ERROR_NOT_SOLV; - } + return pool_error(pool, SOLV_ERROR_NOT_SOLV, "not a SOLV file"); solvversion = read_u32(&data); switch (solvversion) { - case SOLV_VERSION_6: - break; + case SOLV_VERSION_8: + break; default: - pool_debug(pool, SAT_ERROR, "unsupported SOLV version\n"); - return SOLV_ERROR_UNSUPPORTED; + return pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported SOLV version"); } - pool_freeidhashes(pool); - - numid = read_u32(&data); - numrel = read_u32(&data); - numdir = read_u32(&data); - numsolv = read_u32(&data); - numkeys = read_u32(&data); - numschemata = read_u32(&data); - numinfo = read_u32(&data); + numid = (int)read_u32(&data); + numrel = (int)read_u32(&data); + numdir = (int)read_u32(&data); + numsolv = (int)read_u32(&data); + numkeys = (int)read_u32(&data); + numschemata = (int)read_u32(&data); solvflags = read_u32(&data); - if (numdir && numdir < 2) + if (numid < 0 || numid >= 0x20000000) + return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of ids"); + if (numrel < 0 || numrel >= 0x20000000) + return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of rels"); + if (numdir && (numdir < 2 || numdir >= 0x20000000)) + return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of dirs"); + if (numsolv < 0 || numsolv >= 0x20000000) + return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of solvables"); + if (numkeys < 0 || numkeys >= 0x20000000) + return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of keys"); + if (numschemata < 0 || numschemata >= 0x20000000) + return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of schematas"); + + if (numrel && (flags & REPO_LOCALPOOL) != 0) + return pool_error(pool, SOLV_ERROR_CORRUPT, "relations are forbidden in a local pool"); + if ((flags & REPO_EXTEND_SOLVABLES) && numsolv) { - pool_debug(pool, SAT_ERROR, "bad number of dirs\n"); - return SOLV_ERROR_CORRUPT; - } - - if (parent) - { - if (numrel) - { - pool_debug(pool, SAT_ERROR, "relations are forbidden in a store\n"); - return SOLV_ERROR_CORRUPT; - } - if (parent->end - parent->start != numsolv) - { 
- pool_debug(pool, SAT_ERROR, "unequal number of solvables in a store\n"); - return SOLV_ERROR_CORRUPT; - } - if (numinfo) - { - pool_debug(pool, SAT_ERROR, "info blocks are forbidden in a store\n"); - return SOLV_ERROR_CORRUPT; - } + /* make sure that we exactly replace the stub repodata */ + if (extendend - extendstart != numsolv) + return pool_error(pool, SOLV_ERROR_CORRUPT, "sub-repository solvable number does not match main repository (%d - %d)", extendend - extendstart, numsolv); + for (i = 0; i < numsolv; i++) + if (pool->solvables[extendstart + i].repo != repo) + return pool_error(pool, SOLV_ERROR_CORRUPT, "main repository contains holes, cannot extend"); } /******* Part 1: string IDs *****************************************/ - sizeid = read_u32(&data); /* size of string+Id space */ + sizeid = read_u32(&data); /* size of string space */ /* * read strings and Ids - * + * */ - + /* * alloc buffers */ - if (!parent) - spool = &pool->ss; + if (!(flags & REPO_LOCALPOOL)) + { + spool = &pool->ss; + /* alloc max needed string buffer and string pointers, will shrink again later */ +#if 0 + spool->stringspace = solv_realloc(spool->stringspace, spool->sstrings + sizeid + 1); + spool->strings = solv_realloc2(spool->strings, spool->nstrings + numid, sizeof(Offset)); +#else + spool->sstrings += sizeid + 1; + spool->nstrings += numid; + stringpool_shrink(spool); /* we misuse stringpool_shrink so that the correct BLOCK factor is used */ + spool->sstrings -= sizeid + 1; + spool->nstrings -= numid; +#endif + } else { data.localpool = 1; spool = &data.spool; - spool->stringspace = sat_malloc(7); + spool->stringspace = solv_malloc(7 + sizeid + 1); + spool->strings = solv_malloc2(numid < 2 ? 2 : numid, sizeof(Offset)); strcpy(spool->stringspace, ""); spool->sstrings = 7; - spool->nstrings = 0; + spool->nstrings = 1; + spool->strings[0] = 0; /* */ } - /* alloc string buffer */ - spool->stringspace = sat_realloc(spool->stringspace, spool->sstrings + sizeid + 1); - /* alloc string offsets (Id -> Offset into string space) */ - spool->strings = sat_realloc2(spool->strings, spool->nstrings + numid, sizeof(Offset)); - - strsp = spool->stringspace; - str = spool->strings; /* array of offsets into strsp, indexed by Id */ - - /* point to _BEHIND_ already allocated string/Id space */ - strsp += spool->sstrings; - /* - * read new repo at end of pool + * read string data and append to old string space */ - + + strsp = spool->stringspace + spool->sstrings; /* append new entries */ if ((solvflags & SOLV_FLAG_PREFIX_POOL) == 0) { if (sizeid && fread(strsp, sizeid, 1, fp) != 1) { - pool_debug(pool, SAT_ERROR, "read error while reading strings\n"); - return SOLV_ERROR_EOF; + repodata_freedata(&data); + return pool_error(pool, SOLV_ERROR_EOF, "read error while reading strings"); } } else { unsigned int pfsize = read_u32(&data); - char *prefix = sat_malloc(pfsize); + char *prefix = solv_malloc(pfsize); char *pp = prefix; - char *old_str = 0; + char *old_str = strsp; char *dest = strsp; int freesp = sizeid; if (pfsize && fread(prefix, pfsize, 1, fp) != 1) - { - pool_debug(pool, SAT_ERROR, "read error while reading strings\n"); - sat_free(prefix); - return SOLV_ERROR_EOF; + { + solv_free(prefix); + repodata_freedata(&data); + return pool_error(pool, SOLV_ERROR_EOF, "read error while reading strings"); } for (i = 1; i < numid; i++) { @@ -864,9 +629,9 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) freesp -= same + len; if (freesp < 0) { - pool_debug(pool, SAT_ERROR, "overflow while expanding strings\n"); - 
sat_free(prefix); - return SOLV_ERROR_OVERFLOW; + solv_free(prefix); + repodata_freedata(&data); + return pool_error(pool, SOLV_ERROR_OVERFLOW, "overflow while expanding strings"); } if (same) memcpy(dest, old_str, same); @@ -875,34 +640,35 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) old_str = dest; dest += same + len; } - sat_free(prefix); + solv_free(prefix); if (freesp != 0) { - pool_debug(pool, SAT_ERROR, "expanding strings size mismatch\n"); - return SOLV_ERROR_CORRUPT; + repodata_freedata(&data); + return pool_error(pool, SOLV_ERROR_CORRUPT, "expanding strings size mismatch"); } } strsp[sizeid] = 0; /* make string space \0 terminated */ sp = strsp; - if (parent) + /* now merge */ + str = spool->strings; /* array of offsets into strsp, indexed by Id */ + if ((flags & REPO_LOCALPOOL) != 0) { - /* no shared pool, thus no idmap and no unification */ + /* no shared pool, thus no idmap and no unification needed */ idmap = 0; - spool->nstrings = numid; - str[0] = 0; + spool->nstrings = numid < 2 ? 2 : numid; /* make sure we have at least id 0 and 1 */ if (*sp) { - /* we need the '' for directories */ - pool_debug(pool, SAT_ERROR, "store strings don't start with ''\n"); - return SOLV_ERROR_CORRUPT; + /* we need id 1 to be '' for directories */ + repodata_freedata(&data); + return pool_error(pool, SOLV_ERROR_CORRUPT, "store strings don't start with an empty string"); } for (i = 1; i < spool->nstrings; i++) { - if (sp >= strsp + sizeid) + if (sp >= strsp + sizeid && numid >= 2) { - pool_debug(pool, SAT_ERROR, "not enough strings\n"); - return SOLV_ERROR_OVERFLOW; + repodata_freedata(&data); + return pool_error(pool, SOLV_ERROR_OVERFLOW, "not enough strings"); } str[i] = sp - spool->stringspace; sp += strlen(sp) + 1; @@ -911,51 +677,33 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) } else { + Offset oldsstrings = spool->sstrings; /* alloc id map for name and rel Ids. this maps ids in the solv files * to the ids in our pool */ - idmap = sat_calloc(numid + numrel, sizeof(Id)); - - /* - * build hashes for all read strings - * - */ - - hashmask = mkmask(spool->nstrings + numid); - + idmap = solv_calloc(numid + numrel, sizeof(Id)); + stringpool_resize_hash(spool, numid); + hashtbl = spool->stringhashtbl; + hashmask = spool->stringhashmask; #if 0 - POOL_DEBUG(SAT_DEBUG_STATS, "read %d strings\n", numid); - POOL_DEBUG(SAT_DEBUG_STATS, "string hash buckets: %d\n", hashmask + 1); + POOL_DEBUG(SOLV_DEBUG_STATS, "read %d strings\n", numid); + POOL_DEBUG(SOLV_DEBUG_STATS, "string hash buckets: %d\n", hashmask + 1); #endif - /* - * create hashtable with strings already in pool + * run over strings and merge with pool. + * we could use stringpool_str2id, but this is faster. 
+ * also populate id map (maps solv Id -> pool Id) */ - - hashtbl = sat_calloc(hashmask + 1, sizeof(Id)); - for (i = 1; i < spool->nstrings; i++) /* leave out our dummy zero id */ - { - h = strhash(spool->stringspace + spool->strings[i]) & hashmask; - hh = HASHCHAIN_START; - while (hashtbl[h]) - h = HASHCHAIN_NEXT(h, hh, hashmask); - hashtbl[h] = i; - } - - /* - * run over string space, calculate offsets - * - * build id map (maps solv Id -> pool Id) - */ - for (i = 1; i < numid; i++) { if (sp >= strsp + sizeid) { - sat_free(hashtbl); - sat_free(idmap); - pool_debug(pool, SAT_ERROR, "not enough strings %d %d\n", i, numid); - return SOLV_ERROR_OVERFLOW; + solv_free(idmap); + spool->nstrings = oldnstrings; + spool->sstrings = oldsstrings; + stringpool_freehash(spool); + repodata_freedata(&data); + return pool_error(pool, SOLV_ERROR_OVERFLOW, "not enough strings %d %d", i, numid); } if (!*sp) /* empty string */ { @@ -970,105 +718,107 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) for (;;) { id = hashtbl[h]; - if (id == 0) + if (!id) break; if (!strcmp(spool->stringspace + spool->strings[id], sp)) - break; /* existing string */ + break; /* already in pool */ h = HASHCHAIN_NEXT(h, hh, hashmask); } /* length == offset to next string */ l = strlen(sp) + 1; - if (id == ID_NULL) /* end of hash chain -> new string */ + if (!id) /* end of hash chain -> new string */ { id = spool->nstrings++; hashtbl[h] = id; - str[id] = spool->sstrings; /* save Offset */ - if (sp != spool->stringspace + spool->sstrings) /* not at end-of-buffer */ - memmove(spool->stringspace + spool->sstrings, sp, l); /* append to pool buffer */ + str[id] = spool->sstrings; /* save offset */ + if (sp != spool->stringspace + spool->sstrings) + memmove(spool->stringspace + spool->sstrings, sp, l); spool->sstrings += l; } - idmap[i] = id; /* repo relative -> pool relative */ - sp += l; /* next string */ + idmap[i] = id; /* repo relative -> pool relative */ + sp += l; /* next string */ } - sat_free(hashtbl); + stringpool_shrink(spool); /* vacuum */ } - pool_shrink_strings(pool); /* vacuum */ - + /******* Part 2: Relation IDs ***************************************/ /* * read RelDeps - * + * */ - + if (numrel) { /* extend rels */ - pool->rels = sat_realloc2(pool->rels, pool->nrels + numrel, sizeof(Reldep)); + pool->rels = solv_realloc2(pool->rels, pool->nrels + numrel, sizeof(Reldep)); ran = pool->rels; - hashmask = mkmask(pool->nrels + numrel); + pool_resize_rels_hash(pool, numrel); + hashtbl = pool->relhashtbl; + hashmask = pool->relhashmask; #if 0 - POOL_DEBUG(SAT_DEBUG_STATS, "read %d rels\n", numrel); - POOL_DEBUG(SAT_DEBUG_STATS, "rel hash buckets: %d\n", hashmask + 1); + POOL_DEBUG(SOLV_DEBUG_STATS, "read %d rels\n", numrel); + POOL_DEBUG(SOLV_DEBUG_STATS, "rel hash buckets: %d\n", hashmask + 1); #endif - /* - * prep hash table with already existing RelDeps - */ - - hashtbl = sat_calloc(hashmask + 1, sizeof(Id)); - for (i = 1; i < pool->nrels; i++) - { - h = relhash(ran[i].name, ran[i].evr, ran[i].flags) & hashmask; - hh = HASHCHAIN_START; - while (hashtbl[h]) - h = HASHCHAIN_NEXT(h, hh, hashmask); - hashtbl[h] = i; - } /* * read RelDeps from repo */ - for (i = 0; i < numrel; i++) { name = read_id(&data, i + numid); /* read (repo relative) Ids */ evr = read_id(&data, i + numid); - flags = read_u8(&data); + relflags = read_u8(&data); name = idmap[name]; /* map to (pool relative) Ids */ evr = idmap[evr]; - h = relhash(name, evr, flags) & hashmask; + h = relhash(name, evr, relflags) & hashmask; hh = HASHCHAIN_START; 
for (;;) { id = hashtbl[h]; - if (id == ID_NULL) /* end of hash chain */ + if (!id) /* end of hash chain reached */ break; - if (ran[id].name == name && ran[id].evr == evr && ran[id].flags == flags) + if (ran[id].name == name && ran[id].evr == evr && ran[id].flags == relflags) break; h = HASHCHAIN_NEXT(h, hh, hashmask); } - if (id == ID_NULL) /* new RelDep */ + if (!id) /* new RelDep */ { id = pool->nrels++; hashtbl[h] = id; ran[id].name = name; ran[id].evr = evr; - ran[id].flags = flags; + ran[id].flags = relflags; } idmap[i + numid] = MAKERELDEP(id); /* fill Id map */ } - sat_free(hashtbl); pool_shrink_rels(pool); /* vacuum */ } + /* if we added ids/rels, make room in our whatprovide arrays */ + if (!(flags & REPO_LOCALPOOL)) + { + if (pool->whatprovides && oldnstrings != pool->ss.nstrings) + { + int newlen = (pool->ss.nstrings + WHATPROVIDES_BLOCK) & ~WHATPROVIDES_BLOCK; + pool->whatprovides = solv_realloc2(pool->whatprovides, newlen, sizeof(Offset)); + memset(pool->whatprovides + oldnstrings, 0, (newlen - oldnstrings) * sizeof(Offset)); + } + if (pool->whatprovides_rel && oldnrels != pool->nrels) + { + int newlen = (pool->nrels + WHATPROVIDES_BLOCK) & ~WHATPROVIDES_BLOCK; + pool->whatprovides_rel = solv_realloc2(pool->whatprovides_rel, newlen, sizeof(Offset)); + memset(pool->whatprovides_rel + oldnrels, 0, (newlen - oldnrels) * sizeof(Offset)); + } + } /******* Part 3: Dirs ***********************************************/ if (numdir) { - data.dirpool.dirs = sat_malloc2(numdir, sizeof(Id)); + data.dirpool.dirs = solv_malloc2(numdir, sizeof(Id)); data.dirpool.ndirs = numdir; data.dirpool.dirs[0] = 0; /* dir 0: virtual root */ data.dirpool.dirs[1] = 1; /* dir 1: / */ @@ -1076,82 +826,104 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) { id = read_id(&data, i + numid); if (id >= numid) - data.dirpool.dirs[i] = -(id - numid); - else if (idmap) - data.dirpool.dirs[i] = idmap[id]; - else - data.dirpool.dirs[i] = id; + { + data.dirpool.dirs[i++] = -(id - numid); + if (i >= numdir) + { + data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "last dir entry is not a component"); + break; + } + id = read_id(&data, numid); + } + if (idmap) + id = idmap[id]; + data.dirpool.dirs[i] = id; + if (id <= 0) + data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "bad dir component"); } } /******* Part 4: Keys ***********************************************/ - keys = sat_calloc(numkeys, sizeof(*keys)); + keys = solv_calloc(numkeys, sizeof(*keys)); /* keys start at 1 */ for (i = 1; i < numkeys; i++) { id = read_id(&data, numid); if (idmap) id = idmap[id]; - else if (parent) - id = str2id(pool, stringpool_id2str(spool, id), 1); + else if ((flags & REPO_LOCALPOOL) != 0) + id = pool_str2id(pool, stringpool_id2str(spool, id), 1); type = read_id(&data, numid); if (idmap) type = idmap[type]; - else if (parent) - type = str2id(pool, stringpool_id2str(spool, type), 1); - if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_DIRNUMNUMARRAY) + else if ((flags & REPO_LOCALPOOL) != 0) + type = pool_str2id(pool, stringpool_id2str(spool, type), 1); + if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_FLEXARRAY) { - pool_debug(pool, SAT_ERROR, "unsupported data type '%s'\n", id2str(pool, type)); - data.error = SOLV_ERROR_UNSUPPORTED; + data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported data type '%s'", pool_id2str(pool, type)); type = REPOKEY_TYPE_VOID; } keys[i].name = id; keys[i].type = type; keys[i].size = read_id(&data, keys[i].type == REPOKEY_TYPE_CONSTANTID ? 
numid + numrel : 0); keys[i].storage = read_id(&data, 0); - if (id >= SOLVABLE_NAME && id <= RPM_RPMDBID) - keys[i].storage = KEY_STORAGE_SOLVABLE; - else if (keys[i].storage == KEY_STORAGE_SOLVABLE) + /* old versions used SOLVABLE for main solvable data */ + if (keys[i].storage == KEY_STORAGE_SOLVABLE) keys[i].storage = KEY_STORAGE_INCORE; - if (keys[i].type == REPOKEY_TYPE_CONSTANTID) + if (keys[i].storage != KEY_STORAGE_INCORE && keys[i].storage != KEY_STORAGE_VERTICAL_OFFSET) + data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported storage type %d", keys[i].storage); + if (id >= SOLVABLE_NAME && id <= RPM_RPMDBID) { - if (idmap) - keys[i].size = idmap[keys[i].size]; - else if (parent) - keys[i].size = str2id(pool, stringpool_id2str(spool, keys[i].size), 1); + if (keys[i].storage != KEY_STORAGE_INCORE) + data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "main solvable data must use incore storage %d", keys[i].storage); + keys[i].storage = KEY_STORAGE_SOLVABLE; } + /* cannot handle rel idarrays in incore/vertical */ + if (type == REPOKEY_TYPE_REL_IDARRAY && keys[i].storage != KEY_STORAGE_SOLVABLE) + data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "type REL_IDARRAY is only supported for STORAGE_SOLVABLE"); + /* cannot handle mapped ids in vertical */ + if (!(flags & REPO_LOCALPOOL) && keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET && (type == REPOKEY_TYPE_ID || type == REPOKEY_TYPE_IDARRAY)) + data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "mapped ids are not supported for STORAGE_VERTICAL_OFFSET"); + + if (keys[i].type == REPOKEY_TYPE_CONSTANTID && idmap) + keys[i].size = idmap[keys[i].size]; #if 0 - fprintf(stderr, "key %d %s %s %d %d\n", i, id2str(pool,id), id2str(pool, keys[i].type), + fprintf(stderr, "key %d %s %s %d %d\n", i, pool_id2str(pool,id), pool_id2str(pool, keys[i].type), keys[i].size, keys[i].storage); #endif } - have_xdata = parent ? 
1 : 0; + have_incoredata = 0; for (i = 1; i < numkeys; i++) if (keys[i].storage == KEY_STORAGE_INCORE || keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET) - have_xdata = 1; + have_incoredata = 1; data.keys = keys; data.nkeys = numkeys; + for (i = 1; i < numkeys; i++) + { + id = keys[i].name; + data.keybits[(id >> 3) & (sizeof(data.keybits) - 1)] |= 1 << (id & 7); + } /******* Part 5: Schemata ********************************************/ - + id = read_id(&data, 0); - schemadata = sat_calloc(id + 1, sizeof(Id)); + schemadata = solv_calloc(id + 1, sizeof(Id)); schemadatap = schemadata + 1; schemadataend = schemadatap + id; - schemata = sat_calloc(numschemata, sizeof(Id)); + schemata = solv_calloc(numschemata, sizeof(Id)); for (i = 1; i < numschemata; i++) { schemata[i] = schemadatap - schemadata; schemadatap = read_idarray(&data, numid, 0, schemadatap, schemadataend); #if 0 Id *sp = schemadata + schemata[i]; - fprintf (stderr, "schema %d:", i); + fprintf(stderr, "schema %d:", i); for (; *sp; sp++) - fprintf (stderr, " %d", *sp); - fprintf (stderr, "\n"); + fprintf(stderr, " %d", *sp); + fprintf(stderr, "\n"); #endif } data.schemata = schemata; @@ -1159,261 +931,319 @@ repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent) data.schemadata = schemadata; data.schemadatalen = schemadataend - data.schemadata; + /******* Part 6: Data ********************************************/ - /******* Part 6: Info ***********************************************/ - oldnrepodata = repo->nrepodata; - if (numinfo) - { - id = read_id(&data, 0); - id = read_id(&data, 0); - } - for (i = 0; i < numinfo; i++) - { - /* for now we're just interested in data that starts with - * the repodata_external id - */ - Id *keyp; - id = read_id(&data, numschemata); - keyp = schemadata + schemata[id]; - key = *keyp; - if (keys[key].name == REPODATA_EXTERNAL && keys[key].type == REPOKEY_TYPE_VOID) - { - /* external data for some ids */ - parse_external_repodata(&data, keyp, keys, idmap, numid, numrel); - } - else if (keys[key].name == REPODATA_INFO) - { - parse_info_repodata(&data, keyp, keys, idmap, numid, numrel); - } - else - { - skip_schema(&data, keyp, keys, numid, numrel); - } - } - + idarraydatap = idarraydataend = 0; + size_idarray = 0; - /******* Part 7: item data *******************************************/ + maxsize = read_id(&data, 0); + allsize = read_id(&data, 0); + maxsize += 5; /* so we can read the next schema of an array */ + if (maxsize > allsize) + maxsize = allsize; - /* calculate idarray size */ - size_idarray = 0; - for (i = 1; i < numkeys; i++) - { - id = keys[i].name; - if ((keys[i].type == REPOKEY_TYPE_IDARRAY || keys[i].type == REPOKEY_TYPE_REL_IDARRAY) - && id >= INTERESTED_START && id <= INTERESTED_END) - size_idarray += keys[i].size; - } + buf = solv_calloc(maxsize + DATA_READ_CHUNK + 4, 1); /* 4 extra bytes to detect overflows */ + bufend = buf; + dp = buf; - if (numsolv) + l = maxsize; + if (l < DATA_READ_CHUNK) + l = DATA_READ_CHUNK; + if (l > allsize) + l = allsize; + if (!l || fread(buf, l, 1, data.fp) != 1) { - maxsize = read_id(&data, 0); - allsize = read_id(&data, 0); - if (maxsize > allsize) - { - pool_debug(pool, SAT_ERROR, "maxsize %d is greater then allsize %d\n", maxsize, allsize); - data.error = SOLV_ERROR_CORRUPT; - } + data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF"); + id = 0; } else - maxsize = allsize = 0; - - /* allocate needed space in repo */ - /* we add maxsize because it is an upper limit for all idarrays */ - repo_reserve_ids(repo, 0, size_idarray + maxsize + 
1); - idarraydatap = repo->idarraydata + repo->idarraysize; - repo->idarraysize += size_idarray; - idarraydataend = idarraydatap + size_idarray; - repo->lastoff = 0; - - /* read solvables */ - if (numsolv) { - if (parent) - s = pool_id2solvable(pool, parent->start); - else - s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv)); - /* store start and end of our id block */ - data.start = s - pool->solvables; - data.end = data.start + numsolv; - /* In case we have info blocks, make them refer to our part of the - repository now. */ - for (i = oldnrepodata; i < repo->nrepodata; i++) - { - repo->repodata[i].start = data.start; - repo->repodata[i].end = data.end; - } + bufend = buf + l; + allsize -= l; + dp = data_read_id_max(dp, &id, 0, numschemata, &data); } - else - s = 0; - if (have_xdata) + incore_add_id(&data, 0); /* so that incoreoffset 0 means schema 0 */ + incore_add_id(&data, id); /* main schema id */ + keyp = schemadata + schemata[id]; + data.mainschema = id; + for (i = 0; keyp[i]; i++) + ; + if (i) + data.mainschemaoffsets = solv_calloc(i, sizeof(Id)); + + nentries = 0; + keydepth = 0; + s = 0; + needchunk = 1; + for(;;) { - /* reserve one byte so that all offsets are not zero */ - incore_add_id(&data, 0); - repodata_extend_block(&data, data.start, numsolv); - } + /* make sure we have enough room */ + if (keydepth == 0 || needchunk) + { + int left = bufend - dp; + /* read data chunk to dp */ + if (data.error) + break; + if (left < 0) + { + data.error = pool_error(pool, SOLV_ERROR_EOF, "buffer overrun"); + break; + } + if (left < maxsize) + { + if (left) + memmove(buf, dp, left); + l = maxsize - left; + if (l < DATA_READ_CHUNK) + l = DATA_READ_CHUNK; + if (l > allsize) + l = allsize; + if (l && fread(buf + left, l, 1, data.fp) != 1) + { + data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF"); + break; + } + allsize -= l; + left += l; + bufend = buf + left; + if (allsize + left < maxsize) + maxsize = allsize + left; + dp = buf; + } + needchunk = 0; + } - left = 0; - buf = sat_calloc(maxsize + 4, 1); - dp = buf; - for (i = 0; i < numsolv; i++, s++) - { - Id *keyp; - if (data.error) - break; + key = *keyp++; +#if 0 +printf("key %d at %d\n", key, (int)(keyp - 1 - schemadata)); +#endif + if (!key) + { + if (keydepth <= 3) + needchunk = 1; + if (nentries) + { + if (s && keydepth == 3) + { + s++; /* next solvable */ + if (have_incoredata) + data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen; + } + id = stack[keydepth - 1]; + if (!id) + { + dp = data_read_id_max(dp, &id, 0, numschemata, &data); + incore_add_id(&data, id); + } + keyp = schemadata + schemata[id]; + nentries--; + continue; + } + if (!keydepth) + break; + --keydepth; + keyp = schemadata + stack[--keydepth]; + nentries = stack[--keydepth]; +#if 0 +printf("pop flexarray %d %d\n", keydepth, nentries); +#endif + if (!keydepth && s) + s = 0; /* back from solvables */ + continue; + } - left -= (dp - buf); - if (left < 0) - { - pool_debug(mypool, SAT_ERROR, "buffer overrun\n"); - data.error = SOLV_ERROR_EOF; - break; - } - if (left) - memmove(buf, dp, left); - l = maxsize - left; - if (l > allsize) - l = allsize; - if (l && fread(buf + left, l, 1, data.fp) != 1) - { - pool_debug(mypool, SAT_ERROR, "unexpected EOF\n"); - data.error = SOLV_ERROR_EOF; - break; - } - allsize -= l; - left += l; - dp = buf; + if (keydepth == 0) + data.mainschemaoffsets[keyp - 1 - (schemadata + schemata[data.mainschema])] = data.incoredatalen; - dp = data_read_id_max(dp, &id, 0, numschemata, &data.error); - if 
(have_xdata) +#if 0 +printf("=> %s %s %p\n", pool_id2str(pool, keys[key].name), pool_id2str(pool, keys[key].type), s); +#endif + id = keys[key].name; + if (keys[key].storage == KEY_STORAGE_VERTICAL_OFFSET) { - data.incoreoffset[i] = data.incoredatalen; - incore_add_id(&data, id); + dps = dp; + dp = data_skip(dp, REPOKEY_TYPE_ID); + dp = data_skip(dp, REPOKEY_TYPE_ID); + incore_add_blob(&data, dps, dp - dps); /* just record offset/size */ + continue; } - keyp = schemadata + schemata[id]; - while ((key = *keyp++) != 0) + switch (keys[key].type) { - if (data.error) - break; - - id = keys[key].name; + case REPOKEY_TYPE_ID: + dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data); + if (s && id == SOLVABLE_NAME) + s->name = did; + else if (s && id == SOLVABLE_ARCH) + s->arch = did; + else if (s && id == SOLVABLE_EVR) + s->evr = did; + else if (s && id == SOLVABLE_VENDOR) + s->vendor = did; + else if (keys[key].storage == KEY_STORAGE_INCORE) + incore_add_id(&data, did); #if 0 -fprintf(stderr, "solv %d name %d type %d class %d\n", i, id, keys[key].type, keys[key].storage); + POOL_DEBUG(SOLV_DEBUG_STATS, "%s -> %s\n", pool_id2str(pool, id), pool_id2str(pool, did)); #endif - if (keys[key].storage == KEY_STORAGE_VERTICAL_OFFSET) + break; + case REPOKEY_TYPE_IDARRAY: + case REPOKEY_TYPE_REL_IDARRAY: + if (!s || id < INTERESTED_START || id > INTERESTED_END) { - /* copy offset/length into incore */ dps = dp; - dp = data_skip(dp, REPOKEY_TYPE_ID); - dp = data_skip(dp, REPOKEY_TYPE_ID); - incore_add_blob(&data, dps, dp - dps); - continue; + dp = data_skip(dp, REPOKEY_TYPE_IDARRAY); + if (keys[key].storage != KEY_STORAGE_INCORE) + break; + if (idmap) + incore_map_idarray(&data, dps, idmap, numid + numrel); + else + incore_add_blob(&data, dps, dp - dps); + break; } - switch (keys[key].type) + ido = idarraydatap - repo->idarraydata; + if (keys[key].type == REPOKEY_TYPE_IDARRAY) + dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data); + else if (id == SOLVABLE_REQUIRES) + dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data, SOLVABLE_PREREQMARKER); + else if (id == SOLVABLE_PROVIDES) + dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data, SOLVABLE_FILEMARKER); + else + dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data, 0); + if (idarraydatap > idarraydataend) { - case REPOKEY_TYPE_ID: - dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data.error); - if (id == SOLVABLE_NAME) - s->name = did; - else if (id == SOLVABLE_ARCH) - s->arch = did; - else if (id == SOLVABLE_EVR) - s->evr = did; - else if (id == SOLVABLE_VENDOR) - s->vendor = did; - else if (keys[key].storage == KEY_STORAGE_INCORE) - incore_add_id(&data, did); -#if 0 - POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %s\n", id2str(pool, id), id2str(pool, did)); -#endif + data.error = pool_error(pool, SOLV_ERROR_OVERFLOW, "idarray overflow"); break; - case REPOKEY_TYPE_U32: - dp = data_read_u32(dp, &h); + } + if (id == SOLVABLE_PROVIDES) + s->provides = ido; + else if (id == SOLVABLE_OBSOLETES) + s->obsoletes = ido; + else if (id == SOLVABLE_CONFLICTS) + s->conflicts = ido; + else if (id == SOLVABLE_REQUIRES) + s->requires = ido; + else if (id == SOLVABLE_RECOMMENDS) + s->recommends= ido; + else if (id == SOLVABLE_SUPPLEMENTS) + s->supplements = ido; + else if (id == SOLVABLE_SUGGESTS) + s->suggests = ido; + else if (id == SOLVABLE_ENHANCES) + s->enhances = ido; #if 0 - POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %u\n", id2str(pool, id), h); + POOL_DEBUG(SOLV_DEBUG_STATS, "%s 
->\n", pool_id2str(pool, id)); + for (; repo->idarraydata[ido]; ido++) + POOL_DEBUG(SOLV_DEBUG_STATS," %s\n", pool_dep2str(pool, repo->idarraydata[ido])); #endif - if (id == RPM_RPMDBID) + break; + case REPOKEY_TYPE_FIXARRAY: + case REPOKEY_TYPE_FLEXARRAY: + if (!keydepth) + needchunk = 1; + if (keydepth == sizeof(stack)/sizeof(*stack)) + { + data.error = pool_error(pool, SOLV_ERROR_OVERFLOW, "array stack overflow"); + break; + } + stack[keydepth++] = nentries; + stack[keydepth++] = keyp - schemadata; + stack[keydepth++] = 0; + dp = data_read_id_max(dp, &nentries, 0, 0, &data); + incore_add_id(&data, nentries); + if (!nentries) + { + /* zero size array? */ + keydepth -= 2; + nentries = stack[--keydepth]; + break; + } + if (keydepth == 3 && id == REPOSITORY_SOLVABLES) + { + /* horray! here come the solvables */ + if (nentries != numsolv) { - if (!repo->rpmdbid) - repo->rpmdbid = sat_calloc(numsolv, sizeof(Id)); - repo->rpmdbid[i] = h; + data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "inconsistent number of solvables: %d %d", nentries, numsolv); + break; } - else if (keys[key].storage == KEY_STORAGE_INCORE) - incore_add_u32(&data, h); - break; - case REPOKEY_TYPE_IDARRAY: - case REPOKEY_TYPE_REL_IDARRAY: - if (id < INTERESTED_START || id > INTERESTED_END) + if (idarraydatap) { - dps = dp; - dp = data_skip(dp, REPOKEY_TYPE_IDARRAY); - if (keys[key].storage == KEY_STORAGE_INCORE && idmap) - incore_map_idarray(&data, dps, idmap, numid); - else if (keys[key].storage == KEY_STORAGE_INCORE) - incore_add_blob(&data, dps, dp - dps); + data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "more than one solvable block"); break; } - ido = idarraydatap - repo->idarraydata; - if (keys[key].type == REPOKEY_TYPE_IDARRAY) - dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error); - else if (id == SOLVABLE_REQUIRES) - dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_PREREQMARKER); - else if (id == SOLVABLE_PROVIDES) - dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_FILEMARKER); + if ((flags & REPO_EXTEND_SOLVABLES) != 0) + s = pool_id2solvable(pool, extendstart); else - dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, 0); - if (idarraydatap > idarraydataend) + s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv)); + data.start = s - pool->solvables; + data.end = data.start + numsolv; + repodata_extend_block(&data, data.start, numsolv); + for (i = 1; i < numkeys; i++) { - pool_debug(pool, SAT_ERROR, "idarray overflow\n"); - data.error = SOLV_ERROR_OVERFLOW; - break; + id = keys[i].name; + if ((keys[i].type == REPOKEY_TYPE_IDARRAY || keys[i].type == REPOKEY_TYPE_REL_IDARRAY) + && id >= INTERESTED_START && id <= INTERESTED_END) + size_idarray += keys[i].size; } - if (id == SOLVABLE_PROVIDES) - s->provides = ido; - else if (id == SOLVABLE_OBSOLETES) - s->obsoletes = ido; - else if (id == SOLVABLE_CONFLICTS) - s->conflicts = ido; - else if (id == SOLVABLE_REQUIRES) - s->requires = ido; - else if (id == SOLVABLE_RECOMMENDS) - s->recommends= ido; - else if (id == SOLVABLE_SUPPLEMENTS) - s->supplements = ido; - else if (id == SOLVABLE_SUGGESTS) - s->suggests = ido; - else if (id == SOLVABLE_ENHANCES) - s->enhances = ido; - else if (id == SOLVABLE_FRESHENS) - s->freshens = ido; -#if 0 - POOL_DEBUG(SAT_DEBUG_STATS, "%s ->\n", id2str(pool, id)); - for (; repo->idarraydata[ido]; ido++) - POOL_DEBUG(SAT_DEBUG_STATS," %s\n", dep2str(pool, repo->idarraydata[ido])); -#endif 
+ /* allocate needed space in repo */ + /* we add maxsize because it is an upper limit for all idarrays, thus we can't overflow */ + repo_reserve_ids(repo, 0, size_idarray + maxsize + 1); + idarraydatap = repo->idarraydata + repo->idarraysize; + repo->idarraysize += size_idarray; + idarraydataend = idarraydatap + size_idarray; + repo->lastoff = 0; + if (have_incoredata) + data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen; + } + nentries--; + dp = data_read_id_max(dp, &id, 0, numschemata, &data); + incore_add_id(&data, id); + if (keys[key].type == REPOKEY_TYPE_FIXARRAY) + { + if (!id) + data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "illegal fixarray"); + stack[keydepth - 1] = id; + } + keyp = schemadata + schemata[id]; + break; + case REPOKEY_TYPE_NUM: + if (!(solvflags & SOLV_FLAG_SIZE_BYTES) && keys[key].storage == KEY_STORAGE_INCORE && + (id == SOLVABLE_INSTALLSIZE || id == SOLVABLE_DOWNLOADSIZE || id == DELTA_DOWNLOADSIZE)) + { + /* old solv file with sizes in kilos. transcode. */ + dp = data_read_id(dp, &id); + incore_add_sizek(&data, (unsigned int)id); break; - default: - dps = dp; - dp = data_skip(dp, keys[key].type); - if (keys[key].storage == KEY_STORAGE_INCORE) - incore_add_blob(&data, dps, dp - dps); + } + /* FALLTHROUGH */ + default: + if (id == RPM_RPMDBID && s && (keys[key].type == REPOKEY_TYPE_U32 || keys[key].type == REPOKEY_TYPE_NUM)) + { + if (keys[key].type == REPOKEY_TYPE_U32) + dp = data_read_u32(dp, (unsigned int *)&id); + else + dp = data_read_id_max(dp, &id, 0, 0, &data); + if (!repo->rpmdbid) + repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id)); + repo->rpmdbid[(s - pool->solvables) - repo->start] = id; break; } + dps = dp; + dp = data_skip(dp, keys[key].type); + if (keys[key].storage == KEY_STORAGE_INCORE) + incore_add_blob(&data, dps, dp - dps); + break; } } - /* should shrink idarraydata again */ + if (keydepth) + data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF, depth = %d", keydepth); if (!data.error) { - left -= (dp - buf); - if (left < 0) - { - pool_debug(mypool, SAT_ERROR, "buffer overrun\n"); - data.error = SOLV_ERROR_EOF; - } + if (dp > bufend) + data.error = pool_error(pool, SOLV_ERROR_EOF, "buffer overrun"); } - sat_free(buf); + solv_free(buf); if (data.error) { @@ -1422,16 +1252,22 @@ fprintf(stderr, "solv %d name %d type %d class %d\n", i, id, keys[key].type, key /* free id array */ repo->idarraysize -= size_idarray; /* free incore data */ - data.incoredata = sat_free(data.incoredata); + data.incoredata = solv_free(data.incoredata); data.incoredatalen = data.incoredatafree = 0; } if (data.incoredatafree) { /* shrink excess size */ - data.incoredata = sat_realloc(data.incoredata, data.incoredatalen); + data.incoredata = solv_realloc(data.incoredata, data.incoredatalen); data.incoredatafree = 0; } + solv_free(idmap); + + /* fixup the special idarray type */ + for (i = 1; i < numkeys; i++) + if (keys[i].type == REPOKEY_TYPE_REL_IDARRAY) + keys[i].type = REPOKEY_TYPE_IDARRAY; for (i = 1; i < numkeys; i++) if (keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET) @@ -1440,9 +1276,9 @@ fprintf(stderr, "solv %d name %d type %d class %d\n", i, id, keys[key].type, key { Id fileoffset = 0; unsigned int pagesize; - + /* we have vertical data, make it available */ - data.verticaloffset = sat_calloc(numkeys, sizeof(Id)); + data.verticaloffset = solv_calloc(numkeys, sizeof(Id)); for (i = 1; i < numkeys; i++) if (keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET) { @@ -1451,69 +1287,58 @@ fprintf(stderr, "solv %d name %d type %d 
class %d\n", i, id, keys[key].type, key } data.lastverticaloffset = fileoffset; pagesize = read_u32(&data); - repodata_read_or_setup_pages(&data, pagesize, fileoffset); + if (!data.error) + { + data.error = repopagestore_read_or_setup_pages(&data.store, data.fp, pagesize, fileoffset); + if (data.error == SOLV_ERROR_EOF) + pool_error(pool, data.error, "repopagestore setup: unexpected EOF"); + else if (data.error) + pool_error(pool, data.error, "repopagestore setup failed"); + } } - else + data.fp = 0; /* no longer needed */ + + if (data.error) { - /* no longer needed */ - data.fp = 0; + i = data.error; + repodata_freedata(&data); + return i; } - if (parent && !data.error) + if (parent) { - /* we're a store */ - sat_free(parent->schemata); - sat_free(parent->schemadata); - sat_free(parent->keys); - sat_free(parent->location); + /* overwrite stub repodata */ + repodata_freedata(parent); + data.repodataid = parent->repodataid; *parent = data; } - else if ((data.incoredatalen || data.fp) && !data.error) + else { - /* we got some data, make it available */ - repo->repodata = sat_realloc2(repo->repodata, repo->nrepodata + 1, sizeof(data)); + /* make it available as new repodata */ + if (!repo->nrepodata) + { + repo->nrepodata = 1; + repo->repodata = solv_calloc(2, sizeof(data)); + } + else + repo->repodata = solv_realloc2(repo->repodata, repo->nrepodata + 1, sizeof(data)); + data.repodataid = repo->nrepodata; repo->repodata[repo->nrepodata++] = data; } - else + + /* create stub repodata entries for all external */ + if (!(flags & SOLV_ADD_NO_STUBS) && !parent) { - /* discard data */ - sat_free(data.dirpool.dirs); - sat_free(data.incoreoffset); - sat_free(schemata); - sat_free(schemadata); - sat_free(keys); + for (key = 1 ; key < data.nkeys; key++) + if (data.keys[key].name == REPOSITORY_EXTERNAL && data.keys[key].type == REPOKEY_TYPE_FLEXARRAY) + break; + if (key < data.nkeys) + repodata_create_stubs(repo->repodata + (repo->nrepodata - 1)); } - sat_free(idmap); - mypool = 0; - return data.error; -} - -int -repo_add_solv(Repo *repo, FILE *fp) -{ - return repo_add_solv_parent(repo, fp, 0); + POOL_DEBUG(SOLV_DEBUG_STATS, "repo_add_solv took %d ms\n", solv_timems(now)); + POOL_DEBUG(SOLV_DEBUG_STATS, "repo size: %d solvables\n", repo->nsolvables); + POOL_DEBUG(SOLV_DEBUG_STATS, "repo memory used: %d K incore, %d K idarray\n", data.incoredatalen/1024, repo->idarraysize / (int)(1024/sizeof(Id))); + return 0; } -static void -repodata_load_solv(Repodata *data) -{ - FILE *fp; - Pool *pool = data->repo->pool; - if (!pool->loadcallback) - { - data->state = REPODATA_ERROR; - return; - } - fp = pool->loadcallback(pool, data, pool->loadcallbackdata); - if (!fp) - { - data->state = REPODATA_ERROR; - return; - } - if (repo_add_solv_parent(data->repo, fp, data)) - data->state = REPODATA_ERROR; - else - data->state = REPODATA_AVAILABLE; - fclose(fp); -}