/*
* repo_solv.c
*
- * Read the binary dump of a Repo and create a Repo * from it
- *
- * See
- * Repo *pool_addrepo_solv(Pool *pool, FILE *fp)
- * below
+ * Add a repo in solv format
*
*/
#include "repopack.h"
#include "repopage.h"
+#include "poolid_private.h" /* WHATPROVIDES_BLOCK */
+
#define INTERESTED_START SOLVABLE_NAME
#define INTERESTED_END SOLVABLE_ENHANCES
#define SOLV_ERROR_OVERFLOW 5
#define SOLV_ERROR_CORRUPT 6
-static Pool *mypool; /* for pool_debug... */
-
-
-static void repodata_load_stub(Repodata *data);
/*******************************************************************************
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
x = (x << 8) | c;
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
return c;
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
if (!(c & 128))
x = (x << 7) | c;
if (max && x >= max)
{
- pool_debug(mypool, SAT_ERROR, "read_id: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "read_id: id too large (%u/%u)", x, max);
return 0;
}
return x;
}
x = (x << 7) ^ c ^ 128;
}
- pool_debug(mypool, SAT_ERROR, "read_id: id too long\n");
- data->error = SOLV_ERROR_CORRUPT;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_CORRUPT, "read_id: id too long");
return 0;
}
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
if ((c & 128) != 0)
x = (x << 6) | (c & 63);
if (max && x >= max)
{
- pool_debug(mypool, SAT_ERROR, "read_idarray: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "read_idarray: id too large (%u/%u)", x, max);
return 0;
}
if (map)
x = map[x];
if (store == end)
{
- pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_OVERFLOW, "read_idarray: array overflow");
return 0;
}
*store++ = x;
return store;
if (store == end)
{
- pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
- data->error = SOLV_ERROR_OVERFLOW;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_OVERFLOW, "read_idarray: array overflow");
return 0;
}
*store++ = 0;
*/
/* Read one id from the blob at dp and validate it: ids must be
 * non-negative and, when max is non-zero, below max.  On a violation
 * the error is recorded in data->error and id 0 is substituted so the
 * caller can continue.  When map is non-NULL the repo-local id is
 * translated to a pool id through it. */
static inline unsigned char *
-data_read_id_max(unsigned char *dp, Id *ret, Id *map, int max, int *error)
+data_read_id_max(unsigned char *dp, Id *ret, Id *map, int max, Repodata *data)
{
  Id x;
  dp = data_read_id(dp, &x);
- if (max && x >= max)
+ if (x < 0 || (max && x >= max))
    {
- pool_debug(mypool, SAT_ERROR, "data_read_idarray: id too large (%u/%u)\n", x, max);
- *error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_id_max: id too large (%u/%u)", x, max);
      x = 0; /* safe fallback id; data->error stops the outer loop */
    }
  *ret = map ? map[x] : x;
  return dp;
}
-unsigned char *
-data_read_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *error)
+static unsigned char *
+data_read_idarray(unsigned char *dp, Id **storep, Id *map, int max, Repodata *data)
{
Id *store = *storep;
unsigned int x = 0;
x = (x << 6) | (c & 63);
if (max && x >= max)
{
- pool_debug(mypool, SAT_ERROR, "data_read_idarray: id too large (%u/%u)\n", x, max);
- *error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_idarray: id too large (%u/%u)", x, max);
break;
}
*store++ = x;
return dp;
}
-unsigned char *
-data_read_rel_idarray(unsigned char *dp, Id **storep, Id *map, int max, int *error, Id marker)
+static unsigned char *
+data_read_rel_idarray(unsigned char *dp, Id **storep, Id *map, int max, Repodata *data, Id marker)
{
Id *store = *storep;
Id old = 0;
old = x;
if (max && x >= max)
{
- pool_debug(mypool, SAT_ERROR, "data_read_rel_idarray: id too large (%u/%u)\n", x, max);
- *error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_rel_idarray: id too large (%u/%u)", x, max);
break;
}
*store++ = map ? map[x] : x;
* functions to add data to our incore memory space
*/
+#define INCORE_ADD_CHUNK 8192
+#define DATA_READ_CHUNK 8192
static void
-incore_add_id(Repodata *data, Id x)
+incore_add_id(Repodata *data, Id sx)
{
+ unsigned int x = (unsigned int)sx;
unsigned char *dp;
/* make sure we have at least 5 bytes free */
if (data->incoredatafree < 5)
{
- data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024);
- data->incoredatafree = 1024;
+ data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK);
+ data->incoredatafree = INCORE_ADD_CHUNK;
}
dp = data->incoredata + data->incoredatalen;
- if (x < 0)
- abort();
if (x >= (1 << 14))
{
if (x >= (1 << 28))
}
+/* Append a size given in kbytes to the incore data, stored as a byte
+ * count (sx << 10) in the variable-length id encoding.  Byte counts
+ * that need more than 32 bits are emitted with extra leading 7-bit
+ * groups, continuation bit 128 set on each non-final byte. */
static void
+incore_add_sizek(Repodata *data, unsigned int sx)
+{
+ if (sx < (1 << 22))
+ incore_add_id(data, (Id)(sx << 10)); /* byte count fits in 32 bits */
+ else
+ {
+ if ((sx >> 25) != 0)
+ {
+ /* leading groups for byte-count bits 35 and up */
+ incore_add_id(data, (Id)(sx >> 25));
+ data->incoredata[data->incoredatalen - 1] |= 128; /* mark continuation */
+ }
+ /* forcing the top bit makes incore_add_id emit a full 5-byte encoding */
+ incore_add_id(data, (Id)((sx << 10) | 0x80000000));
+ /* patch the most significant of those 5 bytes so it carries
+ * byte-count bits 34..28 (plus the continuation bit) instead of
+ * the artificial top bit -- TODO(review): confirm against read_id */
+ data->incoredata[data->incoredatalen - 5] = (sx >> 18) | 128;
+ }
+}
+
+/* Append an id in "ideof" encoding to the incore data: like the plain
+ * id encoding, but the final byte holds only 6 payload bits, with bit
+ * 64 set when more array elements follow (eof == 0). */
+static void
+incore_add_ideof(Repodata *data, Id sx, int eof)
+{
+ unsigned int x = (unsigned int)sx; /* unsigned keeps the shifts well defined */
+ unsigned char *dp;
+ /* make sure we have at least 5 bytes free */
+ if (data->incoredatafree < 5)
+ {
+ data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK);
+ data->incoredatafree = INCORE_ADD_CHUNK;
+ }
+ dp = data->incoredata + data->incoredatalen;
+ /* non-final groups carry 7 bits each with continuation bit 128 set;
+ * thresholds are shifted by one vs. incore_add_id because the last
+ * byte only holds 6 bits */
+ if (x >= (1 << 13))
+ {
+ if (x >= (1 << 27))
+ *dp++ = (x >> 27) | 128;
+ if (x >= (1 << 20))
+ *dp++ = (x >> 20) | 128;
+ *dp++ = (x >> 13) | 128;
+ }
+ if (x >= (1 << 6))
+ *dp++ = (x >> 6) | 128;
+ *dp++ = eof ? (x & 63) : (x & 63) | 64; /* 6 bits + "more follows" flag */
+ data->incoredatafree -= dp - (data->incoredata + data->incoredatalen);
+ data->incoredatalen = dp - data->incoredata;
+}
+
+static void
incore_add_blob(Repodata *data, unsigned char *buf, int len)
{
if (data->incoredatafree < len)
{
- data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024 + len);
- data->incoredatafree = 1024 + len;
+ data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK + len);
+ data->incoredatafree = INCORE_ADD_CHUNK + len;
}
memcpy(data->incoredata + data->incoredatalen, buf, len);
data->incoredatafree -= len;
Id id;
int eof;
dp = data_read_ideof(dp, &id, &eof);
- if (max && id >= max)
+ if (id < 0 || (max && id >= max))
{
- pool_debug(mypool, SAT_ERROR, "incore_map_idarray: id too large (%u/%u)\n", id, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "incore_map_idarray: id too large (%u/%u)", id, max);
break;
}
id = map[id];
- if (id >= 64)
- id = (id & 63) | ((id & ~63) << 1);
- incore_add_id(data, eof ? id : id | 64);
+ incore_add_ideof(data, id, eof);
if (eof)
break;
}
}
+#if 0
static void
incore_add_u32(Repodata *data, unsigned int x)
{
/* make sure we have at least 4 bytes free */
if (data->incoredatafree < 4)
{
- data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024);
- data->incoredatafree = 1024;
+ data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK);
+ data->incoredatafree = INCORE_ADD_CHUNK;
}
dp = data->incoredata + data->incoredatalen;
*dp++ = x >> 24;
data->incoredatalen += 4;
}
-#if 0
static void
incore_add_u8(Repodata *data, unsigned int x)
{
/* make sure we have at least 1 byte free */
if (data->incoredatafree < 1)
{
- data->incoredata = sat_realloc(data->incoredata, data->incoredatalen + 1024);
+ data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + 1024);
data->incoredatafree = 1024;
}
dp = data->incoredata + data->incoredatalen;
/*******************************************************************************
- * callback to create our stub sub-repodatas from the incore data
- */
-
-struct create_stub_data {
- Repodata *data;
- Id xkeyname;
-};
-
-int
-create_stub_cb(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
-{
- struct create_stub_data *stubdata = cbdata;
- if (key->name == REPOSITORY_EXTERNAL && key->type == REPOKEY_TYPE_FLEXARRAY)
- {
- if (stubdata->data)
- {
- repodata_internalize(stubdata->data);
- if (data->start != data->end)
- {
- repodata_extend(stubdata->data, data->start);
- repodata_extend(stubdata->data, data->end - 1);
- }
- stubdata->data = 0;
- }
- if (kv->eof)
- return SEARCH_NEXT_SOLVABLE;
- stubdata->data = repo_add_repodata(data->repo, 0);
- stubdata->data->state = REPODATA_STUB;
- stubdata->data->loadcallback = repodata_load_stub;
- return 0;
- }
- if (!stubdata->data)
- return SEARCH_NEXT_KEY;
- switch(key->type)
- {
- case REPOKEY_TYPE_ID:
- repodata_set_id(stubdata->data, REPOENTRY_META, key->name, kv->id);
- break;
- case REPOKEY_TYPE_CONSTANTID:
- repodata_set_constantid(stubdata->data, REPOENTRY_META, key->name, kv->id);
- break;
- case REPOKEY_TYPE_STR:
- repodata_set_str(stubdata->data, REPOENTRY_META, key->name, kv->str);
- break;
- case REPOKEY_TYPE_VOID:
- repodata_set_void(stubdata->data, REPOENTRY_META, key->name);
- break;
- case REPOKEY_TYPE_NUM:
- repodata_set_num(stubdata->data, REPOENTRY_META, key->name, kv->num);
- break;
- case REPOKEY_TYPE_IDARRAY:
- repodata_add_idarray(stubdata->data, REPOENTRY_META, key->name, kv->id);
- if (key->name == REPOSITORY_KEYS)
- {
- if (!stubdata->xkeyname)
- stubdata->xkeyname = kv->id;
- else
- {
- Repokey xkey;
-
- xkey.name = stubdata->xkeyname;
- xkey.type = kv->id;
- xkey.storage = KEY_STORAGE_INCORE;
- xkey.size = 0;
- repodata_key2id(stubdata->data, &xkey, 1);
- stubdata->xkeyname = 0;
- }
- if (kv->eof)
- stubdata->xkeyname = 0;
- }
- break;
- case REPOKEY_TYPE_MD5:
- case REPOKEY_TYPE_SHA1:
- case REPOKEY_TYPE_SHA256:
- repodata_set_checksum(stubdata->data, REPOENTRY_META, key->name, key->type, kv->str);
- break;
- default:
- return SEARCH_NEXT_KEY;
- }
- return 0;
-}
-
-
-/*******************************************************************************
* our main function
*/
/*
* read repo from .solv file and add it to pool
- * if stubdata is set, substitute it with read data
- * (this is used to replace a repodata stub with the real data)
*/
-static int
-repo_add_solv_parent(Repo *repo, FILE *fp, Repodata *parent)
+int
+repo_add_solv(Repo *repo, FILE *fp, int flags)
{
Pool *pool = repo->pool;
int i, l;
char *sp; /* pointer into string space */
Id *idmap; /* map of repo Ids to pool Ids */
Id id, type;
- unsigned int hashmask, h;
- int hh;
- Id *hashtbl;
+ Hashval hashmask, h, hh;
+ Hashtable hashtbl;
Id name, evr, did;
- int flags;
+ int relflags;
Reldep *ran;
unsigned int size_idarray;
Id *idarraydatap, *idarraydataend;
Id *schemadata, *schemadatap, *schemadataend;
Id *schemata, key, *keyp;
int nentries;
- int have_xdata;
+ int have_incoredata;
int maxsize, allsize;
- unsigned char *buf, *dp, *dps;
- int left;
- Id stack[10];
+ unsigned char *buf, *bufend, *dp, *dps;
+ Id stack[3 * 5];
int keydepth;
+ int needchunk; /* need a new chunk of data */
+ unsigned int now;
+ int oldnstrings = pool->ss.nstrings;
+ int oldnrels = pool->nrels;
struct _Stringpool *spool;
+ Repodata *parent = 0;
Repodata data;
+ int extendstart = 0, extendend = 0; /* set in case we're extending */
+
+ now = solv_timems(0);
+
+ if ((flags & REPO_USE_LOADING) != 0)
+ {
+ /* this is a stub replace operation */
+ flags |= REPO_EXTEND_SOLVABLES;
+ /* use REPO_REUSE_REPODATA hack so that the old repodata is kept */
+ parent = repo_add_repodata(repo, flags | REPO_REUSE_REPODATA);
+ extendstart = parent->start;
+ extendend = parent->end;
+ }
+ else if (flags & REPO_EXTEND_SOLVABLES)
+ {
+ /* extend all solvables of this repo */
+ extendstart = repo->start;
+ extendend = repo->end;
+ }
+
memset(&data, 0, sizeof(data));
data.repo = repo;
data.fp = fp;
repopagestore_init(&data.store);
- mypool = pool;
-
if (read_u32(&data) != ('S' << 24 | 'O' << 16 | 'L' << 8 | 'V'))
- {
- pool_debug(pool, SAT_ERROR, "not a SOLV file\n");
- return SOLV_ERROR_NOT_SOLV;
- }
+ return pool_error(pool, SOLV_ERROR_NOT_SOLV, "not a SOLV file");
solvversion = read_u32(&data);
switch (solvversion)
{
case SOLV_VERSION_8:
break;
default:
- pool_debug(pool, SAT_ERROR, "unsupported SOLV version\n");
- return SOLV_ERROR_UNSUPPORTED;
+ return pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported SOLV version");
}
- pool_freeidhashes(pool);
-
numid = read_u32(&data);
numrel = read_u32(&data);
numdir = read_u32(&data);
solvflags = read_u32(&data);
if (numdir && numdir < 2)
- {
- pool_debug(pool, SAT_ERROR, "bad number of dirs\n");
- return SOLV_ERROR_CORRUPT;
- }
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of dirs");
- if (parent)
+ if (numrel && (flags & REPO_LOCALPOOL) != 0)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "relations are forbidden in a local pool");
+ if ((flags & REPO_EXTEND_SOLVABLES) && numsolv)
{
- if (numrel)
- {
- pool_debug(pool, SAT_ERROR, "relations are forbidden in a sub-repository\n");
- return SOLV_ERROR_CORRUPT;
- }
- if (parent->end - parent->start != numsolv)
- {
- pool_debug(pool, SAT_ERROR, "sub-repository solvable number doesn't match main repository (%d - %d)\n", parent->end - parent->start, numsolv);
- return SOLV_ERROR_CORRUPT;
- }
+ /* make sure that we exactly replace the stub repodata */
+ if (extendend - extendstart != numsolv)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "sub-repository solvable number does not match main repository (%d - %d)", extendend - extendstart, numsolv);
+ for (i = 0; i < numsolv; i++)
+ if (pool->solvables[extendstart + i].repo != repo)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "main repository contains holes, cannot extend");
}
/******* Part 1: string IDs *****************************************/
- sizeid = read_u32(&data); /* size of string+Id space */
+ sizeid = read_u32(&data); /* size of string space */
/*
* read strings and Ids
* alloc buffers
*/
- if (!parent)
- spool = &pool->ss;
+ if (!(flags & REPO_LOCALPOOL))
+ {
+ spool = &pool->ss;
+ /* alloc max needed string buffer and string pointers, will shrink again later */
+#if 0
+ spool->stringspace = solv_realloc(spool->stringspace, spool->sstrings + sizeid + 1);
+ spool->strings = solv_realloc2(spool->strings, spool->nstrings + numid, sizeof(Offset));
+#else
+ spool->sstrings += sizeid + 1;
+ spool->nstrings += numid;
+ stringpool_shrink(spool); /* we misuse stringpool_shrink so that the correct BLOCK factor is used */
+ spool->sstrings -= sizeid + 1;
+ spool->nstrings -= numid;
+#endif
+ }
else
{
data.localpool = 1;
spool = &data.spool;
- spool->stringspace = sat_malloc(7);
+ spool->stringspace = solv_malloc(7 + sizeid + 1);
+ spool->strings = solv_malloc2(numid < 2 ? 2 : numid, sizeof(Offset));
strcpy(spool->stringspace, "<NULL>");
spool->sstrings = 7;
- spool->nstrings = 0;
+ spool->nstrings = 1;
+ spool->strings[0] = 0; /* <NULL> */
}
- /* alloc string buffer */
- spool->stringspace = sat_realloc(spool->stringspace, spool->sstrings + sizeid + 1);
- /* alloc string offsets (Id -> Offset into string space) */
- spool->strings = sat_realloc2(spool->strings, spool->nstrings + numid, sizeof(Offset));
-
- strsp = spool->stringspace;
- str = spool->strings; /* array of offsets into strsp, indexed by Id */
-
- /* point to _BEHIND_ already allocated string/Id space */
- strsp += spool->sstrings;
-
/*
- * read new repo at end of pool
+ * read string data and append to old string space
*/
+ strsp = spool->stringspace + spool->sstrings; /* append new entries */
if ((solvflags & SOLV_FLAG_PREFIX_POOL) == 0)
{
if (sizeid && fread(strsp, sizeid, 1, fp) != 1)
{
- pool_debug(pool, SAT_ERROR, "read error while reading strings\n");
- return SOLV_ERROR_EOF;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_EOF, "read error while reading strings");
}
}
else
{
unsigned int pfsize = read_u32(&data);
- char *prefix = sat_malloc(pfsize);
+ char *prefix = solv_malloc(pfsize);
char *pp = prefix;
char *old_str = 0;
char *dest = strsp;
int freesp = sizeid;
if (pfsize && fread(prefix, pfsize, 1, fp) != 1)
- {
- pool_debug(pool, SAT_ERROR, "read error while reading strings\n");
- sat_free(prefix);
- return SOLV_ERROR_EOF;
+ {
+ solv_free(prefix);
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_EOF, "read error while reading strings");
}
for (i = 1; i < numid; i++)
{
freesp -= same + len;
if (freesp < 0)
{
- pool_debug(pool, SAT_ERROR, "overflow while expanding strings\n");
- sat_free(prefix);
- return SOLV_ERROR_OVERFLOW;
+ solv_free(prefix);
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_OVERFLOW, "overflow while expanding strings");
}
if (same)
memcpy(dest, old_str, same);
old_str = dest;
dest += same + len;
}
- sat_free(prefix);
+ solv_free(prefix);
if (freesp != 0)
{
- pool_debug(pool, SAT_ERROR, "expanding strings size mismatch\n");
- return SOLV_ERROR_CORRUPT;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "expanding strings size mismatch");
}
}
strsp[sizeid] = 0; /* make string space \0 terminated */
sp = strsp;
- if (parent)
+ /* now merge */
+ str = spool->strings; /* array of offsets into strsp, indexed by Id */
+ if ((flags & REPO_LOCALPOOL) != 0)
{
- /* no shared pool, thus no idmap and no unification */
+ /* no shared pool, thus no idmap and no unification needed */
idmap = 0;
- spool->nstrings = numid;
- str[0] = 0;
+ spool->nstrings = numid < 2 ? 2 : numid; /* make sure we have at least id 0 and 1 */
if (*sp)
{
- /* we need the '' for directories */
- pool_debug(pool, SAT_ERROR, "store strings don't start with ''\n");
- return SOLV_ERROR_CORRUPT;
+ /* we need id 1 to be '' for directories */
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "store strings don't start with an empty string");
}
for (i = 1; i < spool->nstrings; i++)
{
- if (sp >= strsp + sizeid)
+ if (sp >= strsp + sizeid && numid >= 2)
{
- pool_debug(pool, SAT_ERROR, "not enough strings\n");
- return SOLV_ERROR_OVERFLOW;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_OVERFLOW, "not enough strings");
}
str[i] = sp - spool->stringspace;
sp += strlen(sp) + 1;
}
else
{
+ Offset oldsstrings = spool->sstrings;
/* alloc id map for name and rel Ids. this maps ids in the solv files
* to the ids in our pool */
- idmap = sat_calloc(numid + numrel, sizeof(Id));
+ idmap = solv_calloc(numid + numrel, sizeof(Id));
- /*
- * build hashes for all read strings
- *
- */
-
+ /* grow hash if needed, otherwise reuse */
hashmask = mkmask(spool->nstrings + numid);
-
#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "read %d strings\n", numid);
- POOL_DEBUG(SAT_DEBUG_STATS, "string hash buckets: %d\n", hashmask + 1);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "read %d strings\n", numid);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "string hash buckets: %d, old %d\n", hashmask + 1, spool->stringhashmask + 1);
#endif
-
- /*
- * create hashtable with strings already in pool
- */
-
- hashtbl = sat_calloc(hashmask + 1, sizeof(Id));
- for (i = 1; i < spool->nstrings; i++) /* leave out our dummy zero id */
+ if (hashmask > spool->stringhashmask)
{
- h = strhash(spool->stringspace + spool->strings[i]) & hashmask;
- hh = HASHCHAIN_START;
- while (hashtbl[h])
- h = HASHCHAIN_NEXT(h, hh, hashmask);
- hashtbl[h] = i;
+ spool->stringhashtbl = solv_free(spool->stringhashtbl);
+ spool->stringhashmask = hashmask;
+ spool->stringhashtbl = hashtbl = solv_calloc(hashmask + 1, sizeof(Id));
+ for (i = 1; i < spool->nstrings; i++)
+ {
+ h = strhash(spool->stringspace + spool->strings[i]) & hashmask;
+ hh = HASHCHAIN_START;
+ while (hashtbl[h])
+ h = HASHCHAIN_NEXT(h, hh, hashmask);
+ hashtbl[h] = i;
+ }
+ }
+ else
+ {
+ hashtbl = spool->stringhashtbl;
+ hashmask = spool->stringhashmask;
}
/*
- * run over string space, calculate offsets
- *
- * build id map (maps solv Id -> pool Id)
+ * run over strings and merge with pool.
+ * also populate id map (maps solv Id -> pool Id)
*/
-
for (i = 1; i < numid; i++)
{
if (sp >= strsp + sizeid)
{
- sat_free(hashtbl);
- sat_free(idmap);
- pool_debug(pool, SAT_ERROR, "not enough strings %d %d\n", i, numid);
- return SOLV_ERROR_OVERFLOW;
+ solv_free(idmap);
+ spool->nstrings = oldnstrings;
+ spool->sstrings = oldsstrings;
+ stringpool_freehash(spool);
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_OVERFLOW, "not enough strings %d %d", i, numid);
}
if (!*sp) /* empty string */
{
for (;;)
{
id = hashtbl[h];
- if (id == 0)
+ if (!id)
break;
if (!strcmp(spool->stringspace + spool->strings[id], sp))
- break; /* existing string */
+ break; /* already in pool */
h = HASHCHAIN_NEXT(h, hh, hashmask);
}
/* length == offset to next string */
l = strlen(sp) + 1;
- if (id == ID_NULL) /* end of hash chain -> new string */
+ if (!id) /* end of hash chain -> new string */
{
id = spool->nstrings++;
hashtbl[h] = id;
- str[id] = spool->sstrings; /* save Offset */
- if (sp != spool->stringspace + spool->sstrings) /* not at end-of-buffer */
- memmove(spool->stringspace + spool->sstrings, sp, l); /* append to pool buffer */
+ str[id] = spool->sstrings; /* save offset */
+ if (sp != spool->stringspace + spool->sstrings)
+ memmove(spool->stringspace + spool->sstrings, sp, l);
spool->sstrings += l;
}
- idmap[i] = id; /* repo relative -> pool relative */
- sp += l; /* next string */
+ idmap[i] = id; /* repo relative -> pool relative */
+ sp += l; /* next string */
}
- sat_free(hashtbl);
+ if (hashmask > mkmask(spool->nstrings + 8192))
+ {
+ spool->stringhashtbl = solv_free(spool->stringhashtbl);
+ spool->stringhashmask = 0;
+ }
+ stringpool_shrink(spool); /* vacuum */
}
- pool_shrink_strings(pool); /* vacuum */
/******* Part 2: Relation IDs ***************************************/
if (numrel)
{
/* extend rels */
- pool->rels = sat_realloc2(pool->rels, pool->nrels + numrel, sizeof(Reldep));
+ pool->rels = solv_realloc2(pool->rels, pool->nrels + numrel, sizeof(Reldep));
ran = pool->rels;
+ /* grow hash if needed, otherwise reuse */
hashmask = mkmask(pool->nrels + numrel);
#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "read %d rels\n", numrel);
- POOL_DEBUG(SAT_DEBUG_STATS, "rel hash buckets: %d\n", hashmask + 1);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "read %d rels\n", numrel);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "rel hash buckets: %d, old %d\n", hashmask + 1, pool->relhashmask + 1);
#endif
- /*
- * prep hash table with already existing RelDeps
- */
-
- hashtbl = sat_calloc(hashmask + 1, sizeof(Id));
- for (i = 1; i < pool->nrels; i++)
+ if (hashmask > pool->relhashmask)
{
- h = relhash(ran[i].name, ran[i].evr, ran[i].flags) & hashmask;
- hh = HASHCHAIN_START;
- while (hashtbl[h])
- h = HASHCHAIN_NEXT(h, hh, hashmask);
- hashtbl[h] = i;
+ pool->relhashtbl = solv_free(pool->relhashtbl);
+ pool->relhashmask = hashmask;
+ pool->relhashtbl = hashtbl = solv_calloc(hashmask + 1, sizeof(Id));
+ for (i = 1; i < pool->nrels; i++)
+ {
+ h = relhash(ran[i].name, ran[i].evr, ran[i].flags) & hashmask;
+ hh = HASHCHAIN_START;
+ while (hashtbl[h])
+ h = HASHCHAIN_NEXT(h, hh, hashmask);
+ hashtbl[h] = i;
+ }
+ }
+ else
+ {
+ hashtbl = pool->relhashtbl;
+ hashmask = pool->relhashmask;
}
/*
* read RelDeps from repo
*/
-
for (i = 0; i < numrel; i++)
{
name = read_id(&data, i + numid); /* read (repo relative) Ids */
evr = read_id(&data, i + numid);
- flags = read_u8(&data);
+ relflags = read_u8(&data);
name = idmap[name]; /* map to (pool relative) Ids */
evr = idmap[evr];
- h = relhash(name, evr, flags) & hashmask;
+ h = relhash(name, evr, relflags) & hashmask;
hh = HASHCHAIN_START;
for (;;)
{
id = hashtbl[h];
- if (id == ID_NULL) /* end of hash chain */
+ if (!id) /* end of hash chain reached */
break;
- if (ran[id].name == name && ran[id].evr == evr && ran[id].flags == flags)
+ if (ran[id].name == name && ran[id].evr == evr && ran[id].flags == relflags)
break;
h = HASHCHAIN_NEXT(h, hh, hashmask);
}
- if (id == ID_NULL) /* new RelDep */
+ if (!id) /* new RelDep */
{
id = pool->nrels++;
hashtbl[h] = id;
ran[id].name = name;
ran[id].evr = evr;
- ran[id].flags = flags;
+ ran[id].flags = relflags;
}
idmap[i + numid] = MAKERELDEP(id); /* fill Id map */
}
- sat_free(hashtbl);
+ if (hashmask > mkmask(pool->nrels + 4096))
+ {
+ pool->relhashtbl = solv_free(pool->relhashtbl);
+ pool->relhashmask = 0;
+ }
pool_shrink_rels(pool); /* vacuum */
}
+ /* if we added ids/rels, make room in our whatprovide arrays */
+ if (!(flags & REPO_LOCALPOOL))
+ {
+ if (pool->whatprovides && oldnstrings != pool->ss.nstrings)
+ {
+ int newlen = (pool->ss.nstrings + WHATPROVIDES_BLOCK) & ~WHATPROVIDES_BLOCK;
+ pool->whatprovides = solv_realloc2(pool->whatprovides, newlen, sizeof(Offset));
+ memset(pool->whatprovides + oldnstrings, 0, (newlen - oldnstrings) * sizeof(Offset));
+ }
+ if (pool->whatprovides_rel && oldnrels != pool->nrels)
+ {
+ int newlen = (pool->nrels + WHATPROVIDES_BLOCK) & ~WHATPROVIDES_BLOCK;
+ pool->whatprovides_rel = solv_realloc2(pool->whatprovides_rel, newlen, sizeof(Offset));
+ memset(pool->whatprovides_rel + oldnrels, 0, (newlen - oldnrels) * sizeof(Offset));
+ }
+ }
/******* Part 3: Dirs ***********************************************/
if (numdir)
{
- data.dirpool.dirs = sat_malloc2(numdir, sizeof(Id));
+ data.dirpool.dirs = solv_malloc2(numdir, sizeof(Id));
data.dirpool.ndirs = numdir;
data.dirpool.dirs[0] = 0; /* dir 0: virtual root */
data.dirpool.dirs[1] = 1; /* dir 1: / */
/******* Part 4: Keys ***********************************************/
- keys = sat_calloc(numkeys, sizeof(*keys));
+ keys = solv_calloc(numkeys, sizeof(*keys));
/* keys start at 1 */
for (i = 1; i < numkeys; i++)
{
id = read_id(&data, numid);
if (idmap)
id = idmap[id];
- else if (parent)
- id = str2id(pool, stringpool_id2str(spool, id), 1);
+ else if ((flags & REPO_LOCALPOOL) != 0)
+ id = pool_str2id(pool, stringpool_id2str(spool, id), 1);
type = read_id(&data, numid);
if (idmap)
type = idmap[type];
- else if (parent)
- type = str2id(pool, stringpool_id2str(spool, type), 1);
+ else if ((flags & REPO_LOCALPOOL) != 0)
+ type = pool_str2id(pool, stringpool_id2str(spool, type), 1);
if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_FLEXARRAY)
{
- pool_debug(pool, SAT_ERROR, "unsupported data type '%s'\n", id2str(pool, type));
- data.error = SOLV_ERROR_UNSUPPORTED;
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported data type '%s'", pool_id2str(pool, type));
type = REPOKEY_TYPE_VOID;
}
keys[i].name = id;
keys[i].type = type;
keys[i].size = read_id(&data, keys[i].type == REPOKEY_TYPE_CONSTANTID ? numid + numrel : 0);
keys[i].storage = read_id(&data, 0);
- if (id >= SOLVABLE_NAME && id <= RPM_RPMDBID)
- keys[i].storage = KEY_STORAGE_SOLVABLE;
- else if (keys[i].storage == KEY_STORAGE_SOLVABLE)
+ /* old versions used SOLVABLE for main solvable data */
+ if (keys[i].storage == KEY_STORAGE_SOLVABLE)
keys[i].storage = KEY_STORAGE_INCORE;
- if (keys[i].type == REPOKEY_TYPE_CONSTANTID)
+ if (keys[i].storage != KEY_STORAGE_INCORE && keys[i].storage != KEY_STORAGE_VERTICAL_OFFSET)
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported storage type %d", keys[i].storage);
+ if (id >= SOLVABLE_NAME && id <= RPM_RPMDBID)
{
- if (idmap)
- keys[i].size = idmap[keys[i].size];
- else if (parent)
- keys[i].size = str2id(pool, stringpool_id2str(spool, keys[i].size), 1);
+ if (keys[i].storage != KEY_STORAGE_INCORE)
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "main solvable data must use incore storage %d", keys[i].storage);
+ keys[i].storage = KEY_STORAGE_SOLVABLE;
}
+ /* cannot handle rel idarrays in incore/vertical */
+ if (type == REPOKEY_TYPE_REL_IDARRAY && keys[i].storage != KEY_STORAGE_SOLVABLE)
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "type REL_IDARRAY is only supported for STORAGE_SOLVABLE");
+ /* cannot handle mapped ids in vertical */
+ if (!(flags & REPO_LOCALPOOL) && keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET && (type == REPOKEY_TYPE_ID || type == REPOKEY_TYPE_IDARRAY))
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "mapped ids are not supported for STORAGE_VERTICAL_OFFSET");
+
+ if (keys[i].type == REPOKEY_TYPE_CONSTANTID && idmap)
+ keys[i].size = idmap[keys[i].size];
#if 0
- fprintf(stderr, "key %d %s %s %d %d\n", i, id2str(pool,id), id2str(pool, keys[i].type),
+ fprintf(stderr, "key %d %s %s %d %d\n", i, pool_id2str(pool,id), pool_id2str(pool, keys[i].type),
keys[i].size, keys[i].storage);
#endif
}
- have_xdata = parent ? 1 : 0;
+ have_incoredata = 0;
for (i = 1; i < numkeys; i++)
if (keys[i].storage == KEY_STORAGE_INCORE || keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET)
- have_xdata = 1;
+ have_incoredata = 1;
data.keys = keys;
data.nkeys = numkeys;
/******* Part 5: Schemata ********************************************/
id = read_id(&data, 0);
- schemadata = sat_calloc(id + 1, sizeof(Id));
+ schemadata = solv_calloc(id + 1, sizeof(Id));
schemadatap = schemadata + 1;
schemadataend = schemadatap + id;
- schemata = sat_calloc(numschemata, sizeof(Id));
+ schemata = solv_calloc(numschemata, sizeof(Id));
for (i = 1; i < numschemata; i++)
{
schemata[i] = schemadatap - schemadata;
schemadatap = read_idarray(&data, numid, 0, schemadatap, schemadataend);
#if 0
Id *sp = schemadata + schemata[i];
- fprintf (stderr, "schema %d:", i);
+ fprintf(stderr, "schema %d:", i);
for (; *sp; sp++)
- fprintf (stderr, " %d", *sp);
- fprintf (stderr, "\n");
+ fprintf(stderr, " %d", *sp);
+ fprintf(stderr, "\n");
#endif
}
data.schemata = schemata;
maxsize = read_id(&data, 0);
allsize = read_id(&data, 0);
- maxsize += 5; /* so we can read the next schema */
+ maxsize += 5; /* so we can read the next schema of an array */
if (maxsize > allsize)
maxsize = allsize;
- left = 0;
- buf = sat_calloc(maxsize + 4, 1);
+ buf = solv_calloc(maxsize + DATA_READ_CHUNK + 4, 1); /* 4 extra bytes to detect overflows */
+ bufend = buf;
dp = buf;
l = maxsize;
+ if (l < DATA_READ_CHUNK)
+ l = DATA_READ_CHUNK;
if (l > allsize)
l = allsize;
if (!l || fread(buf, l, 1, data.fp) != 1)
{
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data.error = SOLV_ERROR_EOF;
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF");
id = 0;
}
else
{
- left = l;
+ bufend = buf + l;
allsize -= l;
- dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data);
}
- incore_add_id(&data, 0); /* XXX? */
- incore_add_id(&data, id);
+ incore_add_id(&data, 0); /* so that incoreoffset 0 means schema 0 */
+ incore_add_id(&data, id); /* main schema id */
keyp = schemadata + schemata[id];
data.mainschema = id;
for (i = 0; keyp[i]; i++)
;
if (i)
- data.mainschemaoffsets = sat_calloc(i, sizeof(Id));
+ data.mainschemaoffsets = solv_calloc(i, sizeof(Id));
nentries = 0;
keydepth = 0;
s = 0;
+ needchunk = 1;
for(;;)
{
+ /* make sure we have enough room */
+ if (keydepth == 0 || needchunk)
+ {
+ int left = bufend - dp;
+ /* read data chunk to dp */
+ if (data.error)
+ break;
+ if (left < 0)
+ {
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "buffer overrun");
+ break;
+ }
+ if (left < maxsize)
+ {
+ if (left)
+ memmove(buf, dp, left);
+ l = maxsize - left;
+ if (l < DATA_READ_CHUNK)
+ l = DATA_READ_CHUNK;
+ if (l > allsize)
+ l = allsize;
+ if (l && fread(buf + left, l, 1, data.fp) != 1)
+ {
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF");
+ break;
+ }
+ allsize -= l;
+ left += l;
+ bufend = buf + left;
+ if (allsize + left < maxsize)
+ maxsize = allsize + left;
+ dp = buf;
+ }
+ needchunk = 0;
+ }
+
key = *keyp++;
#if 0
-printf("key %d at %d\n", key, keyp - 1 - schemadata);
+printf("key %d at %d\n", key, (int)(keyp - 1 - schemadata));
#endif
if (!key)
{
+ if (keydepth <= 3)
+ needchunk = 1;
if (nentries)
{
- if (s && keydepth == 2)
+ if (s && keydepth == 3)
{
s++; /* next solvable */
- if (have_xdata)
+ if (have_incoredata)
data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
}
- dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
- incore_add_id(&data, id);
+ id = stack[keydepth - 1];
+ if (!id)
+ {
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data);
+ incore_add_id(&data, id);
+ }
keyp = schemadata + schemata[id];
nentries--;
continue;
}
if (!keydepth)
break;
+ --keydepth;
keyp = schemadata + stack[--keydepth];
nentries = stack[--keydepth];
#if 0
continue;
}
- if (keydepth <= 2)
- {
- if (keydepth == 0)
- data.mainschemaoffsets[keyp - 1 - (schemadata + schemata[data.mainschema])] = data.incoredatalen;
- /* read data chunk to dp */
- if (data.error)
- break;
- left -= (dp - buf);
- if (left < 0)
- {
- pool_debug(mypool, SAT_ERROR, "buffer overrun\n");
- data.error = SOLV_ERROR_EOF;
- break;
- }
- if (left)
- memmove(buf, dp, left);
- l = maxsize - left;
- if (l > allsize)
- l = allsize;
- if (l && fread(buf + left, l, 1, data.fp) != 1)
- {
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data.error = SOLV_ERROR_EOF;
- break;
- }
- allsize -= l;
- left += l;
- dp = buf;
- }
+ if (keydepth == 0)
+ data.mainschemaoffsets[keyp - 1 - (schemadata + schemata[data.mainschema])] = data.incoredatalen;
#if 0
-printf("=> %s %s %p\n", id2str(pool, keys[key].name), id2str(pool, keys[key].type), s);
+printf("=> %s %s %p\n", pool_id2str(pool, keys[key].name), pool_id2str(pool, keys[key].type), s);
#endif
id = keys[key].name;
if (keys[key].storage == KEY_STORAGE_VERTICAL_OFFSET)
dps = dp;
dp = data_skip(dp, REPOKEY_TYPE_ID);
dp = data_skip(dp, REPOKEY_TYPE_ID);
- incore_add_blob(&data, dps, dp - dps);
+ incore_add_blob(&data, dps, dp - dps); /* just record offset/size */
continue;
}
switch (keys[key].type)
{
case REPOKEY_TYPE_ID:
- dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data.error);
+ dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data);
if (s && id == SOLVABLE_NAME)
s->name = did;
else if (s && id == SOLVABLE_ARCH)
else if (keys[key].storage == KEY_STORAGE_INCORE)
incore_add_id(&data, did);
#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %s\n", id2str(pool, id), id2str(pool, did));
+ POOL_DEBUG(SOLV_DEBUG_STATS, "%s -> %s\n", pool_id2str(pool, id), pool_id2str(pool, did));
#endif
break;
- case REPOKEY_TYPE_U32:
- dp = data_read_u32(dp, &h);
-#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %u\n", id2str(pool, id), h);
-#endif
- if (s && id == RPM_RPMDBID)
- {
- if (!repo->rpmdbid)
- repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id));
- repo->rpmdbid[(s - pool->solvables) - repo->start] = h;
- }
- else if (keys[key].storage == KEY_STORAGE_INCORE)
- incore_add_u32(&data, h);
- break;
case REPOKEY_TYPE_IDARRAY:
case REPOKEY_TYPE_REL_IDARRAY:
if (!s || id < INTERESTED_START || id > INTERESTED_END)
if (keys[key].storage != KEY_STORAGE_INCORE)
break;
if (idmap)
- incore_map_idarray(&data, dps, idmap, numid);
+ incore_map_idarray(&data, dps, idmap, numid + numrel);
else
incore_add_blob(&data, dps, dp - dps);
break;
}
ido = idarraydatap - repo->idarraydata;
if (keys[key].type == REPOKEY_TYPE_IDARRAY)
- dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error);
+ dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data);
else if (id == SOLVABLE_REQUIRES)
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_PREREQMARKER);
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data, SOLVABLE_PREREQMARKER);
else if (id == SOLVABLE_PROVIDES)
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_FILEMARKER);
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data, SOLVABLE_FILEMARKER);
else
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, 0);
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data, 0);
if (idarraydatap > idarraydataend)
{
- pool_debug(pool, SAT_ERROR, "idarray overflow\n");
- data.error = SOLV_ERROR_OVERFLOW;
+ data.error = pool_error(pool, SOLV_ERROR_OVERFLOW, "idarray overflow");
break;
}
if (id == SOLVABLE_PROVIDES)
else if (id == SOLVABLE_ENHANCES)
s->enhances = ido;
#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s ->\n", id2str(pool, id));
+ POOL_DEBUG(SOLV_DEBUG_STATS, "%s ->\n", pool_id2str(pool, id));
for (; repo->idarraydata[ido]; ido++)
- POOL_DEBUG(SAT_DEBUG_STATS," %s\n", dep2str(pool, repo->idarraydata[ido]));
+ POOL_DEBUG(SOLV_DEBUG_STATS," %s\n", pool_dep2str(pool, repo->idarraydata[ido]));
#endif
break;
+ case REPOKEY_TYPE_FIXARRAY:
case REPOKEY_TYPE_FLEXARRAY:
+ if (!keydepth)
+ needchunk = 1;
if (keydepth == sizeof(stack)/sizeof(*stack))
{
- pool_debug(pool, SAT_ERROR, "flexarray stack overflow\n");
- data.error = SOLV_ERROR_CORRUPT;
+ data.error = pool_error(pool, SOLV_ERROR_OVERFLOW, "array stack overflow");
break;
}
stack[keydepth++] = nentries;
stack[keydepth++] = keyp - schemadata;
- dp = data_read_id(dp, &nentries);
+ stack[keydepth++] = 0;
+ dp = data_read_id_max(dp, &nentries, 0, 0, &data);
incore_add_id(&data, nentries);
if (!nentries)
{
/* zero size array? */
- keydepth--;
+ keydepth -= 2;
nentries = stack[--keydepth];
break;
}
- if (keydepth == 2 && id == REPOSITORY_SOLVABLES)
+ if (keydepth == 3 && id == REPOSITORY_SOLVABLES)
{
	    /* hooray! here come the solvables */
if (nentries != numsolv)
{
- pool_debug(pool, SAT_ERROR, "inconsistent number of solvables: %d %d\n", nentries, numsolv);
- data.error = SOLV_ERROR_CORRUPT;
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "inconsistent number of solvables: %d %d", nentries, numsolv);
break;
}
if (idarraydatap)
{
- pool_debug(pool, SAT_ERROR, "more than one solvable block\n");
- data.error = SOLV_ERROR_CORRUPT;
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "more than one solvable block");
break;
}
- if (parent)
- s = pool_id2solvable(pool, parent->start);
+ if ((flags & REPO_EXTEND_SOLVABLES) != 0)
+ s = pool_id2solvable(pool, extendstart);
else
s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv));
data.start = s - pool->solvables;
repo->idarraysize += size_idarray;
idarraydataend = idarraydatap + size_idarray;
repo->lastoff = 0;
- if (have_xdata)
+ if (have_incoredata)
data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
}
nentries--;
- dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data);
incore_add_id(&data, id);
+ if (keys[key].type == REPOKEY_TYPE_FIXARRAY)
+ {
+ if (!id)
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "illegal fixarray");
+ stack[keydepth - 1] = id;
+ }
keyp = schemadata + schemata[id];
break;
+ case REPOKEY_TYPE_NUM:
+ if (!(solvflags & SOLV_FLAG_SIZE_BYTES) && keys[key].storage == KEY_STORAGE_INCORE &&
+ (id == SOLVABLE_INSTALLSIZE || id == SOLVABLE_DOWNLOADSIZE || id == DELTA_DOWNLOADSIZE))
+ {
+ /* old solv file with sizes in kilos. transcode. */
+ dp = data_read_id(dp, &id);
+ incore_add_sizek(&data, (unsigned int)id);
+ break;
+ }
+ /* FALLTHROUGH */
default:
+ if (id == RPM_RPMDBID && s && (keys[key].type == REPOKEY_TYPE_U32 || keys[key].type == REPOKEY_TYPE_NUM))
+ {
+ if (keys[key].type == REPOKEY_TYPE_U32)
+ dp = data_read_u32(dp, (unsigned int *)&id);
+ else
+ dp = data_read_id_max(dp, &id, 0, 0, &data);
+ if (!repo->rpmdbid)
+ repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id));
+ repo->rpmdbid[(s - pool->solvables) - repo->start] = id;
+ break;
+ }
dps = dp;
dp = data_skip(dp, keys[key].type);
if (keys[key].storage == KEY_STORAGE_INCORE)
/* should shrink idarraydata again */
if (keydepth)
- {
- pool_debug(pool, SAT_ERROR, "unexpected EOF, depth = %d\n", keydepth);
- data.error = SOLV_ERROR_CORRUPT;
- }
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF, depth = %d", keydepth);
if (!data.error)
{
- left -= (dp - buf);
- if (left < 0)
- {
- pool_debug(mypool, SAT_ERROR, "buffer overrun\n");
- data.error = SOLV_ERROR_EOF;
- }
+ if (dp > bufend)
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "buffer overrun");
}
- sat_free(buf);
+ solv_free(buf);
if (data.error)
{
/* free id array */
repo->idarraysize -= size_idarray;
/* free incore data */
- data.incoredata = sat_free(data.incoredata);
+ data.incoredata = solv_free(data.incoredata);
data.incoredatalen = data.incoredatafree = 0;
}
if (data.incoredatafree)
{
/* shrink excess size */
- data.incoredata = sat_realloc(data.incoredata, data.incoredatalen);
+ data.incoredata = solv_realloc(data.incoredata, data.incoredatalen);
data.incoredatafree = 0;
}
+ solv_free(idmap);
+
+ /* fixup the special idarray type */
+ for (i = 1; i < numkeys; i++)
+ if (keys[i].type == REPOKEY_TYPE_REL_IDARRAY)
+ keys[i].type = REPOKEY_TYPE_IDARRAY;
for (i = 1; i < numkeys; i++)
if (keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET)
unsigned int pagesize;
/* we have vertical data, make it available */
- data.verticaloffset = sat_calloc(numkeys, sizeof(Id));
+ data.verticaloffset = solv_calloc(numkeys, sizeof(Id));
for (i = 1; i < numkeys; i++)
if (keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET)
{
}
data.lastverticaloffset = fileoffset;
pagesize = read_u32(&data);
- data.error = repopagestore_read_or_setup_pages(&data.store, data.fp, pagesize, fileoffset);
- }
- else
- {
- /* no longer needed */
- data.fp = 0;
+ if (!data.error)
+ {
+ data.error = repopagestore_read_or_setup_pages(&data.store, data.fp, pagesize, fileoffset);
+ if (data.error == SOLV_ERROR_EOF)
+ pool_error(pool, data.error, "repopagestore setup: unexpected EOF");
+ else if (data.error)
+ pool_error(pool, data.error, "repopagestore setup failed");
+ }
}
- sat_free(idmap);
- mypool = 0;
+ data.fp = 0; /* no longer needed */
if (data.error)
{
- /* XXX: free repodata? */
- return data.error;
+ i = data.error;
+ repodata_freedata(&data);
+ return i;
}
if (parent)
{
/* overwrite stub repodata */
- repodata_free(parent);
+ repodata_freedata(parent);
+ data.repodataid = parent->repodataid;
*parent = data;
}
else
{
/* make it available as new repodata */
- repo->repodata = sat_realloc2(repo->repodata, repo->nrepodata + 1, sizeof(data));
+ if (!repo->nrepodata)
+ {
+ repo->nrepodata = 1;
+ repo->repodata = solv_calloc(2, sizeof(data));
+ }
+ else
+ repo->repodata = solv_realloc2(repo->repodata, repo->nrepodata + 1, sizeof(data));
+ data.repodataid = repo->nrepodata;
repo->repodata[repo->nrepodata++] = data;
}
/* create stub repodata entries for all external */
- for (key = 1 ; key < data.nkeys; key++)
- if (data.keys[key].name == REPOSITORY_EXTERNAL && data.keys[key].type == REPOKEY_TYPE_FLEXARRAY)
- break;
- if (key < data.nkeys)
+ if (!(flags & SOLV_ADD_NO_STUBS) && !parent)
{
- struct create_stub_data stubdata;
- /* got some */
- memset(&stubdata, 0, sizeof(stubdata));
- repodata_search(&data, REPOENTRY_META, REPOSITORY_EXTERNAL, create_stub_cb, &stubdata);
+ for (key = 1 ; key < data.nkeys; key++)
+ if (data.keys[key].name == REPOSITORY_EXTERNAL && data.keys[key].type == REPOKEY_TYPE_FLEXARRAY)
+ break;
+ if (key < data.nkeys)
+ repodata_create_stubs(repo->repodata + (repo->nrepodata - 1));
}
- return 0;
-}
-int
-repo_add_solv(Repo *repo, FILE *fp)
-{
- return repo_add_solv_parent(repo, fp, 0);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "repo_add_solv took %d ms\n", solv_timems(now));
+ POOL_DEBUG(SOLV_DEBUG_STATS, "repo size: %d solvables\n", repo->nsolvables);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "repo memory used: %d K incore, %d K idarray\n", data.incoredatalen/1024, repo->idarraysize / (int)(1024/sizeof(Id)));
+ return 0;
}
-static void
-repodata_load_stub(Repodata *data)
-{
- FILE *fp;
- Pool *pool = data->repo->pool;
- if (!pool->loadcallback)
- {
- data->state = REPODATA_ERROR;
- return;
- }
- /* so that we can retrieve meta data */
- data->state = REPODATA_AVAILABLE;
- fp = pool->loadcallback(pool, data, pool->loadcallbackdata);
- if (!fp)
- {
- data->state = REPODATA_ERROR;
- return;
- }
- if (repo_add_solv_parent(data->repo, fp, data))
- data->state = REPODATA_ERROR;
- else
- data->state = REPODATA_AVAILABLE;
- fclose(fp);
-}