/*
* repo_solv.c
- *
+ *
* Add a repo in solv format
- *
+ *
*/
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
x = (x << 8) | c;
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
return c;
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
if (!(c & 128))
{
x = (x << 7) | c;
- if (max && x >= max)
+ if (max && x >= (unsigned int)max)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "read_id: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "read_id: id too large (%u/%u)", x, max);
return 0;
}
return x;
}
x = (x << 7) ^ c ^ 128;
}
- pool_debug(data->repo->pool, SOLV_ERROR, "read_id: id too long\n");
- data->error = SOLV_ERROR_CORRUPT;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_CORRUPT, "read_id: id too long");
return 0;
}
c = getc(data->fp);
if (c == EOF)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_EOF, "unexpected EOF");
return 0;
}
if ((c & 128) != 0)
continue;
}
x = (x << 6) | (c & 63);
- if (max && x >= max)
+ if (max && x >= (unsigned int)max)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "read_idarray: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "read_idarray: id too large (%u/%u)", x, max);
return 0;
}
if (map)
x = map[x];
if (store == end)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "read_idarray: array overflow\n");
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_OVERFLOW, "read_idarray: array overflow");
return 0;
}
*store++ = x;
return store;
if (store == end)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "read_idarray: array overflow\n");
- data->error = SOLV_ERROR_OVERFLOW;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_OVERFLOW, "read_idarray: array overflow");
return 0;
}
*store++ = 0;
dp = data_read_id(dp, &x);
if (x < 0 || (max && x >= max))
{
- pool_debug(data->repo->pool, SOLV_ERROR, "data_read_id_max: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_id_max: id too large (%u/%u)", x, max);
x = 0;
}
*ret = map ? map[x] : x;
continue;
}
x = (x << 6) | (c & 63);
- if (max && x >= max)
+ if (max && x >= (unsigned int)max)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "data_read_idarray: id too large (%u/%u)\n", x, max);
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_idarray: id too large (%u/%u)", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
break;
}
- *store++ = x;
+ *store++ = map ? map[x] : x;
if ((c & 64) == 0)
break;
x = 0;
}
x = old + (x - 1);
old = x;
- if (max && x >= max)
+ if (max && x >= (unsigned int)max)
{
- pool_debug(data->repo->pool, SOLV_ERROR, "data_read_rel_idarray: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "data_read_rel_idarray: id too large (%u/%u)", x, max);
break;
}
*store++ = map ? map[x] : x;
static void
incore_add_blob(Repodata *data, unsigned char *buf, int len)
{
- if (data->incoredatafree < len)
+ if (data->incoredatafree < (unsigned int)len)
{
data->incoredata = solv_realloc(data->incoredata, data->incoredatalen + INCORE_ADD_CHUNK + len);
data->incoredatafree = INCORE_ADD_CHUNK + len;
dp = data_read_ideof(dp, &id, &eof);
if (id < 0 || (max && id >= max))
{
- pool_debug(data->repo->pool, SOLV_ERROR, "incore_map_idarray: id too large (%u/%u)\n", id, max);
- data->error = SOLV_ERROR_ID_RANGE;
+ data->error = pool_error(data->repo->pool, SOLV_ERROR_ID_RANGE, "incore_map_idarray: id too large (%u/%u)", id, max);
break;
}
id = map[id];
{
Pool *pool = repo->pool;
int i, l;
- unsigned int numid, numrel, numdir, numsolv;
- unsigned int numkeys, numschemata;
+ int numid, numrel, numdir, numsolv;
+ int numkeys, numschemata;
Offset sizeid;
Offset *str; /* map Id -> Offset into string space */
char *sp; /* pointer into string space */
Id *idmap; /* map of repo Ids to pool Ids */
Id id, type;
- unsigned int hashmask, h;
- int hh;
- Id *hashtbl;
+ Hashval hashmask, h, hh;
+ Hashtable hashtbl;
Id name, evr, did;
int relflags;
Reldep *ran;
Id *schemadata, *schemadatap, *schemadataend;
Id *schemata, key, *keyp;
int nentries;
- int have_xdata;
+ int have_incoredata;
int maxsize, allsize;
unsigned char *buf, *bufend, *dp, *dps;
Id stack[3 * 5];
int oldnstrings = pool->ss.nstrings;
int oldnrels = pool->nrels;
- struct _Stringpool *spool;
+ struct s_Stringpool *spool;
Repodata *parent = 0;
Repodata data;
+ int extendstart = 0, extendend = 0; /* set in case we're extending */
+
now = solv_timems(0);
if ((flags & REPO_USE_LOADING) != 0)
flags |= REPO_EXTEND_SOLVABLES;
/* use REPO_REUSE_REPODATA hack so that the old repodata is kept */
parent = repo_add_repodata(repo, flags | REPO_REUSE_REPODATA);
+ extendstart = parent->start;
+ extendend = parent->end;
+ }
+ else if (flags & REPO_EXTEND_SOLVABLES)
+ {
+ /* extend all solvables of this repo */
+ extendstart = repo->start;
+ extendend = repo->end;
}
-
+
memset(&data, 0, sizeof(data));
data.repo = repo;
data.fp = fp;
repopagestore_init(&data.store);
if (read_u32(&data) != ('S' << 24 | 'O' << 16 | 'L' << 8 | 'V'))
- {
- pool_debug(pool, SOLV_ERROR, "not a SOLV file\n");
- return SOLV_ERROR_NOT_SOLV;
- }
+ return pool_error(pool, SOLV_ERROR_NOT_SOLV, "not a SOLV file");
solvversion = read_u32(&data);
switch (solvversion)
{
case SOLV_VERSION_8:
break;
default:
- pool_debug(pool, SOLV_ERROR, "unsupported SOLV version\n");
- return SOLV_ERROR_UNSUPPORTED;
+ return pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported SOLV version");
}
- numid = read_u32(&data);
- numrel = read_u32(&data);
- numdir = read_u32(&data);
- numsolv = read_u32(&data);
- numkeys = read_u32(&data);
- numschemata = read_u32(&data);
+ numid = (int)read_u32(&data);
+ numrel = (int)read_u32(&data);
+ numdir = (int)read_u32(&data);
+ numsolv = (int)read_u32(&data);
+ numkeys = (int)read_u32(&data);
+ numschemata = (int)read_u32(&data);
solvflags = read_u32(&data);
- if (numdir && numdir < 2)
- {
- pool_debug(pool, SOLV_ERROR, "bad number of dirs\n");
- return SOLV_ERROR_CORRUPT;
- }
+ if (numid < 0 || numid >= 0x20000000)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of ids");
+ if (numrel < 0 || numrel >= 0x20000000)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of rels");
+ if (numdir && (numdir < 2 || numdir >= 0x20000000))
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of dirs");
+ if (numsolv < 0 || numsolv >= 0x20000000)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of solvables");
+ if (numkeys < 0 || numkeys >= 0x20000000)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of keys");
+ if (numschemata < 0 || numschemata >= 0x20000000)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "bad number of schematas");
if (numrel && (flags & REPO_LOCALPOOL) != 0)
- {
- pool_debug(pool, SOLV_ERROR, "relations are forbidden in a local pool\n");
- return SOLV_ERROR_CORRUPT;
- }
- if (parent && numsolv)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "relations are forbidden in a local pool");
+ if ((flags & REPO_EXTEND_SOLVABLES) && numsolv)
{
/* make sure that we exactly replace the stub repodata */
- if (parent->end - parent->start != numsolv)
- {
- pool_debug(pool, SOLV_ERROR, "sub-repository solvable number does not match main repository (%d - %d)\n", parent->end - parent->start, numsolv);
- return SOLV_ERROR_CORRUPT;
- }
+ if (extendend - extendstart != numsolv)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "sub-repository solvable number does not match main repository (%d - %d)", extendend - extendstart, numsolv);
for (i = 0; i < numsolv; i++)
- if (pool->solvables[parent->start + i].repo != repo)
- {
- pool_debug(pool, SOLV_ERROR, "main repository contains holes\n");
- return SOLV_ERROR_CORRUPT;
- }
+ if (pool->solvables[extendstart + i].repo != repo)
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "main repository contains holes, cannot extend");
}
/******* Part 1: string IDs *****************************************/
- sizeid = read_u32(&data); /* size of string+Id space */
+ sizeid = read_u32(&data); /* size of string space */
/*
* read strings and Ids
- *
+ *
*/
-
+
/*
* alloc buffers
*/
if (!(flags & REPO_LOCALPOOL))
- spool = &pool->ss;
+ {
+ spool = &pool->ss;
+ /* alloc max needed string buffer and string pointers, will shrink again later */
+#if 0
+ spool->stringspace = solv_realloc(spool->stringspace, spool->sstrings + sizeid + 1);
+ spool->strings = solv_realloc2(spool->strings, spool->nstrings + numid, sizeof(Offset));
+#else
+ spool->sstrings += sizeid + 1;
+ spool->nstrings += numid;
+ stringpool_shrink(spool); /* we misuse stringpool_shrink so that the correct BLOCK factor is used */
+ spool->sstrings -= sizeid + 1;
+ spool->nstrings -= numid;
+#endif
+ }
else
{
data.localpool = 1;
spool = &data.spool;
- spool->stringspace = solv_malloc(7);
+ spool->stringspace = solv_malloc(7 + sizeid + 1);
+ spool->strings = solv_malloc2(numid < 2 ? 2 : numid, sizeof(Offset));
strcpy(spool->stringspace, "<NULL>");
spool->sstrings = 7;
- spool->nstrings = 0;
+ spool->nstrings = 1;
+ spool->strings[0] = 0; /* <NULL> */
}
- /* alloc string buffer */
- spool->stringspace = solv_realloc(spool->stringspace, spool->sstrings + sizeid + 1);
- /* alloc string offsets (Id -> Offset into string space) */
- spool->strings = solv_realloc2(spool->strings, spool->nstrings + numid, sizeof(Offset));
-
- strsp = spool->stringspace;
- str = spool->strings; /* array of offsets into strsp, indexed by Id */
-
- /* point to _BEHIND_ already allocated string/Id space */
- strsp += spool->sstrings;
-
/*
- * read new repo at end of pool
+ * read string data and append to old string space
*/
-
+
+ strsp = spool->stringspace + spool->sstrings; /* append new entries */
if ((solvflags & SOLV_FLAG_PREFIX_POOL) == 0)
{
if (sizeid && fread(strsp, sizeid, 1, fp) != 1)
{
- pool_debug(pool, SOLV_ERROR, "read error while reading strings\n");
- return SOLV_ERROR_EOF;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_EOF, "read error while reading strings");
}
}
else
unsigned int pfsize = read_u32(&data);
char *prefix = solv_malloc(pfsize);
char *pp = prefix;
- char *old_str = 0;
+ char *old_str = strsp;
char *dest = strsp;
int freesp = sizeid;
if (pfsize && fread(prefix, pfsize, 1, fp) != 1)
- {
- pool_debug(pool, SOLV_ERROR, "read error while reading strings\n");
+ {
solv_free(prefix);
- return SOLV_ERROR_EOF;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_EOF, "read error while reading strings");
}
for (i = 1; i < numid; i++)
{
freesp -= same + len;
if (freesp < 0)
{
- pool_debug(pool, SOLV_ERROR, "overflow while expanding strings\n");
solv_free(prefix);
- return SOLV_ERROR_OVERFLOW;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_OVERFLOW, "overflow while expanding strings");
}
if (same)
memcpy(dest, old_str, same);
solv_free(prefix);
if (freesp != 0)
{
- pool_debug(pool, SOLV_ERROR, "expanding strings size mismatch\n");
- return SOLV_ERROR_CORRUPT;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "expanding strings size mismatch");
}
}
strsp[sizeid] = 0; /* make string space \0 terminated */
sp = strsp;
+ /* now merge */
+ str = spool->strings; /* array of offsets into strsp, indexed by Id */
if ((flags & REPO_LOCALPOOL) != 0)
{
- /* no shared pool, thus no idmap and no unification */
+ /* no shared pool, thus no idmap and no unification needed */
idmap = 0;
- spool->nstrings = numid;
- str[0] = 0;
+ spool->nstrings = numid < 2 ? 2 : numid; /* make sure we have at least id 0 and 1 */
if (*sp)
{
- /* we need the '' for directories */
- pool_debug(pool, SOLV_ERROR, "store strings don't start with ''\n");
- return SOLV_ERROR_CORRUPT;
+ /* we need id 1 to be '' for directories */
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_CORRUPT, "store strings don't start with an empty string");
}
for (i = 1; i < spool->nstrings; i++)
{
- if (sp >= strsp + sizeid)
+ if (sp >= strsp + sizeid && numid >= 2)
{
- pool_debug(pool, SOLV_ERROR, "not enough strings\n");
- return SOLV_ERROR_OVERFLOW;
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_OVERFLOW, "not enough strings");
}
str[i] = sp - spool->stringspace;
sp += strlen(sp) + 1;
}
else
{
+ Offset oldsstrings = spool->sstrings;
+
/* alloc id map for name and rel Ids. this maps ids in the solv files
* to the ids in our pool */
idmap = solv_calloc(numid + numrel, sizeof(Id));
-
- /* grow hash if needed, otherwise reuse */
- hashmask = mkmask(spool->nstrings + numid);
+ stringpool_resize_hash(spool, numid);
+ hashtbl = spool->stringhashtbl;
+ hashmask = spool->stringhashmask;
#if 0
POOL_DEBUG(SOLV_DEBUG_STATS, "read %d strings\n", numid);
- POOL_DEBUG(SOLV_DEBUG_STATS, "string hash buckets: %d, old %d\n", hashmask + 1, spool->stringhashmask + 1);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "string hash buckets: %d\n", hashmask + 1);
#endif
- if (hashmask > spool->stringhashmask)
- {
- spool->stringhashtbl = solv_free(spool->stringhashtbl);
- spool->stringhashmask = hashmask;
- spool->stringhashtbl = hashtbl = solv_calloc(hashmask + 1, sizeof(Id));
- for (i = 1; i < spool->nstrings; i++)
- {
- h = strhash(spool->stringspace + spool->strings[i]) & hashmask;
- hh = HASHCHAIN_START;
- while (hashtbl[h])
- h = HASHCHAIN_NEXT(h, hh, hashmask);
- hashtbl[h] = i;
- }
- }
- else
- {
- hashtbl = spool->stringhashtbl;
- hashmask = spool->stringhashmask;
- }
-
/*
* run over strings and merge with pool.
+ * we could use stringpool_str2id, but this is faster.
* also populate id map (maps solv Id -> pool Id)
*/
for (i = 1; i < numid; i++)
{
if (sp >= strsp + sizeid)
{
- solv_free(hashtbl);
solv_free(idmap);
- pool_debug(pool, SOLV_ERROR, "not enough strings %d %d\n", i, numid);
- return SOLV_ERROR_OVERFLOW;
+ spool->nstrings = oldnstrings;
+ spool->sstrings = oldsstrings;
+ stringpool_freehash(spool);
+ repodata_freedata(&data);
+ return pool_error(pool, SOLV_ERROR_OVERFLOW, "not enough strings %d %d", i, numid);
}
if (!*sp) /* empty string */
{
idmap[i] = id; /* repo relative -> pool relative */
sp += l; /* next string */
}
- if (hashmask > mkmask(spool->nstrings + 8192))
- {
- spool->stringhashtbl = solv_free(spool->stringhashtbl);
- spool->stringhashmask = 0;
- }
+ stringpool_shrink(spool); /* vacuum */
}
- pool_shrink_strings(pool); /* vacuum */
-
+
/******* Part 2: Relation IDs ***************************************/
/*
* read RelDeps
- *
+ *
*/
-
+
if (numrel)
{
/* extend rels */
pool->rels = solv_realloc2(pool->rels, pool->nrels + numrel, sizeof(Reldep));
ran = pool->rels;
- /* grow hash if needed, otherwise reuse */
- hashmask = mkmask(pool->nrels + numrel);
+ pool_resize_rels_hash(pool, numrel);
+ hashtbl = pool->relhashtbl;
+ hashmask = pool->relhashmask;
#if 0
POOL_DEBUG(SOLV_DEBUG_STATS, "read %d rels\n", numrel);
- POOL_DEBUG(SOLV_DEBUG_STATS, "rel hash buckets: %d, old %d\n", hashmask + 1, pool->relhashmask + 1);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "rel hash buckets: %d\n", hashmask + 1);
#endif
- if (hashmask > pool->relhashmask)
- {
- pool->relhashtbl = solv_free(pool->relhashtbl);
- pool->relhashmask = hashmask;
- pool->relhashtbl = hashtbl = solv_calloc(hashmask + 1, sizeof(Id));
- for (i = 1; i < pool->nrels; i++)
- {
- h = relhash(ran[i].name, ran[i].evr, ran[i].flags) & hashmask;
- hh = HASHCHAIN_START;
- while (hashtbl[h])
- h = HASHCHAIN_NEXT(h, hh, hashmask);
- hashtbl[h] = i;
- }
- }
- else
- {
- hashtbl = pool->relhashtbl;
- hashmask = pool->relhashmask;
- }
/*
* read RelDeps from repo
}
idmap[i + numid] = MAKERELDEP(id); /* fill Id map */
}
- if (hashmask > mkmask(pool->nrels + 4096))
- {
- pool->relhashtbl = solv_free(pool->relhashtbl);
- pool->relhashmask = 0;
- }
pool_shrink_rels(pool); /* vacuum */
}
{
id = read_id(&data, i + numid);
if (id >= numid)
- data.dirpool.dirs[i] = -(id - numid);
- else if (idmap)
- data.dirpool.dirs[i] = idmap[id];
- else
- data.dirpool.dirs[i] = id;
+ {
+ data.dirpool.dirs[i++] = -(id - numid);
+ if (i >= numdir)
+ {
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "last dir entry is not a component");
+ break;
+ }
+ id = read_id(&data, numid);
+ }
+ if (idmap)
+ id = idmap[id];
+ data.dirpool.dirs[i] = id;
+ if (id <= 0)
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "bad dir component");
}
}
type = idmap[type];
else if ((flags & REPO_LOCALPOOL) != 0)
type = pool_str2id(pool, stringpool_id2str(spool, type), 1);
- if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_FLEXARRAY)
+ if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_DELETED)
{
- pool_debug(pool, SOLV_ERROR, "unsupported data type '%s'\n", pool_id2str(pool, type));
- data.error = SOLV_ERROR_UNSUPPORTED;
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported data type '%s'", pool_id2str(pool, type));
type = REPOKEY_TYPE_VOID;
}
keys[i].name = id;
if (keys[i].storage == KEY_STORAGE_SOLVABLE)
keys[i].storage = KEY_STORAGE_INCORE;
if (keys[i].storage != KEY_STORAGE_INCORE && keys[i].storage != KEY_STORAGE_VERTICAL_OFFSET)
- {
- pool_debug(pool, SOLV_ERROR, "unsupported storage type %d\n", keys[i].storage);
- data.error = SOLV_ERROR_UNSUPPORTED;
- }
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "unsupported storage type %d", keys[i].storage);
if (id >= SOLVABLE_NAME && id <= RPM_RPMDBID)
{
if (keys[i].storage != KEY_STORAGE_INCORE)
- {
- pool_debug(pool, SOLV_ERROR, "main solvable data must use incore storage%d\n", keys[i].storage);
- data.error = SOLV_ERROR_UNSUPPORTED;
- }
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "main solvable data must use incore storage %d", keys[i].storage);
keys[i].storage = KEY_STORAGE_SOLVABLE;
}
+ if ((type == REPOKEY_TYPE_FIXARRAY || type == REPOKEY_TYPE_FLEXARRAY) && keys[i].storage != KEY_STORAGE_INCORE)
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "flex/fixarrays must use incore storage");
/* cannot handle rel idarrays in incore/vertical */
if (type == REPOKEY_TYPE_REL_IDARRAY && keys[i].storage != KEY_STORAGE_SOLVABLE)
- {
- pool_debug(pool, SOLV_ERROR, "type REL_IDARRAY is only supported for STORAGE_SOLVABLE\n");
- data.error = SOLV_ERROR_UNSUPPORTED;
- }
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "type REL_IDARRAY is only supported for STORAGE_SOLVABLE");
/* cannot handle mapped ids in vertical */
if (!(flags & REPO_LOCALPOOL) && keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET && (type == REPOKEY_TYPE_ID || type == REPOKEY_TYPE_IDARRAY))
- {
- pool_debug(pool, SOLV_ERROR, "mapped ids are not supported for STORAGE_VERTICAL_OFFSET\n");
- data.error = SOLV_ERROR_UNSUPPORTED;
- }
-
+ data.error = pool_error(pool, SOLV_ERROR_UNSUPPORTED, "mapped ids are not supported for STORAGE_VERTICAL_OFFSET");
+
if (keys[i].type == REPOKEY_TYPE_CONSTANTID && idmap)
keys[i].size = idmap[keys[i].size];
#if 0
#endif
}
- have_xdata = parent ? 1 : 0;
+ have_incoredata = 0;
for (i = 1; i < numkeys; i++)
if (keys[i].storage == KEY_STORAGE_INCORE || keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET)
- have_xdata = 1;
+ have_incoredata = 1;
data.keys = keys;
data.nkeys = numkeys;
}
/******* Part 5: Schemata ********************************************/
-
+
id = read_id(&data, 0);
schemadata = solv_calloc(id + 1, sizeof(Id));
schemadatap = schemadata + 1;
l = allsize;
if (!l || fread(buf, l, 1, data.fp) != 1)
{
- pool_debug(pool, SOLV_ERROR, "unexpected EOF\n");
- data.error = SOLV_ERROR_EOF;
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF");
id = 0;
}
else
dp = data_read_id_max(dp, &id, 0, numschemata, &data);
}
- incore_add_id(&data, 0); /* XXX? */
- incore_add_id(&data, id);
+ incore_add_id(&data, 0); /* so that incoreoffset 0 means schema 0 */
+ incore_add_id(&data, id); /* main schema id */
keyp = schemadata + schemata[id];
data.mainschema = id;
for (i = 0; keyp[i]; i++)
break;
if (left < 0)
{
- pool_debug(pool, SOLV_ERROR, "buffer overrun\n");
- data.error = SOLV_ERROR_EOF;
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "buffer overrun");
break;
}
if (left < maxsize)
l = allsize;
if (l && fread(buf + left, l, 1, data.fp) != 1)
{
- pool_debug(pool, SOLV_ERROR, "unexpected EOF\n");
- data.error = SOLV_ERROR_EOF;
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF");
break;
}
allsize -= l;
if (s && keydepth == 3)
{
s++; /* next solvable */
- if (have_xdata)
+ if (have_incoredata)
data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
}
id = stack[keydepth - 1];
case REPOKEY_TYPE_ID:
dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data);
if (s && id == SOLVABLE_NAME)
- s->name = did;
+ s->name = did;
else if (s && id == SOLVABLE_ARCH)
- s->arch = did;
+ s->arch = did;
else if (s && id == SOLVABLE_EVR)
- s->evr = did;
+ s->evr = did;
else if (s && id == SOLVABLE_VENDOR)
- s->vendor = did;
+ s->vendor = did;
else if (keys[key].storage == KEY_STORAGE_INCORE)
incore_add_id(&data, did);
#if 0
dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data, 0);
if (idarraydatap > idarraydataend)
{
- pool_debug(pool, SOLV_ERROR, "idarray overflow\n");
- data.error = SOLV_ERROR_OVERFLOW;
+ data.error = pool_error(pool, SOLV_ERROR_OVERFLOW, "idarray overflow");
break;
}
if (id == SOLVABLE_PROVIDES)
needchunk = 1;
if (keydepth == sizeof(stack)/sizeof(*stack))
{
- pool_debug(pool, SOLV_ERROR, "array stack overflow\n");
- data.error = SOLV_ERROR_CORRUPT;
+ data.error = pool_error(pool, SOLV_ERROR_OVERFLOW, "array stack overflow");
break;
}
stack[keydepth++] = nentries;
/* horray! here come the solvables */
if (nentries != numsolv)
{
- pool_debug(pool, SOLV_ERROR, "inconsistent number of solvables: %d %d\n", nentries, numsolv);
- data.error = SOLV_ERROR_CORRUPT;
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "inconsistent number of solvables: %d %d", nentries, numsolv);
break;
}
if (idarraydatap)
{
- pool_debug(pool, SOLV_ERROR, "more than one solvable block\n");
- data.error = SOLV_ERROR_CORRUPT;
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "more than one solvable block");
break;
}
- if (parent)
- s = pool_id2solvable(pool, parent->start);
+ if ((flags & REPO_EXTEND_SOLVABLES) != 0)
+ s = pool_id2solvable(pool, extendstart);
else
s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv));
data.start = s - pool->solvables;
repo->idarraysize += size_idarray;
idarraydataend = idarraydatap + size_idarray;
repo->lastoff = 0;
- if (have_xdata)
+ if (have_incoredata)
data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
}
nentries--;
if (keys[key].type == REPOKEY_TYPE_FIXARRAY)
{
if (!id)
- {
- pool_debug(pool, SOLV_ERROR, "illegal fixarray\n");
- data.error = SOLV_ERROR_CORRUPT;
- }
+ data.error = pool_error(pool, SOLV_ERROR_CORRUPT, "illegal fixarray");
stack[keydepth - 1] = id;
}
keyp = schemadata + schemata[id];
}
/* FALLTHROUGH */
default:
- if (id == RPM_RPMDBID && s && (keys[key].type == REPOKEY_TYPE_U32 || keys[key].type == REPOKEY_TYPE_NUM))
+ if (id == RPM_RPMDBID && s && keys[key].type == REPOKEY_TYPE_NUM)
{
- if (keys[key].type == REPOKEY_TYPE_U32)
- dp = data_read_u32(dp, (unsigned int *)&id);
- else
- dp = data_read_id_max(dp, &id, 0, 0, &data);
+ dp = data_read_id(dp, &id);
if (!repo->rpmdbid)
repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id));
repo->rpmdbid[(s - pool->solvables) - repo->start] = id;
/* should shrink idarraydata again */
if (keydepth)
- {
- pool_debug(pool, SOLV_ERROR, "unexpected EOF, depth = %d\n", keydepth);
- data.error = SOLV_ERROR_CORRUPT;
- }
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "unexpected EOF, depth = %d", keydepth);
if (!data.error)
{
if (dp > bufend)
- {
- pool_debug(pool, SOLV_ERROR, "buffer overrun\n");
- data.error = SOLV_ERROR_EOF;
- }
+ data.error = pool_error(pool, SOLV_ERROR_EOF, "buffer overrun");
}
solv_free(buf);
data.incoredata = solv_realloc(data.incoredata, data.incoredatalen);
data.incoredatafree = 0;
}
+ solv_free(idmap);
+
+ /* fixup the special idarray type */
+ for (i = 1; i < numkeys; i++)
+ if (keys[i].type == REPOKEY_TYPE_REL_IDARRAY)
+ keys[i].type = REPOKEY_TYPE_IDARRAY;
for (i = 1; i < numkeys; i++)
- if (keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ if (keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET && keys[i].size)
break;
if (i < numkeys && !data.error)
{
Id fileoffset = 0;
unsigned int pagesize;
-
+
/* we have vertical data, make it available */
data.verticaloffset = solv_calloc(numkeys, sizeof(Id));
for (i = 1; i < numkeys; i++)
}
data.lastverticaloffset = fileoffset;
pagesize = read_u32(&data);
- data.error = repopagestore_read_or_setup_pages(&data.store, data.fp, pagesize, fileoffset);
- }
- else
- {
- /* no longer needed */
- data.fp = 0;
+ if (!data.error)
+ {
+ data.error = repopagestore_read_or_setup_pages(&data.store, data.fp, pagesize, fileoffset);
+ if (data.error == SOLV_ERROR_EOF)
+ pool_error(pool, data.error, "repopagestore setup: unexpected EOF");
+ else if (data.error)
+ pool_error(pool, data.error, "repopagestore setup failed");
+ }
}
- solv_free(idmap);
+ data.fp = 0; /* no longer needed */
if (data.error)
{
- /* XXX: free repodata? */
- return data.error;
+ i = data.error;
+ repodata_freedata(&data);
+ return i;
}
if (parent)
/* overwrite stub repodata */
repodata_freedata(parent);
data.repodataid = parent->repodataid;
+ data.loadcallback = parent->loadcallback;
*parent = data;
}
else
repo->repodata[repo->nrepodata++] = data;
}
+ if ((flags & REPO_EXTEND_SOLVABLES) != 0)
+ {
+ if (repodata_has_keyname(&data, SOLVABLE_FILELIST))
+ repodata_set_filelisttype(repo->repodata + data.repodataid, REPODATA_FILELIST_EXTENSION);
+ }
+ else
+ {
+ if (repodata_lookup_type(&data, SOLVID_META, REPOSITORY_FILTEREDFILELIST))
+ repodata_set_filelisttype(repo->repodata + data.repodataid, REPODATA_FILELIST_FILTERED);
+ }
+
/* create stub repodata entries for all external */
if (!(flags & SOLV_ADD_NO_STUBS) && !parent)
{
if (data.keys[key].name == REPOSITORY_EXTERNAL && data.keys[key].type == REPOKEY_TYPE_FLEXARRAY)
break;
if (key < data.nkeys)
- repodata_create_stubs(repo->repodata + (repo->nrepodata - 1));
+ repodata_create_stubs(repo->repodata + data.repodataid);
}
POOL_DEBUG(SOLV_DEBUG_STATS, "repo_add_solv took %d ms\n", solv_timems(now));