# Source files compiled into the static satsolver library.
set(libsatsolver_SRCS
    bitmap.c poolarch.c poolvendor.c poolid.c strpool.c dirpool.c
    solver.c solverdebug.c repo_solv.c repo_helix.c evr.c pool.c
    queue.c repo.c repodata.c repopage.c util.c policy.c solvable.c)

add_library(satsolver STATIC ${libsatsolver_SRCS})

# Public headers shipped with the library.
set(libsatsolver_HEADERS
    bitmap.h evr.h hash.h policy.h poolarch.h poolvendor.h pool.h
    poolid.h pooltypes.h queue.h solvable.h solver.h solverdebug.h
    repo.h repodata.h repopage.h repo_solv.h repo_helix.h util.h
    strpool.h dirpool.h knownid.h)

# Build position-independent objects so this static archive can be
# linked into shared libraries / language bindings.  Preferred over
# appending -fPIC to CMAKE_C_FLAGS, which leaks to every target.
set_target_properties(satsolver PROPERTIES POSITION_INDEPENDENT_CODE ON)
}
void
+dirpool_free(Dirpool *dp)
+{
+ sat_free(dp->dirs);
+ sat_free(dp->dirtraverse);
+}
+
+void
dirpool_make_dirtraverse(Dirpool *dp)
{
Id parent, i, *dirtraverse;
} Dirpool;
void dirpool_create(Dirpool *dp);
+void dirpool_free(Dirpool *dp);
+
void dirpool_make_dirtraverse(Dirpool *dp);
Id dirpool_add_dir(Dirpool *dp, Id parent, Id comp, int create);
KNOWNID(ARCH_SRC, "src"),
KNOWNID(ARCH_NOSRC, "nosrc"),
KNOWNID(ARCH_NOARCH, "noarch"),
-KNOWNID(REPODATA_INFO, "repodata:info"),
-KNOWNID(REPODATA_EXTERNAL, "repodata:external"),
-KNOWNID(REPODATA_KEYS, "repodata:keys"),
-KNOWNID(REPODATA_LOCATION, "repodata:location"),
-KNOWNID(REPODATA_ADDEDFILEPROVIDES, "repodata:addedfileprovides"),
-KNOWNID(REPODATA_RPMDBCOOKIE, "repodata:rpmdbcookie"),
+
+KNOWNID(REPOSITORY_SOLVABLES, "repository:solvables"),
+KNOWNID(REPOSITORY_DELTAINFO, "repository:deltainfo"),
+
+/* sub-repository information, they will get loaded on demand */
+KNOWNID(REPOSITORY_EXTERNAL, "repository:external"),
+KNOWNID(REPOSITORY_KEYS, "repository:keys"),
+KNOWNID(REPOSITORY_LOCATION, "repository:location"),
+
+/* file provides already added to our solvables */
+KNOWNID(REPOSITORY_ADDEDFILEPROVIDES, "repository:addedfileprovides"),
+/* inode of the rpm database for rpm --rebuilddb detection */
+KNOWNID(REPOSITORY_RPMDBCOOKIE, "repository:rpmdbcookie"),
/* The void type is usable to encode one-valued attributes, they have
no associated data. This is useful to encode values which many solvables
KNOWNID(REPOKEY_TYPE_MD5, "repokey:type:md5"),
KNOWNID(REPOKEY_TYPE_SHA1, "repokey:type:sha1"),
KNOWNID(REPOKEY_TYPE_SHA256, "repokey:type:sha256"),
-KNOWNID(REPOKEY_TYPE_COUNTED, "repokey:type:counted"),
+KNOWNID(REPOKEY_TYPE_FIXARRAY, "repokey:type:fixarray"),
+KNOWNID(REPOKEY_TYPE_FLEXARRAY, "repokey:type:flexarray"),
KNOWNID(SOLVABLE_SUMMARY, "solvable:summary"),
KNOWNID(SOLVABLE_DESCRIPTION, "solvable:description"),
KNOWNID(SOLVABLE_PATCHCATEGORY, "solvable:patchcategory"),
KNOWNID(SOLVABLE_HEADEREND, "solvable:headerend"),
+/* stuff for solvables of type pattern */
KNOWNID(SOLVABLE_CATEGORY, "solvable:category"),
KNOWNID(SOLVABLE_INCLUDES, "solvable:includes"),
KNOWNID(SOLVABLE_EXTENDS, "solvable:extends"),
KNOWNID(SOLVABLE_ICON, "solvable:icon"),
KNOWNID(SOLVABLE_ORDER, "solvable:order"),
-KNOWNID(UPDATE_REBOOT, "update:reboot"), /* reboot suggested (kernel update) */
-KNOWNID(UPDATE_RESTART, "update:restart"), /* restart suggested (update stack update) */
-KNOWNID(UPDATE_RELOGIN, "update:relogin"), /* restart suggested (update stack update) */
+KNOWNID(UPDATE_REBOOT, "update:reboot"), /* reboot suggested (kernel update) */
+KNOWNID(UPDATE_RESTART, "update:restart"), /* restart suggested (update stack update) */
+KNOWNID(UPDATE_RELOGIN,		"update:relogin"),		/* relogin suggested (update stack update) */
-KNOWNID(UPDATE_MESSAGE, "update:message"), /* restart suggested (update stack update) */
+KNOWNID(UPDATE_MESSAGE,		"update:message"),		/* message to show the user for this update */
/* 'content' of patch, usually list of packages */
-KNOWNID(UPDATE_COLLECTION, "update:collection"), /* "name evr arch" */
-KNOWNID(UPDATE_COLLECTION_NAME, "update:collection:name"), /* name */
-KNOWNID(UPDATE_COLLECTION_EVR, "update:collection:evr"), /* epoch:version-release */
-KNOWNID(UPDATE_COLLECTION_ARCH, "update:collection:arch"), /* architecture */
-KNOWNID(UPDATE_COLLECTION_FILENAME, "update:collection:filename"), /* filename (of rpm) */
-KNOWNID(UPDATE_COLLECTION_FLAGS, "update:collection:flags"), /* reboot(1)/restart(2) suggested if this rpm gets updated */
-
- /* external references for the update */
-KNOWNID(UPDATE_REFERENCE_TYPE, "update:reference:type"), /* type, e.g. 'bugzilla' or 'cve' */
-KNOWNID(UPDATE_REFERENCE_HREF, "update:reference:href"), /* href, e.g. 'http://bugzilla...' */
-KNOWNID(UPDATE_REFERENCE_ID, "update:reference:id"), /* id, e.g. bug number */
-KNOWNID(UPDATE_REFERENCE_TITLE, "update:reference:title"), /* title, e.g. "the bla forz scribs on fuggle" */
+KNOWNID(UPDATE_COLLECTION, "update:collection"), /* "name evr arch" */
+KNOWNID(UPDATE_COLLECTION_NAME, "update:collection:name"), /* name */
+KNOWNID(UPDATE_COLLECTION_EVR, "update:collection:evr"), /* epoch:version-release */
+KNOWNID(UPDATE_COLLECTION_ARCH, "update:collection:arch"), /* architecture */
+KNOWNID(UPDATE_COLLECTION_FILENAME, "update:collection:filename"), /* filename (of rpm) */
+KNOWNID(UPDATE_COLLECTION_FLAGS, "update:collection:flags"), /* reboot(1)/restart(2) suggested if this rpm gets updated */
+
+KNOWNID(UPDATE_REFERENCE, "update:reference"), /* external references for the update */
+KNOWNID(UPDATE_REFERENCE_TYPE, "update:reference:type"), /* type, e.g. 'bugzilla' or 'cve' */
+KNOWNID(UPDATE_REFERENCE_HREF, "update:reference:href"), /* href, e.g. 'http://bugzilla...' */
+KNOWNID(UPDATE_REFERENCE_ID, "update:reference:id"), /* id, e.g. bug number */
+KNOWNID(UPDATE_REFERENCE_TITLE, "update:reference:title"), /* title, e.g. "the bla forz scribs on fuggle" */
/* name */
KNOWNID(PRODUCT_SHORTLABEL, "product:shortlabel"),
KNOWNID(PRODUCT_DISTPRODUCT, "product:distproduct"),
KNOWNID(PRODUCT_DISTVERSION, "product:distversion"),
KNOWNID(PRODUCT_TYPE, "product:type"),
-KNOWNID(PRODUCT_URL, "product:url"),
+KNOWNID(PRODUCT_URL, "product:url"),
KNOWNID(PRODUCT_URL_TYPE, "product:url:type"),
KNOWNID(PRODUCT_FLAGS, "product:flags"),
KNOWNID(PRODUCT_PRODUCTLINE, "product:productline"),
KNOWNID(SUSETAGS_DATADIR, "susetags:datadir"),
/* timestamp when the repository was generated */
-KNOWNID(REPOSITORY_TIMESTAMP, "repository:timestamp"),
+KNOWNID(REPOSITORY_TIMESTAMP, "repository:timestamp"),
/* hint when the metadata could be outdated
w/respect to generated timestamp */
-KNOWNID(REPOSITORY_EXPIRE, "repository:expire"),
+KNOWNID(REPOSITORY_EXPIRE, "repository:expire"),
/* which things does this repo provides updates for, if it does */
-KNOWNID(REPOSITORY_UPDATES, "repository:updates"),
+KNOWNID(REPOSITORY_UPDATES, "repository:updates"),
/* which products this repository is supposed to be for */
-KNOWNID(REPOSITORY_PRODUCTS, "repository:products"),
+KNOWNID(REPOSITORY_PRODUCTS, "repository:products"),
/* keyword (tags) for this repository */
-KNOWNID(REPOSITORY_KEYWORDS, "repository:keywords"),
+KNOWNID(REPOSITORY_KEYWORDS, "repository:keywords"),
KNOWNID(DELTA_PACKAGE_NAME, "delta:pkgname"),
-KNOWNID(DELTA_PACKAGE_EVR, "delta:pkgevr"),
-KNOWNID(DELTA_PACKAGE_ARCH, "delta:pkgarch"),
+KNOWNID(DELTA_PACKAGE_EVR, "delta:pkgevr"),
+KNOWNID(DELTA_PACKAGE_ARCH, "delta:pkgarch"),
KNOWNID(DELTA_LOCATION_DIR, "delta:locdir"),
KNOWNID(DELTA_LOCATION_NAME, "delta:locname"),
KNOWNID(DELTA_LOCATION_EVR, "delta:locevr"),
KNOWNID(DELTA_LOCATION_SUFFIX, "delta:locsuffix"),
KNOWNID(DELTA_DOWNLOADSIZE, "delta:downloadsize"),
-KNOWNID(DELTA_CHECKSUM, "delta:checksum"),
-KNOWNID(DELTA_BASE_EVR, "delta:baseevr"),
-KNOWNID(DELTA_SEQ_NAME, "delta:seqname"),
-KNOWNID(DELTA_SEQ_EVR, "delta:seqevr"),
-KNOWNID(DELTA_SEQ_NUM, "delta:seqnum"),
+KNOWNID(DELTA_CHECKSUM, "delta:checksum"),
+KNOWNID(DELTA_BASE_EVR, "delta:baseevr"),
+KNOWNID(DELTA_SEQ_NAME, "delta:seqname"),
+KNOWNID(DELTA_SEQ_EVR, "delta:seqevr"),
+KNOWNID(DELTA_SEQ_NUM, "delta:seqnum"),
KNOWNID(ID_NUM_INTERNAL, 0)
return 0;
}
+static int
+addfileprovides_setid_cb(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
+{
+ Map *provideids = cbdata;
+ if (key->type != REPOKEY_TYPE_IDARRAY)
+ return 0;
+ MAPSET(provideids, kv->id);
+ return kv->eof ? SEARCH_NEXT_SOLVABLE : 0;
+}
+
static void
pool_addfileprovides_search(Pool *pool, struct addfileprovides_cbdata *cbd, struct searchfiles *sf, Repo *repoonly)
{
- Id p, start, end, *idp;
+ Id p, start, end;
Solvable *s;
Repodata *data = 0, *nextdata;
Repo *oldrepo = 0;
int dataincludes = 0;
- int i;
+ int i, j;
Map providedids;
cbd->nfiles = sf->nfiles;
{
if (!s->repo || (repoonly && s->repo != repoonly))
continue;
+ /* check if p is in (oldrepo,data) */
if (s->repo != oldrepo || (data && p >= data->end))
{
data = 0;
}
if (oldrepo == 0)
{
+ /* nope, find new repo/repodata */
+ /* if we don't find a match, set data to the next repodata */
nextdata = 0;
for (i = 0, data = s->repo->repodata; i < s->repo->nrepodata; i++, data++)
{
- if (!data->addedfileprovides || p >= data->end)
+ if (p >= data->end)
continue;
+ if (data->state != REPODATA_AVAILABLE)
+ continue;
+ for (j = 1; j < data->nkeys; j++)
+ if (data->keys[j].name == REPOSITORY_ADDEDFILEPROVIDES && data->keys[j].type == REPOKEY_TYPE_IDARRAY)
+ break;
+ if (j == data->nkeys)
+ continue;
+ /* great, this repodata contains addedfileprovides */
if (!nextdata || nextdata->start > data->start)
nextdata = data;
if (p >= data->start)
break;
}
if (i == s->repo->nrepodata)
- data = nextdata;
+ data = nextdata; /* no direct hit, use next repodata */
if (data)
{
map_init(&providedids, pool->ss.nstrings);
- for (idp = data->addedfileprovides; *idp; idp++)
- MAPSET(&providedids, *idp);
+ repodata_search(data, REPOENTRY_META, REPOSITORY_ADDEDFILEPROVIDES, addfileprovides_setid_cb, &providedids);
for (i = 0; i < cbd->nfiles; i++)
if (!MAPTST(&providedids, cbd->ids[i]))
- {
- break;
- }
+ break;
map_free(&providedids);
dataincludes = i == cbd->nfiles;
}
continue;
if (!MAPTST(installedmap, sp))
continue;
- change += repo_lookup_num(s, SOLVABLE_INSTALLSIZE);
+ change += solvable_lookup_num(s, SOLVABLE_INSTALLSIZE, 0);
}
if (oldinstalled)
{
{
if (MAPTST(installedmap, sp))
continue;
- change -= repo_lookup_num(s, SOLVABLE_INSTALLSIZE);
+ change -= solvable_lookup_num(s, SOLVABLE_INSTALLSIZE, 0);
}
}
return change;
#define SOLV_VERSION_5 5
#define SOLV_VERSION_6 6
#define SOLV_VERSION_7 7
+#define SOLV_VERSION_8 8
/* The format of .solv files might change incompatibly, and that is described
by the above version number. But sometimes we also extend the emitted
domatch_idarray(Solvable *s, Id keyname, struct matchdata *md, Id *ida)
{
KeyValue kv;
+ kv.entry = 0;
+ kv.parent = 0;
for (; *ida && !md->stop; ida++)
{
kv.id = *ida;
kv.eof = ida[1] ? 0 : 1;
repo_matchvalue(md, s, 0, solvablekeys + (keyname - SOLVABLE_NAME), &kv);
+ kv.entry++;
}
}
int i, j, flags;
Solvable *s;
+ kv.parent = 0;
md->stop = 0;
if (!p)
{
{
if (p < data->start || p >= data->end)
continue;
+ if (keyname && !repodata_precheck_keyname(data, keyname))
+ continue;
if (data->state == REPODATA_STUB)
{
if (keyname)
}
if (data->state == REPODATA_ERROR)
continue;
- repodata_search(data, p - data->start, keyname, repo_matchvalue, md);
+ repodata_search(data, p, keyname, repo_matchvalue, md);
if (md->stop > SEARCH_NEXT_KEY)
break;
}
}
const char *
-repo_lookup_str(Solvable *s, Id key)
+repo_lookup_str(Repo *repo, Id entry, Id keyname)
{
- Repo *repo = s->repo;
Pool *pool = repo->pool;
Repodata *data;
- int i, j, n;
+ int i, j;
- switch(key)
+ switch(keyname)
{
case SOLVABLE_NAME:
- return id2str(pool, s->name);
+ return id2str(pool, pool->solvables[entry].name);
case SOLVABLE_ARCH:
- return id2str(pool, s->arch);
+ return id2str(pool, pool->solvables[entry].arch);
case SOLVABLE_EVR:
- return id2str(pool, s->evr);
+ return id2str(pool, pool->solvables[entry].evr);
case SOLVABLE_VENDOR:
- return id2str(pool, s->vendor);
+ return id2str(pool, pool->solvables[entry].vendor);
}
- n = s - pool->solvables;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
- if (n < data->start || n >= data->end)
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
continue;
for (j = 1; j < data->nkeys; j++)
{
- if (data->keys[j].name == key && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID || data->keys[j].type == REPOKEY_TYPE_STR))
- return repodata_lookup_str(data, n - data->start, j);
+ if (data->keys[j].name == keyname && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID || data->keys[j].type == REPOKEY_TYPE_STR))
+ return repodata_lookup_str(data, entry, keyname);
}
}
return 0;
unsigned int
-repo_lookup_num(Solvable *s, Id key)
+repo_lookup_num(Repo *repo, Id entry, Id keyname, unsigned int notfound)
{
- Repo *repo = s->repo;
- Pool *pool = repo->pool;
Repodata *data;
- int i, j, n;
+ int i, j;
- if (key == RPM_RPMDBID)
+ if (keyname == RPM_RPMDBID)
{
- if (repo->rpmdbid)
- return repo->rpmdbid[(s - pool->solvables) - repo->start];
- return 0;
+ if (repo->rpmdbid && entry && entry >= repo->start && entry < repo->end)
+ return repo->rpmdbid[entry - repo->start];
+ return notfound;
}
- n = s - pool->solvables;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
- if (n < data->start || n >= data->end)
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
continue;
for (j = 1; j < data->nkeys; j++)
{
- if (data->keys[j].name == key
+ if (data->keys[j].name == keyname
&& (data->keys[j].type == REPOKEY_TYPE_U32
|| data->keys[j].type == REPOKEY_TYPE_NUM
|| data->keys[j].type == REPOKEY_TYPE_CONSTANT))
{
unsigned value;
- if (repodata_lookup_num(data, n - data->start, j, &value))
+ if (repodata_lookup_num(data, entry, keyname, &value))
return value;
}
}
}
- return 0;
+ return notfound;
}
+Id
+repo_lookup_id(Repo *repo, Id entry, Id keyname)
+{
+ Repodata *data;
+ int i, j;
-/*
- * generic attribute lookup
- * returns non-zero if found
- * zero if not found
- * (XXX: return value is broken atm!)
- */
+ switch(keyname)
+ {
+ case SOLVABLE_NAME:
+ return repo->pool->solvables[entry].name;
+ case SOLVABLE_ARCH:
+ return repo->pool->solvables[entry].arch;
+ case SOLVABLE_EVR:
+ return repo->pool->solvables[entry].evr;
+ case SOLVABLE_VENDOR:
+ return repo->pool->solvables[entry].vendor;
+ }
+ for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
+ {
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
+ continue;
+ for (j = 1; j < data->nkeys; j++)
+ {
+ if (data->keys[j].name == keyname && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID))
+ {
+ Id id = repodata_lookup_id(data, entry, keyname);
+ if (id)
+ {
+ if (data->localpool)
+ id = repodata_globalize_id(data, id);
+ return id;
+ }
+ }
+ }
+ }
+ return 0;
+}
-int
-repo_lookup(Solvable *s, Id key, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata)
+const unsigned char *
+repo_lookup_bin_checksum(Repo *repo, Id entry, Id keyname, Id *typep)
{
- Repo *repo = s->repo;
- Pool *pool = repo->pool;
Repodata *data;
- int i, s_id;
-
- s_id = s - pool->solvables;
+ int i, j;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
- if (s_id < data->start || s_id >= data->end)
+ if (entry && (entry < data->start || entry >= data->end))
continue;
- repodata_search(data, s_id - data->start, key, callback, cbdata);
- return 1;
+ if (!repodata_precheck_keyname(data, keyname))
+ continue;
+ for (j = 1; j < data->nkeys; j++)
+ {
+ if (data->keys[j].name == keyname)
+ {
+ const unsigned char *chk = repodata_lookup_bin_checksum(data, entry, keyname, typep);
+ if (chk)
+ return chk;
+ }
+ }
}
+ *typep = 0;
return 0;
}
+int
+repo_lookup_void(Repo *repo, Id entry, Id keyname)
+{
+ Repodata *data;
+ int i, j;
+ for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
+ {
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
+ continue;
+ for (j = 1; j < data->nkeys; j++)
+ {
+ if (data->keys[j].name == keyname
+ && (data->keys[j].type == REPOKEY_TYPE_VOID))
+ {
+ if (repodata_lookup_void(data, entry, keyname))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
/***********************************************************************/
return data;
}
-static Repodata *
-repo_findrepodata(Repo *repo, Id p, Id keyname)
+Repodata *
+repo_last_repodata(Repo *repo)
{
int i;
- Repodata *data;
-
- /* FIXME: enter nice code here */
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- if ((p < 0 && (-1 - p) >= data->extrastart && (-1 - p) < (data->extrastart + data->nextra))
- || (p >= 0 && p >= data->start && p < data->end))
- return data;
- if (p < 0)
- {
- data = repo->repodata;
- if (data)
- {
- for (i = 1; i < repo->nrepodata; i++)
- if (data->extrastart + data->nextra
- > repo->repodata[i].extrastart + repo->repodata[i].nextra)
- data = repo->repodata + i;
- }
- else
- data = repo_add_repodata(repo, 0);
- repodata_extend_extra(data, (-1 - p) - data->extrastart + 1);
- if (-p > repo->nextra)
- repo->nextra = -p;
- return data;
- }
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- if (p == data->end)
- break;
- if (i < repo->nrepodata)
- {
- repodata_extend(data, p);
- return data;
- }
+ for (i = repo->nrepodata - 1; i >= 0; i--)
+ if (repo->repodata[i].state != REPODATA_STUB)
+ return repo->repodata + i;
return repo_add_repodata(repo, 0);
}
/* Store an Id attribute for entry p in the repo's last repodata. */
void
repo_set_id(Repo *repo, Id p, Id keyname, Id id)
{
  repodata_set_id(repo_last_repodata(repo), p, keyname, id);
}
/* Store a numeric attribute for entry p in the repo's last repodata. */
void
repo_set_num(Repo *repo, Id p, Id keyname, Id num)
{
  repodata_set_num(repo_last_repodata(repo), p, keyname, num);
}
/* Store a string attribute for entry p in the repo's last repodata. */
void
repo_set_str(Repo *repo, Id p, Id keyname, const char *str)
{
  repodata_set_str(repo_last_repodata(repo), p, keyname, str);
}
/* Store a pooled string attribute for entry p in the repo's last repodata. */
void
repo_set_poolstr(Repo *repo, Id p, Id keyname, const char *str)
{
  repodata_set_poolstr(repo_last_repodata(repo), p, keyname, str);
}
/* Append a pooled string to an array attribute of entry p,
 * using the repo's last repodata as storage. */
void
repo_add_poolstr_array(Repo *repo, Id p, Id keyname, const char *str)
{
  repodata_add_poolstr_array(repo_last_repodata(repo), p, keyname, str);
}
void
Repodata *data;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- if (data->attrs || data->extraattrs)
+ if (data->attrs || data->xattrs)
repodata_internalize(data);
}
const char *str;
int num;
int num2;
- int eof;
+
+ int entry; /* array entry, starts with 0 */
+ int eof; /* last entry reached */
+
+ struct _KeyValue *parent;
} KeyValue;
/* search flags */
#define SEARCH_SUBSTRING 2
#define SEARCH_GLOB 3
#define SEARCH_REGEX 4
+#define SEARCH_ERROR 5
#define SEARCH_NOCASE (1<<8)
#define SEARCH_NO_STORAGE_SOLVABLE (1<<9)
#define SEARCH_EXTRA (1<<10)
+#define SEARCH_SUB (1<<10)
#define SEARCH_ALL_REPOS (1<<11)
#define SEARCH_SKIP_KIND (1<<12)
+
/* By default we don't match in attributes representing filelists
because the construction of those strings is costly. Specify this
flag if you want this. In that case kv->str will contain the full
#define SEARCH_FILES (1<<13)
/* Internal */
-#define __SEARCH_ONESOLVABLE (1 << 31)
+#define SEARCH_THISENTRY (1<<31)
+
+
+/* standard flags used in the repo_add functions */
+#define REPO_REUSE_REPODATA (1 << 0)
+#define REPO_NO_INTERNALIZE (1 << 1)
Repodata *repo_add_repodata(Repo *repo, int localpool);
+Repodata *repo_last_repodata(Repo *repo);
void repo_search(Repo *repo, Id p, Id key, const char *match, int flags, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata);
/* returns the string value of the attribute, or NULL if not found */
-const char * repo_lookup_str(Solvable *s, Id key);
+const char *repo_lookup_str(Repo *repo, Id entry, Id key);
/* returns the integer value of the attribute, or 0 if not found */
-unsigned int repo_lookup_num(Solvable *s, Id key);
-/* generic attribute lookup */
-int repo_lookup(Solvable *s, Id key, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata);
+unsigned int repo_lookup_num(Repo *repo, Id entry, Id key, unsigned int notfound);
+Id repo_lookup_id(Repo *repo, Id entry, Id keyid);
+int repo_lookup_void(Repo *repo, Id entry, Id keyid);
+const unsigned char *repo_lookup_bin_checksum(Repo *repo, Id entry, Id keyid, Id *typep);
+
+#if 0
typedef struct _Dataiterator
{
Repodata *data;
int subnum;
Id subschema;
} Dataiterator;
+#else
+
+typedef struct _Datamatcher {
+ Pool *pool;
+ int flags;
+ void *match;
+ int error;
+} Datamatcher;
+
+typedef struct _Dataiterator
+{
+ int state;
+ int flags;
+
+ Pool *pool;
+ Repo *repo;
+ Repodata *data;
+
+ /* data pointers */
+ unsigned char *dp;
+ unsigned char *ddp;
+ Id *idp;
+ Id *keyp;
+
+ /* the result */
+ Repokey *key;
+ KeyValue kv;
+
+ /* our matcher */
+ Datamatcher matcher;
+
+ /* iterators/filters */
+ Id keyname;
+ Id repodataid;
+ Id entry;
+ Id repoid;
+
+ /* recursion data */
+ struct di_parent {
+ KeyValue kv;
+ unsigned char *dp;
+ Id *keyp;
+ } parents[3];
+ int nparents;
+} Dataiterator;
+
+#endif
/* Use these like:
Dataiterator di;
pd->depth++;
/* find node name in stateswitch */
+ if (!pd->swtab[pd->state])
+ return;
for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++)
{
if (!strcmp(sw->ename, name))
case STATE_PACKAGE: /* solvable name */
pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->repo));
- if (pd->data)
- repodata_extend(pd->data, pd->solvable - pool->solvables);
-
if (!strcmp(name, "selection"))
pd->kind = "selection";
else if (!strcmp(name, "pattern"))
case STATE_BUILDTIME:
t = atoi (pd->content);
if (t)
- repodata_set_num(pd->data, repodata_get_handle(pd->data, (s - pool->solvables) - pd->repo->start), SOLVABLE_BUILDTIME, t);
+ repodata_set_num(pd->data, s - pool->solvables, SOLVABLE_BUILDTIME, t);
break;
case STATE_UPDATE: /* new version, keeping all other metadata */
evr = evr2id(pool, pd,
*/
void
-repo_add_helix(Repo *repo, FILE *fp)
+repo_add_helix(Repo *repo, FILE *fp, int flags)
{
Pool *pool = repo->pool;
Parsedata pd;
- Repodata *data = 0;
+ Repodata *data;
char buf[BUFF_SIZE];
int i, l;
struct stateswitch *sw;
- if (repo->nrepodata)
- /* use last repodata */
- data = repo->repodata + repo->nrepodata - 1;
- else
+ if (!(flags & REPO_REUSE_REPODATA))
data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
/* prepare parsedata */
memset(&pd, 0, sizeof(pd));
break;
}
XML_ParserFree(parser);
-
- if (pd.data)
- repodata_internalize(pd.data);
-
free(pd.content);
free(pd.evrspace);
+
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
#include "pool.h"
#include "repo.h"
-extern void repo_add_helix(Repo *repo, FILE *fp);
+extern void repo_add_helix(Repo *repo, FILE *fp, int flags);
#ifdef __cplusplus
}
#include "util.h"
#include "repopack.h"
+#include "repopage.h"
#define INTERESTED_START SOLVABLE_NAME
#define INTERESTED_END SOLVABLE_ENHANCES
static Pool *mypool; /* for pool_debug... */
-/*-----------------------------------------------------------------*/
-/* .solv read functions */
+
+static void repodata_load_stub(Repodata *data);
+
+
+/*******************************************************************************
+ * functions to extract data from a file handle
+ */
/*
* read u32
}
-/*
- * read array of Ids
- */
-
-#if 0
static Id *
-read_rel_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end, Id marker)
+read_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end)
{
unsigned int x = 0;
int c;
- Id old = 0;
if (data->error)
return 0;
continue;
}
x = (x << 6) | (c & 63);
- if (x == 0)
- {
- /* marker hack */
- if (store == end)
- {
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n");
- data->error = SOLV_ERROR_OVERFLOW;
- return 0;
- }
- if (c != 0x40)
- {
- *store++ = 0;
- return store;
- }
- *store++ = marker; /* do not map! */
- old = 0;
- x = 0;
- continue;
- }
- x = (x - 1) + old;
- old = x;
if (max && x >= max)
{
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: id too large (%u/%u)\n", x, max);
+ pool_debug(mypool, SAT_ERROR, "read_idarray: id too large (%u/%u)\n", x, max);
data->error = SOLV_ERROR_ID_RANGE;
return 0;
}
x = map[x];
if (store == end)
{
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n");
+ pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
return 0;
}
*store++ = x;
return store;
if (store == end)
{
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n");
+ pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
data->error = SOLV_ERROR_OVERFLOW;
return 0;
}
x = 0;
}
}
-#endif
+
+
+/*******************************************************************************
+ * functions to extract data from memory
+ */
+
+/*
+ * read array of Ids
+ */
static inline unsigned char *
data_read_id_max(unsigned char *dp, Id *ret, Id *map, int max, int *error)
}
-static Id *
-read_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end)
-{
- unsigned int x = 0;
- int c;
-
- if (data->error)
- return 0;
- for (;;)
- {
- c = getc(data->fp);
- if (c == EOF)
- {
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
- return 0;
- }
- if ((c & 128) != 0)
- {
- x = (x << 7) ^ c ^ 128;
- continue;
- }
- x = (x << 6) | (c & 63);
- if (max && x >= max)
- {
- pool_debug(mypool, SAT_ERROR, "read_idarray: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
- return 0;
- }
- if (map)
- x = map[x];
- if (store == end)
- {
- pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
- return 0;
- }
- *store++ = x;
- if ((c & 64) == 0)
- {
- if (x == 0) /* already have trailing zero? */
- return store;
- if (store == end)
- {
- pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
- data->error = SOLV_ERROR_OVERFLOW;
- return 0;
- }
- *store++ = 0;
- return store;
- }
- x = 0;
- }
-}
-
-static void
-read_str(Repodata *data, char **inbuf, unsigned *len)
-{
- unsigned char *buf = (unsigned char*)*inbuf;
- if (!buf)
- {
- buf = sat_malloc(1024);
- *len = 1024;
- }
- int c;
- unsigned ofs = 0;
- while((c = getc(data->fp)) != 0)
- {
- if (c == EOF)
- {
- pool_debug (mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
- return;
- }
- /* Plus 1 as we also want to add the 0. */
- if (ofs + 1 >= *len)
- {
- *len += 256;
- /* Don't realloc on the inbuf, it might be on the stack. */
- if (buf == (unsigned char*)*inbuf)
- {
- buf = sat_malloc(*len);
- memcpy(buf, *inbuf, *len - 256);
- }
- else
- buf = sat_realloc(buf, *len);
- }
- buf[ofs++] = c;
- }
- buf[ofs++] = 0;
- *inbuf = (char*)buf;
-}
-
-static void
-skip_item(Repodata *data, unsigned type, unsigned numid, unsigned numrel)
-{
- switch (type)
- {
- case REPOKEY_TYPE_VOID:
- case REPOKEY_TYPE_CONSTANT:
- case REPOKEY_TYPE_CONSTANTID:
- break;
- case REPOKEY_TYPE_ID:
- read_id(data, numid + numrel); /* just check Id */
- break;
- case REPOKEY_TYPE_DIR:
- read_id(data, numid + data->dirpool.ndirs); /* just check Id */
- break;
- case REPOKEY_TYPE_NUM:
- read_id(data, 0);
- break;
- case REPOKEY_TYPE_U32:
- read_u32(data);
- break;
- case REPOKEY_TYPE_STR:
- while (read_u8(data) != 0)
- ;
- break;
- case REPOKEY_TYPE_MD5:
- {
- int i;
- for (i = 0; i < SIZEOF_MD5; i++)
- read_u8(data);
- break;
- }
- case REPOKEY_TYPE_SHA1:
- {
- int i;
- for (i = 0; i < SIZEOF_SHA1; i++)
- read_u8(data);
- break;
- }
- case REPOKEY_TYPE_SHA256:
- {
- int i;
- for (i = 0; i < SIZEOF_SHA256; i++)
- read_u8(data);
- break;
- }
- case REPOKEY_TYPE_IDARRAY:
- case REPOKEY_TYPE_REL_IDARRAY:
- while ((read_u8(data) & 0xc0) != 0)
- ;
- break;
- case REPOKEY_TYPE_DIRNUMNUMARRAY:
- for (;;)
- {
- read_id(data, numid + data->dirpool.ndirs); /* just check Id */
- read_id(data, 0);
- if (!(read_id(data, 0) & 0x40))
- break;
- }
- break;
- case REPOKEY_TYPE_DIRSTRARRAY:
- for (;;)
- {
- Id id = read_id(data, 0);
- while (read_u8(data) != 0)
- ;
- if (!(id & 0x40))
- break;
- }
- break;
- default:
- pool_debug(mypool, SAT_ERROR, "unknown type %d\n", type);
- data->error = SOLV_ERROR_CORRUPT;
- break;
- }
-}
-
-static int
-key_cmp (const void *pa, const void *pb)
-{
- Repokey *a = (Repokey *)pa;
- Repokey *b = (Repokey *)pb;
- return a->name - b->name;
-}
-
-static void repodata_load_solv(Repodata *data);
-
-static void
-parse_external_repodata(Repodata *maindata, Id *keyp, Repokey *keys, Id *idmap, unsigned numid, unsigned numrel)
-{
- Repo *repo = maindata->repo;
- Id key, id;
- Id *ida, *ide;
- Repodata *data;
- int i, n;
-
- repo->repodata = sat_realloc2(repo->repodata, repo->nrepodata + 1, sizeof (*data));
- data = repo->repodata + repo->nrepodata++;
- memset(data, 0, sizeof(*data));
- data->repo = repo;
- data->pagefd = -1;
- data->state = REPODATA_STUB;
- data->loadcallback = repodata_load_solv;
-
- while ((key = *keyp++) != 0)
- {
- id = keys[key].name;
- switch (keys[key].type)
- {
- case REPOKEY_TYPE_IDARRAY:
- if (id != REPODATA_KEYS)
- {
- skip_item(maindata, REPOKEY_TYPE_IDARRAY, numid, numrel);
- break;
- }
- /* read_idarray writes a terminating 0, that's why the + 1 */
- ida = sat_calloc(keys[key].size + 1, sizeof(Id));
- ide = read_idarray(maindata, numid, idmap, ida, ida + keys[key].size + 1);
- n = ide - ida - 1;
- if (n & 1)
- {
- pool_debug (mypool, SAT_ERROR, "invalid attribute data\n");
- maindata->error = SOLV_ERROR_CORRUPT;
- return;
- }
- data->nkeys = 1 + (n >> 1);
- data->keys = sat_malloc2(data->nkeys, sizeof(data->keys[0]));
- memset(data->keys, 0, sizeof(Repokey));
- for (i = 1, ide = ida; i < data->nkeys; i++)
- {
- data->keys[i].name = *ide++;
- data->keys[i].type = *ide++;
- data->keys[i].size = 0;
- data->keys[i].storage = 0;
- }
- sat_free(ida);
- if (data->nkeys > 2)
- qsort(data->keys + 1, data->nkeys - 1, sizeof(data->keys[0]), key_cmp);
- break;
- case REPOKEY_TYPE_STR:
- if (id != REPODATA_LOCATION)
- skip_item(maindata, REPOKEY_TYPE_STR, numid, numrel);
- else
- {
- char buf[1024];
- unsigned len = sizeof(buf);
- char *filename = buf;
- read_str(maindata, &filename, &len);
- data->location = strdup(filename);
- if (filename != buf)
- free(filename);
- }
- break;
- default:
- skip_item(maindata, keys[key].type, numid, numrel);
- break;
- }
- }
-}
-
-static void
-parse_info_repodata(Repodata *maindata, Id *keyp, Repokey *keys, Id *idmap, unsigned numid, unsigned numrel)
-{
- Id key, id;
- Id *ida;
- while ((key = *keyp++) != 0)
- {
- id = keys[key].name;
- if (id == REPODATA_ADDEDFILEPROVIDES && keys[key].type == REPOKEY_TYPE_REL_IDARRAY)
- {
- Id old = 0;
- /* + 1 just in case */
- ida = sat_calloc(keys[key].size + 1, sizeof(Id));
- read_idarray(maindata, 0, 0, ida, ida + keys[key].size + 1);
- maindata->addedfileprovides = ida;
- for (; *ida; ida++)
- {
- old += *ida - 1;
- if (old >= numid)
- {
- *ida = 0;
- break;
- }
- *ida = idmap ? idmap[old] : old;
- }
- continue;
- }
- if (id == REPODATA_RPMDBCOOKIE && keys[key].type == REPOKEY_TYPE_SHA256)
- {
- int i;
- for (i = 0; i < 32; i++)
- maindata->repo->rpmdbcookie[i] = read_u8(maindata);
- continue;
- }
- skip_item(maindata, keys[key].type, numid, numrel);
- }
-}
-
-/*-----------------------------------------------------------------*/
-static void
-skip_schema(Repodata *data, Id *keyp, Repokey *keys, unsigned int numid, unsigned int numrel)
-{
- Id key;
- while ((key = *keyp++) != 0)
- skip_item(data, keys[key].type, numid, numrel);
-}
+/*******************************************************************************
+ * functions to add data to our incore memory space
+ */
-/*-----------------------------------------------------------------*/
static void
incore_add_id(Repodata *data, Id x)
#endif
+/*******************************************************************************
+ * callback to create our stub sub-repodatas from the incore data
+ */
+
+struct create_stub_data {
+ Repodata *data;
+ Id xkeyname;
+};
+
+int
+create_stub_cb(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
+{
+ struct create_stub_data *stubdata = cbdata;
+ if (key->name == REPOSITORY_EXTERNAL && key->type == REPOKEY_TYPE_FLEXARRAY)
+ {
+ if (stubdata->data)
+ {
+ repodata_internalize(stubdata->data);
+ if (data->start != data->end)
+ {
+ repodata_extend(stubdata->data, data->start);
+ repodata_extend(stubdata->data, data->end - 1);
+ }
+ stubdata->data = 0;
+ }
+ if (kv->eof)
+ return SEARCH_NEXT_SOLVABLE;
+ stubdata->data = repo_add_repodata(data->repo, 0);
+ stubdata->data->state = REPODATA_STUB;
+ stubdata->data->loadcallback = repodata_load_stub;
+ return 0;
+ }
+ if (!stubdata->data)
+ return SEARCH_NEXT_KEY;
+ switch(key->type)
+ {
+ case REPOKEY_TYPE_ID:
+ repodata_set_id(stubdata->data, REPOENTRY_META, key->name, kv->id);
+ break;
+ case REPOKEY_TYPE_CONSTANTID:
+ repodata_set_constantid(stubdata->data, REPOENTRY_META, key->name, kv->id);
+ break;
+ case REPOKEY_TYPE_STR:
+ repodata_set_str(stubdata->data, REPOENTRY_META, key->name, kv->str);
+ break;
+ case REPOKEY_TYPE_VOID:
+ repodata_set_void(stubdata->data, REPOENTRY_META, key->name);
+ break;
+ case REPOKEY_TYPE_NUM:
+ repodata_set_num(stubdata->data, REPOENTRY_META, key->name, kv->num);
+ break;
+ case REPOKEY_TYPE_IDARRAY:
+ repodata_add_idarray(stubdata->data, REPOENTRY_META, key->name, kv->id);
+ if (key->name == REPOSITORY_KEYS)
+ {
+ if (!stubdata->xkeyname)
+ stubdata->xkeyname = kv->id;
+ else
+ {
+ Repokey xkey;
+
+ xkey.name = stubdata->xkeyname;
+ xkey.type = kv->id;
+ xkey.storage = KEY_STORAGE_INCORE;
+ xkey.size = 0;
+ repodata_key2id(stubdata->data, &xkey, 1);
+ stubdata->xkeyname = 0;
+ }
+ if (kv->eof)
+ stubdata->xkeyname = 0;
+ }
+ break;
+ case REPOKEY_TYPE_MD5:
+ case REPOKEY_TYPE_SHA1:
+ case REPOKEY_TYPE_SHA256:
+ repodata_set_checksum(stubdata->data, REPOENTRY_META, key->name, key->type, kv->str);
+ break;
+ default:
+ return SEARCH_NEXT_KEY;
+ }
+ return 0;
+}
+
-// ----------------------------------------------
+/*******************************************************************************
+ * our main function
+ */
/*
- * read repo from .solv file
- * and add it to pool
+ * read repo from .solv file and add it to pool
+ * if parent is set, replace its stub repodata with the read data
+ * (this is used to replace a repodata stub with the real data)
*/
static int
Pool *pool = repo->pool;
int i, l;
unsigned int numid, numrel, numdir, numsolv;
- unsigned int numkeys, numschemata, numinfo, numextra, contentver;
+ unsigned int numkeys, numschemata;
Offset sizeid;
Offset *str; /* map Id -> Offset into string space */
unsigned int solvversion;
Repokey *keys;
Id *schemadata, *schemadatap, *schemadataend;
- Id *schemata, key;
+ Id *schemata, key, *keyp;
+ int nentries;
int have_xdata;
- unsigned oldnrepodata;
int maxsize, allsize;
unsigned char *buf, *dp, *dps;
int left;
+ Id stack[10];
+ int keydepth;
struct _Stringpool *spool;
solvversion = read_u32(&data);
switch (solvversion)
{
- case SOLV_VERSION_6:
- break;
- case SOLV_VERSION_7:
+ case SOLV_VERSION_8:
break;
default:
pool_debug(pool, SAT_ERROR, "unsupported SOLV version\n");
numsolv = read_u32(&data);
numkeys = read_u32(&data);
numschemata = read_u32(&data);
- numinfo = read_u32(&data);
- if (solvversion > SOLV_VERSION_6)
- {
- numextra = read_u32(&data);
- contentver = read_u32(&data);
- }
- else
- numextra = 0, contentver = 1;
solvflags = read_u32(&data);
if (numdir && numdir < 2)
{
if (numrel)
{
- pool_debug(pool, SAT_ERROR, "relations are forbidden in a store\n");
+ pool_debug(pool, SAT_ERROR, "relations are forbidden in a sub-repository\n");
return SOLV_ERROR_CORRUPT;
}
if (parent->end - parent->start != numsolv)
{
- pool_debug(pool, SAT_ERROR, "unequal number of solvables in a store\n");
- return SOLV_ERROR_CORRUPT;
- }
- if (parent->nextra != numextra)
- {
- pool_debug(pool, SAT_ERROR, "unequal number of non-solvables in a store\n");
- return SOLV_ERROR_CORRUPT;
- }
- if (numinfo)
- {
- pool_debug(pool, SAT_ERROR, "info blocks are forbidden in a store\n");
+ pool_debug(pool, SAT_ERROR, "sub-repository solvable number doesn't match main repository (%d - %d)\n", parent->end - parent->start, numsolv);
return SOLV_ERROR_CORRUPT;
}
}
type = idmap[type];
else if (parent)
type = str2id(pool, stringpool_id2str(spool, type), 1);
- if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_COUNTED)
+ if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_FLEXARRAY)
{
pool_debug(pool, SAT_ERROR, "unsupported data type '%s'\n", id2str(pool, type));
data.error = SOLV_ERROR_UNSUPPORTED;
data.keys = keys;
data.nkeys = numkeys;
+ for (i = 1; i < numkeys; i++)
+ {
+ id = keys[i].name;
+ data.keybits[(id >> 3) & (sizeof(data.keybits) - 1)] |= 1 << (id & 7);
+ }
/******* Part 5: Schemata ********************************************/
data.schemadata = schemadata;
data.schemadatalen = schemadataend - data.schemadata;
+ /******* Part 6: Data ********************************************/
- /******* Part 6: Info ***********************************************/
- oldnrepodata = repo->nrepodata;
- if (numinfo)
- {
- id = read_id(&data, 0);
- id = read_id(&data, 0);
- }
- for (i = 0; i < numinfo; i++)
- {
- /* for now we're just interested in data that starts with
- * the repodata_external id
- */
- Id *keyp;
- id = read_id(&data, numschemata);
- keyp = schemadata + schemata[id];
- key = *keyp;
- if (keys[key].name == REPODATA_EXTERNAL && keys[key].type == REPOKEY_TYPE_VOID)
- {
- /* external data for some ids */
- parse_external_repodata(&data, keyp, keys, idmap, numid, numrel);
- }
- else if (keys[key].name == REPODATA_INFO)
- {
- parse_info_repodata(&data, keyp, keys, idmap, numid, numrel);
- }
- else
- {
- skip_schema(&data, keyp, keys, numid, numrel);
- }
- }
-
+ idarraydatap = idarraydataend = 0;
+ size_idarray = 0;
- /******* Part 7: item data *******************************************/
+ maxsize = read_id(&data, 0);
+ allsize = read_id(&data, 0);
+ maxsize += 5; /* so we can read the next schema */
+ if (maxsize > allsize)
+ maxsize = allsize;
- /* calculate idarray size */
- size_idarray = 0;
- for (i = 1; i < numkeys; i++)
- {
- id = keys[i].name;
- if ((keys[i].type == REPOKEY_TYPE_IDARRAY || keys[i].type == REPOKEY_TYPE_REL_IDARRAY)
- && id >= INTERESTED_START && id <= INTERESTED_END)
- size_idarray += keys[i].size;
- }
+ left = 0;
+ buf = sat_calloc(maxsize + 4, 1);
+ dp = buf;
- if (numsolv || numextra)
+ l = maxsize;
+ if (l > allsize)
+ l = allsize;
+ if (!l || fread(buf, l, 1, data.fp) != 1)
{
- maxsize = read_id(&data, 0);
- allsize = read_id(&data, 0);
- if (maxsize > allsize)
- {
- pool_debug(pool, SAT_ERROR, "maxsize %d is greater then allsize %d\n", maxsize, allsize);
- data.error = SOLV_ERROR_CORRUPT;
- }
- }
- else
- maxsize = allsize = 0;
-
- /* allocate needed space in repo */
- /* we add maxsize because it is an upper limit for all idarrays */
- repo_reserve_ids(repo, 0, size_idarray + maxsize + 1);
- idarraydatap = repo->idarraydata + repo->idarraysize;
- repo->idarraysize += size_idarray;
- idarraydataend = idarraydatap + size_idarray;
- repo->lastoff = 0;
-
- /* read solvables */
- if (numsolv)
- {
- if (parent)
- s = pool_id2solvable(pool, parent->start);
- else
- s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv));
- /* store start and end of our id block */
- data.start = s - pool->solvables;
- data.end = data.start + numsolv;
- /* In case we have info blocks, make them refer to our part of the
- repository now. */
- for (i = oldnrepodata; i < repo->nrepodata; i++)
- {
- repo->repodata[i].start = data.start;
- repo->repodata[i].end = data.end;
- }
+ pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
+ data.error = SOLV_ERROR_EOF;
+ id = 0;
}
else
- s = 0;
-
- if (numextra)
- {
- data.extrastart = repo->nextra;
- repodata_extend_extra(&data, numextra);
- repo->nextra += numextra;
- for (i = oldnrepodata; i < repo->nrepodata; i++)
- {
- repo->repodata[i].extrastart = data.extrastart;
- repo->repodata[i].nextra = data.nextra;
- }
- }
-
- if (have_xdata)
{
- /* reserve one byte so that all offsets are not zero */
- incore_add_id(&data, 0);
- repodata_extend_block(&data, data.start, numsolv);
+ left = l;
+ allsize -= l;
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
}
- left = 0;
- buf = sat_calloc(maxsize + 4, 1);
- dp = buf;
- for (i = 0; i < numsolv + numextra; i++, s++)
+ incore_add_id(&data, 0); /* reserve one byte so that all offsets are not zero */
+ incore_add_id(&data, id);
+ keyp = schemadata + schemata[id];
+ data.mainschema = id;
+ for (i = 0; keyp[i]; i++)
+ ;
+ if (i)
+ data.mainschemaoffsets = sat_calloc(i, sizeof(Id));
+
+ nentries = 0;
+ keydepth = 0;
+ s = 0;
+ for(;;)
{
- Id *keyp;
- if (data.error)
- break;
-
- left -= (dp - buf);
- if (left < 0)
- {
- pool_debug(mypool, SAT_ERROR, "buffer overrun\n");
- data.error = SOLV_ERROR_EOF;
- break;
- }
- if (left)
- memmove(buf, dp, left);
- l = maxsize - left;
- if (l > allsize)
- l = allsize;
- if (l && fread(buf + left, l, 1, data.fp) != 1)
- {
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data.error = SOLV_ERROR_EOF;
- break;
- }
- allsize -= l;
- left += l;
- dp = buf;
-
- dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
- if (have_xdata)
+ key = *keyp++;
+#if 0
+printf("key %d at %d\n", key, keyp - 1 - schemadata);
+#endif
+ if (!key)
{
- if (i < numsolv)
- data.incoreoffset[i] = data.incoredatalen;
- else
- data.extraoffset[i - numsolv] = data.incoredatalen;
- incore_add_id(&data, id);
- }
- if (i >= numsolv)
- s = 0;
+ if (nentries)
+ {
+ if (s && keydepth == 2)
+ {
+ s++; /* next solvable */
+ if (have_xdata)
+ data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
+ }
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
+ incore_add_id(&data, id);
+ keyp = schemadata + schemata[id];
+ nentries--;
+ continue;
+ }
+ if (!keydepth)
+ break;
+ keyp = schemadata + stack[--keydepth];
+ nentries = stack[--keydepth];
#if 0
- if (i < numsolv)
- fprintf(stderr, "solv %d: schema %d\n", i, id);
- else
- fprintf(stderr, "extra %d: schema %d\n", i - numsolv, id);
+printf("pop flexarray %d %d\n", keydepth, nentries);
#endif
- keyp = schemadata + schemata[id];
- while ((key = *keyp++) != 0)
+ if (!keydepth && s)
+ s = 0; /* back from solvables */
+ continue;
+ }
+
+ if (keydepth <= 2)
{
+ if (keydepth == 0)
+ data.mainschemaoffsets[keyp - 1 - schemadata + schemata[data.mainschema]] = data.incoredatalen;
+ /* read data chunk to dp */
if (data.error)
break;
+ left -= (dp - buf);
+ if (left < 0)
+ {
+ pool_debug(mypool, SAT_ERROR, "buffer overrun\n");
+ data.error = SOLV_ERROR_EOF;
+ break;
+ }
+ if (left)
+ memmove(buf, dp, left);
+ l = maxsize - left;
+ if (l > allsize)
+ l = allsize;
+ if (l && fread(buf + left, l, 1, data.fp) != 1)
+ {
+ pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
+ data.error = SOLV_ERROR_EOF;
+ break;
+ }
+ allsize -= l;
+ left += l;
+ dp = buf;
+ }
- id = keys[key].name;
#if 0
- if (i < numsolv)
- fprintf(stderr, "solv %d name %d type %d class %d\n", i, id, keys[key].type, keys[key].storage);
- else
- fprintf(stderr, "extra %d name %d type %d class %d\n", i - numsolv, id, keys[key].type, keys[key].storage);
+printf("=> %s %s %p\n", id2str(pool, keys[key].name), id2str(pool, keys[key].type), s);
+#endif
+ id = keys[key].name;
+ if (keys[key].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ dps = dp;
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ incore_add_blob(&data, dps, dp - dps);
+ continue;
+ }
+ switch (keys[key].type)
+ {
+ case REPOKEY_TYPE_ID:
+ dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data.error);
+ if (s && id == SOLVABLE_NAME)
+ s->name = did;
+ else if (s && id == SOLVABLE_ARCH)
+ s->arch = did;
+ else if (s && id == SOLVABLE_EVR)
+ s->evr = did;
+ else if (s && id == SOLVABLE_VENDOR)
+ s->vendor = did;
+ else if (keys[key].storage == KEY_STORAGE_INCORE)
+ incore_add_id(&data, did);
+#if 0
+ POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %s\n", id2str(pool, id), id2str(pool, did));
+#endif
+ break;
+ case REPOKEY_TYPE_U32:
+ dp = data_read_u32(dp, &h);
+#if 0
+ POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %u\n", id2str(pool, id), h);
#endif
- if (keys[key].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ if (s && id == RPM_RPMDBID)
+ {
+ if (!repo->rpmdbid)
+ repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id));
+ repo->rpmdbid[(s - pool->solvables) - repo->start] = h;
+ }
+ else if (keys[key].storage == KEY_STORAGE_INCORE)
+ incore_add_u32(&data, h);
+ break;
+ case REPOKEY_TYPE_IDARRAY:
+ case REPOKEY_TYPE_REL_IDARRAY:
+ if (!s || id < INTERESTED_START || id > INTERESTED_END)
{
- /* copy offset/length into incore */
dps = dp;
- dp = data_skip(dp, REPOKEY_TYPE_ID);
- dp = data_skip(dp, REPOKEY_TYPE_ID);
- incore_add_blob(&data, dps, dp - dps);
- continue;
+ dp = data_skip(dp, REPOKEY_TYPE_IDARRAY);
+ if (keys[key].storage != KEY_STORAGE_INCORE)
+ break;
+ if (idmap)
+ incore_map_idarray(&data, dps, idmap, numid);
+ else
+ incore_add_blob(&data, dps, dp - dps);
+ break;
}
- switch (keys[key].type)
+ ido = idarraydatap - repo->idarraydata;
+ if (keys[key].type == REPOKEY_TYPE_IDARRAY)
+ dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error);
+ else if (id == SOLVABLE_REQUIRES)
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_PREREQMARKER);
+ else if (id == SOLVABLE_PROVIDES)
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_FILEMARKER);
+ else
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, 0);
+ if (idarraydatap > idarraydataend)
{
- case REPOKEY_TYPE_ID:
- dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data.error);
- if (id == SOLVABLE_NAME)
- s->name = did;
- else if (id == SOLVABLE_ARCH)
- s->arch = did;
- else if (id == SOLVABLE_EVR)
- s->evr = did;
- else if (id == SOLVABLE_VENDOR)
- s->vendor = did;
- else if (keys[key].storage == KEY_STORAGE_INCORE)
- incore_add_id(&data, did);
-#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %s\n", id2str(pool, id), id2str(pool, did));
-#endif
+ pool_debug(pool, SAT_ERROR, "idarray overflow\n");
+ data.error = SOLV_ERROR_OVERFLOW;
break;
- case REPOKEY_TYPE_U32:
- dp = data_read_u32(dp, &h);
+ }
+ if (id == SOLVABLE_PROVIDES)
+ s->provides = ido;
+ else if (id == SOLVABLE_OBSOLETES)
+ s->obsoletes = ido;
+ else if (id == SOLVABLE_CONFLICTS)
+ s->conflicts = ido;
+ else if (id == SOLVABLE_REQUIRES)
+ s->requires = ido;
+ else if (id == SOLVABLE_RECOMMENDS)
+ s->recommends= ido;
+ else if (id == SOLVABLE_SUPPLEMENTS)
+ s->supplements = ido;
+ else if (id == SOLVABLE_SUGGESTS)
+ s->suggests = ido;
+ else if (id == SOLVABLE_ENHANCES)
+ s->enhances = ido;
#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %u\n", id2str(pool, id), h);
+ POOL_DEBUG(SAT_DEBUG_STATS, "%s ->\n", id2str(pool, id));
+ for (; repo->idarraydata[ido]; ido++)
+ POOL_DEBUG(SAT_DEBUG_STATS," %s\n", dep2str(pool, repo->idarraydata[ido]));
#endif
- if (id == RPM_RPMDBID)
- {
- if (!repo->rpmdbid)
- repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id));
- repo->rpmdbid[i] = h;
- }
- else if (keys[key].storage == KEY_STORAGE_INCORE)
- incore_add_u32(&data, h);
+ break;
+ case REPOKEY_TYPE_FLEXARRAY:
+ if (keydepth == sizeof(stack)/sizeof(*stack))
+ {
+ pool_debug(pool, SAT_ERROR, "flexarray stack overflow\n");
+ data.error = SOLV_ERROR_CORRUPT;
break;
- case REPOKEY_TYPE_IDARRAY:
- case REPOKEY_TYPE_REL_IDARRAY:
- if (id < INTERESTED_START || id > INTERESTED_END)
+ }
+ stack[keydepth++] = nentries;
+ stack[keydepth++] = keyp - schemadata;
+ dp = data_read_id(dp, &nentries);
+ incore_add_id(&data, nentries);
+ if (!nentries)
+ {
+ /* empty array: pop the stack entry we just pushed */
+ keydepth--;
+ nentries = stack[--keydepth];
+ break;
+ }
+ if (keydepth == 2 && id == REPOSITORY_SOLVABLES)
+ {
+ /* hooray! here come the solvables */
+ if (nentries != numsolv)
{
- dps = dp;
- dp = data_skip(dp, REPOKEY_TYPE_IDARRAY);
- if (keys[key].storage != KEY_STORAGE_INCORE)
- break;
- if (idmap)
- incore_map_idarray(&data, dps, idmap, numid);
- else
- incore_add_blob(&data, dps, dp - dps);
+ pool_debug(pool, SAT_ERROR, "inconsistent number of solvables: %d %d\n", nentries, numsolv);
+ data.error = SOLV_ERROR_CORRUPT;
break;
}
- ido = idarraydatap - repo->idarraydata;
- if (keys[key].type == REPOKEY_TYPE_IDARRAY)
- dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error);
- else if (id == SOLVABLE_REQUIRES)
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_PREREQMARKER);
- else if (id == SOLVABLE_PROVIDES)
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_FILEMARKER);
- else
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, 0);
- if (idarraydatap > idarraydataend)
+ if (idarraydatap)
{
- pool_debug(pool, SAT_ERROR, "idarray overflow\n");
- data.error = SOLV_ERROR_OVERFLOW;
+ pool_debug(pool, SAT_ERROR, "more than one solvable block\n");
+ data.error = SOLV_ERROR_CORRUPT;
break;
}
- if (id == SOLVABLE_PROVIDES)
- s->provides = ido;
- else if (id == SOLVABLE_OBSOLETES)
- s->obsoletes = ido;
- else if (id == SOLVABLE_CONFLICTS)
- s->conflicts = ido;
- else if (id == SOLVABLE_REQUIRES)
- s->requires = ido;
- else if (id == SOLVABLE_RECOMMENDS)
- s->recommends= ido;
- else if (id == SOLVABLE_SUPPLEMENTS)
- s->supplements = ido;
- else if (id == SOLVABLE_SUGGESTS)
- s->suggests = ido;
- else if (id == SOLVABLE_ENHANCES)
- s->enhances = ido;
-#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s ->\n", id2str(pool, id));
- for (; repo->idarraydata[ido]; ido++)
- POOL_DEBUG(SAT_DEBUG_STATS," %s\n", dep2str(pool, repo->idarraydata[ido]));
-#endif
- break;
- case REPOKEY_TYPE_COUNTED:
+ if (parent)
+ s = pool_id2solvable(pool, parent->start);
+ else
+ s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv));
+ data.start = s - pool->solvables;
+ data.end = data.start + numsolv;
+ repodata_extend_block(&data, data.start, numsolv);
+ for (i = 1; i < numkeys; i++)
{
- Id num, did;
- dp = data_read_id(dp, &num);
- incore_add_id(&data, num);
- dp = data_read_id_max(dp, &did, 0, numschemata, &data.error);
- incore_add_id(&data, did);
- while (num--)
- {
- Id *kp = schemadata + schemata[did];
- for (; *kp; kp++)
- {
- Id tid;
- switch (keys[*kp].type)
- {
- case REPOKEY_TYPE_ID:
- dp = data_read_id_max(dp, &tid, idmap, numid + numrel, &data.error);
- incore_add_id(&data, tid);
- break;
- default:
- dps = dp;
- //dp = data_skip(dp, keys[*kp].type);
- dp = data_skip_recursive(&data, dp, keys + *kp);
- incore_add_blob(&data, dps, dp - dps);
- break;
- }
- }
- }
+ id = keys[i].name;
+ if ((keys[i].type == REPOKEY_TYPE_IDARRAY || keys[i].type == REPOKEY_TYPE_REL_IDARRAY)
+ && id >= INTERESTED_START && id <= INTERESTED_END)
+ size_idarray += keys[i].size;
}
- break;
- default:
- dps = dp;
- //dp = data_skip(dp, keys[key].type);
- dp = data_skip_recursive(&data, dp, keys + key);
- if (keys[key].storage == KEY_STORAGE_INCORE)
- incore_add_blob(&data, dps, dp - dps);
- break;
+ /* allocate needed space in repo */
+ /* we add maxsize because it is an upper limit for all idarrays, thus we can't overflow */
+ repo_reserve_ids(repo, 0, size_idarray + maxsize + 1);
+ idarraydatap = repo->idarraydata + repo->idarraysize;
+ repo->idarraysize += size_idarray;
+ idarraydataend = idarraydatap + size_idarray;
+ repo->lastoff = 0;
+ if (have_xdata)
+ data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
}
+ nentries--;
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
+ incore_add_id(&data, id);
+ keyp = schemadata + schemata[id];
+ break;
+ default:
+ dps = dp;
+ dp = data_skip(dp, keys[key].type);
+ if (keys[key].storage == KEY_STORAGE_INCORE)
+ incore_add_blob(&data, dps, dp - dps);
+ break;
}
}
-
/* should shrink idarraydata again */
+ if (keydepth)
+ {
+ pool_debug(pool, SAT_ERROR, "unexpected EOF, depth = %d\n", keydepth);
+ data.error = SOLV_ERROR_CORRUPT;
+ }
if (!data.error)
{
left -= (dp - buf);
/* no longer needed */
data.fp = 0;
}
+ sat_free(idmap);
+ mypool = 0;
- if (parent && !data.error)
+ if (data.error)
{
- /* we're a store */
- sat_free(parent->schemata);
- sat_free(parent->schemadata);
- sat_free(parent->keys);
- sat_free(parent->location);
+ /* XXX: free repodata? */
+ return data.error;
+ }
+
+ if (parent)
+ {
+ /* overwrite stub repodata */
+ repodata_free(parent);
*parent = data;
}
- else if ((data.incoredatalen || data.fp) && !data.error)
+ else
{
- /* we got some data, make it available */
+ /* make it available as new repodata */
repo->repodata = sat_realloc2(repo->repodata, repo->nrepodata + 1, sizeof(data));
repo->repodata[repo->nrepodata++] = data;
}
- else
+
+ /* create stub repodata entries for all external */
+ for (key = 1 ; key < data.nkeys; key++)
+ if (data.keys[key].name == REPOSITORY_EXTERNAL && data.keys[key].type == REPOKEY_TYPE_FLEXARRAY)
+ break;
+ if (key < data.nkeys)
{
- /* discard data */
- sat_free(data.dirpool.dirs);
- sat_free(data.incoreoffset);
- sat_free(schemata);
- sat_free(schemadata);
- sat_free(keys);
+ struct create_stub_data stubdata;
+ /* got some */
+ memset(&stubdata, 0, sizeof(stubdata));
+ repodata_search(&data, REPOENTRY_META, REPOSITORY_EXTERNAL, create_stub_cb, &stubdata);
}
-
- sat_free(idmap);
- mypool = 0;
- return data.error;
+ return 0;
}
int
}
static void
-repodata_load_solv(Repodata *data)
+repodata_load_stub(Repodata *data)
{
FILE *fp;
Pool *pool = data->repo->pool;
data->state = REPODATA_ERROR;
return;
}
+ /* so that we can retrieve meta data */
+ data->state = REPODATA_AVAILABLE;
fp = pool->loadcallback(pool, data, pool->loadcallbackdata);
if (!fp)
{
#include "util.h"
#include "repopack.h"
+#include "repopage.h"
extern unsigned int compress_buf (const unsigned char *in, unsigned int in_len,
unsigned char *out, unsigned int out_len);
data->schemadata = sat_calloc(1, sizeof(Id));
data->nschemata = 1;
data->schemadatalen = 1;
- data->start = repo->start;
- data->end = repo->end;
- data->nextra = repo->nextra;
- data->extrastart = 0;
- data->incoreoffset = sat_extend_resize(0, data->end - data->start, sizeof(Id), REPODATA_BLOCK);
- data->extraoffset = sat_extend_resize(0, repo->nextra, sizeof(Id), REPODATA_BLOCK);
data->pagefd = -1;
}
void
repodata_free(Repodata *data)
{
+ int i;
+
sat_free(data->keys);
+
sat_free(data->schemata);
sat_free(data->schemadata);
+ sat_free(data->schematahash);
- sat_free(data->spool.strings);
- sat_free(data->spool.stringspace);
- sat_free(data->spool.stringhashtbl);
-
- sat_free(data->dirpool.dirs);
- sat_free(data->dirpool.dirtraverse);
+ stringpool_free(&data->spool);
+ dirpool_free(&data->dirpool);
+ sat_free(data->mainschemaoffsets);
sat_free(data->incoredata);
sat_free(data->incoreoffset);
- sat_free(data->extraoffset);
sat_free(data->verticaloffset);
sat_free(data->blob_store);
sat_free(data->vincore);
+ if (data->attrs)
+ for (i = 0; i < data->end - data->start; i++)
+ sat_free(data->attrs[i]);
sat_free(data->attrs);
- sat_free(data->extraattrs);
+ if (data->xattrs)
+ for (i = 0; i < data->nxattrs; i++)
+ sat_free(data->xattrs[i]);
+ sat_free(data->xattrs);
+
sat_free(data->attrdata);
sat_free(data->attriddata);
- sat_free(data->location);
- sat_free(data->addedfileprovides);
-
if (data->pagefd != -1)
close(data->pagefd);
}
-unsigned char *
-data_skip_recursive(Repodata *data, unsigned char *dp, Repokey *key)
+
+/***************************************************************
+ * key pool management
+ */
+
+/* this is not so time critical that we need a hash, so we do a simple
+ * linear search */
+Id
+repodata_key2id(Repodata *data, Repokey *key, int create)
{
- KeyValue kv;
- if (key->type != REPOKEY_TYPE_COUNTED)
- return data_skip(dp, key->type);
- dp = data_fetch(dp, &kv, key);
- int num = kv.num;
- int schema = kv.id;
- while (num--)
- {
- Id *keyp = data->schemadata + data->schemata[schema];
- for (; *keyp; keyp++)
- dp = data_skip_recursive(data, dp, data->keys + *keyp);
+ Id keyid;
+
+ for (keyid = 1; keyid < data->nkeys; keyid++)
+ if (data->keys[keyid].name == key->name && data->keys[keyid].type == key->type)
+ {
+ if ((key->type == REPOKEY_TYPE_CONSTANT || key->type == REPOKEY_TYPE_CONSTANTID) && key->size != data->keys[keyid].size)
+ continue;
+ break;
+ }
+ if (keyid == data->nkeys)
+ {
+ if (!create)
+ return 0;
+ /* allocate new key */
+ data->keys = sat_realloc2(data->keys, data->nkeys + 1, sizeof(Repokey));
+ data->keys[data->nkeys++] = *key;
+ if (data->verticaloffset)
+ {
+ data->verticaloffset = sat_realloc2(data->verticaloffset, data->nkeys, sizeof(Id));
+ data->verticaloffset[data->nkeys - 1] = 0;
+ }
+ data->keybits[(key->name >> 3) & (sizeof(data->keybits) - 1)] |= 1 << (key->name & 7);
}
- return dp;
+ return keyid;
}
-static unsigned char *
-forward_to_key(Repodata *data, Id keyid, Id schemaid, unsigned char *dp)
+
+/***************************************************************
+ * schema pool management
+ */
+
+#define SCHEMATA_BLOCK 31
+#define SCHEMATADATA_BLOCK 255
+
+Id
+repodata_schema2id(Repodata *data, Id *schema, int create)
{
- Id k, *keyp;
+ int h, len, i;
+ Id *sp, cid;
+ Id *schematahash;
- keyp = data->schemadata + data->schemata[schemaid];
- while ((k = *keyp++) != 0)
+ if ((schematahash = data->schematahash) == 0)
{
- if (k == keyid)
- return dp;
- if (data->keys[k].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ data->schematahash = schematahash = sat_calloc(256, sizeof(Id));
+ for (i = 0; i < data->nschemata; i++)
{
- dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip that offset */
- dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip that length */
- continue;
+ for (sp = data->schemadata + data->schemata[i], h = 0; *sp; ) /* *sp++ below advances; len is uninitialized here */
+ h = h * 7 + *sp++;
+ h &= 255;
+ schematahash[h] = i + 1;
}
- if (data->keys[k].storage != KEY_STORAGE_INCORE)
- continue;
- dp = data_skip_recursive(data, dp, data->keys + k);
+ data->schemadata = sat_extend_resize(data->schemadata, data->schemadatalen, sizeof(Id), SCHEMATADATA_BLOCK);
+ data->schemata = sat_extend_resize(data->schemata, data->nschemata, sizeof(Id), SCHEMATA_BLOCK);
}
- return 0;
+
+ for (sp = schema, len = 0, h = 0; *sp; len++)
+ h = h * 7 + *sp++;
+ h &= 255;
+ len++;
+
+ cid = schematahash[h];
+ if (cid)
+ {
+ cid--;
+ if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
+ return cid;
+ /* cache conflict */
+ for (cid = 0; cid < data->nschemata; cid++)
+ if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
+ return cid;
+ }
+ /* a new one */
+ if (!create)
+ return 0;
+ data->schemadata = sat_extend(data->schemadata, data->schemadatalen, len, sizeof(Id), SCHEMATADATA_BLOCK);
+ data->schemata = sat_extend(data->schemata, data->nschemata, 1, sizeof(Id), SCHEMATA_BLOCK);
+ /* add schema */
+ memcpy(data->schemadata + data->schemadatalen, schema, len * sizeof(Id));
+ data->schemata[data->nschemata] = data->schemadatalen;
+ data->schemadatalen += len;
+ schematahash[h] = data->nschemata + 1;
+#if 0
+fprintf(stderr, "schema2id: new schema\n");
+#endif
+ return data->nschemata++;
+}
+
+void
+repodata_free_schemahash(Repodata *data)
+{
+ data->schematahash = sat_free(data->schematahash);
+ /* shrink arrays */
+ data->schemata = sat_realloc2(data->schemata, data->nschemata, sizeof(Id));
+ data->schemadata = sat_realloc2(data->schemadata, data->schemadatalen, sizeof(Id));
}
-#define BLOB_PAGEBITS 15
-#define BLOB_PAGESIZE (1 << BLOB_PAGEBITS)
-static unsigned char *
-load_page_range(Repodata *data, unsigned int pstart, unsigned int pend)
-{
-/* Make sure all pages from PSTART to PEND (inclusive) are loaded,
- and are consecutive. Return a pointer to the mapping of PSTART. */
- unsigned char buf[BLOB_PAGESIZE];
- unsigned int i;
-
- /* Quick check in case all pages are there already and consecutive. */
- for (i = pstart; i <= pend; i++)
- if (data->pages[i].mapped_at == -1
- || (i > pstart
- && data->pages[i].mapped_at
- != data->pages[i-1].mapped_at + BLOB_PAGESIZE))
- break;
- if (i > pend)
- return data->blob_store + data->pages[pstart].mapped_at;
+/***************************************************************
+ * dir pool management
+ */
- if (data->pagefd == -1)
- return 0;
+Id
+repodata_str2dir(Repodata *data, const char *dir, int create)
+{
+ Id id, parent;
+ const char *dire;
- /* Ensure that we can map the numbers of pages we need at all. */
- if (pend - pstart + 1 > data->ncanmap)
+ parent = 0;
+ while (*dir == '/' && dir[1] == '/')
+ dir++;
+ if (*dir == '/' && !dir[1])
+ return 1;
+ while (*dir)
{
- unsigned int oldcan = data->ncanmap;
- data->ncanmap = pend - pstart + 1;
- if (data->ncanmap < 4)
- data->ncanmap = 4;
- data->mapped = sat_realloc2(data->mapped, data->ncanmap, sizeof(data->mapped[0]));
- memset (data->mapped + oldcan, 0, (data->ncanmap - oldcan) * sizeof (data->mapped[0]));
- data->blob_store = sat_realloc2(data->blob_store, data->ncanmap, BLOB_PAGESIZE);
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGE: can map %d pages\n", data->ncanmap);
-#endif
+ dire = strchrnul(dir, '/');
+ if (data->localpool)
+ id = stringpool_strn2id(&data->spool, dir, dire - dir, create);
+ else
+ id = strn2id(data->repo->pool, dir, dire - dir, create);
+ if (!id)
+ return 0;
+ parent = dirpool_add_dir(&data->dirpool, parent, id, create);
+ if (!parent)
+ return 0;
+ if (!*dire)
+ break;
+ dir = dire + 1;
+ while (*dir == '/')
+ dir++;
}
+ return parent;
+}
- /* Now search for "cheap" space in our store. Space is cheap if it's either
- free (very cheap) or contains pages we search for anyway. */
+const char *
+repodata_dir2str(Repodata *data, Id did, const char *suf)
+{
+ Pool *pool = data->repo->pool;
+ int l = 0;
+ Id parent, comp;
+ const char *comps;
+ char *p;
- /* Setup cost array. */
- unsigned int cost[data->ncanmap];
- for (i = 0; i < data->ncanmap; i++)
+ if (!did)
+ return suf ? suf : "";
+ parent = did;
+ while (parent)
{
- unsigned int pnum = data->mapped[i];
- if (pnum == 0)
- cost[i] = 0;
- else
- {
- pnum--;
- Attrblobpage *p = data->pages + pnum;
- assert (p->mapped_at != -1);
- if (pnum >= pstart && pnum <= pend)
- cost[i] = 1;
- else
- cost[i] = 3;
- }
+ comp = dirpool_compid(&data->dirpool, parent);
+ comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
+ l += strlen(comps);
+ parent = dirpool_parent(&data->dirpool, parent);
+ if (parent)
+ l++;
}
-
- /* And search for cheapest space. */
- unsigned int best_cost = -1;
- unsigned int best = 0;
- unsigned int same_cost = 0;
- for (i = 0; i + pend - pstart < data->ncanmap; i++)
+ if (suf)
+ l += strlen(suf) + 1;
+ p = pool_alloctmpspace(pool, l + 1) + l;
+ *p = 0;
+ if (suf)
{
- unsigned int c = cost[i];
- unsigned int j;
- for (j = 0; j < pend - pstart + 1; j++)
- c += cost[i+j];
- if (c < best_cost)
- best_cost = c, best = i;
- else if (c == best_cost)
- same_cost++;
- /* A null cost won't become better. */
- if (c == 0)
- break;
+ p -= strlen(suf);
+ strcpy(p, suf);
+ *--p = '/';
+ }
+ parent = did;
+ while (parent)
+ {
+ comp = dirpool_compid(&data->dirpool, parent);
+ comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
+ l = strlen(comps);
+ p -= l;
+ strncpy(p, comps, l);
+ parent = dirpool_parent(&data->dirpool, parent);
+ if (parent)
+ *--p = '/';
}
- /* If all places have the same cost we would thrash on slot 0. Avoid
- this by doing a round-robin strategy in this case. */
- if (same_cost == data->ncanmap - pend + pstart - 1)
- best = data->rr_counter++ % (data->ncanmap - pend + pstart);
-
- /* So we want to map our pages from [best] to [best+pend-pstart].
- Use a very simple strategy, which doesn't make the best use of
- our resources, but works. Throw away all pages in that range
- (even ours) then copy around ours (in case they were outside the
- range) or read them in. */
- for (i = best; i < best + pend - pstart + 1; i++)
+ return p;
+}
+
+
+/***************************************************************
+ * data management
+ */
+
+static inline unsigned char *
+data_skip_schema(Repodata *data, unsigned char *dp, Id schema)
+{
+ Id *keyp = data->schemadata + data->schemata[schema];
+ for (; *keyp; keyp++)
+ dp = data_skip_key(data, dp, data->keys + *keyp);
+ return dp;
+}
+
+unsigned char *
+data_skip_key(Repodata *data, unsigned char *dp, Repokey *key)
+{
+ int nentries, schema;
+ switch(key->type)
{
- unsigned int pnum = data->mapped[i];
- if (pnum--
- /* If this page is exactly at the right place already,
- no need to evict it. */
- && pnum != pstart + i - best)
+ case REPOKEY_TYPE_FIXARRAY:
+ dp = data_read_id(dp, &nentries);
+ if (!nentries)
+ return dp;
+ dp = data_read_id(dp, &schema);
+ while (nentries--)
+ dp = data_skip_schema(data, dp, schema);
+ return dp;
+ case REPOKEY_TYPE_FLEXARRAY:
+ dp = data_read_id(dp, &nentries);
+ while (nentries--)
{
- /* Evict this page. */
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGE: evict page %d from %d\n", pnum, i);
-#endif
- cost[i] = 0;
- data->mapped[i] = 0;
- data->pages[pnum].mapped_at = -1;
+ dp = data_read_id(dp, &schema);
+ dp = data_skip_schema(data, dp, schema);
}
+ return dp;
+ default:
+ if (key->storage == KEY_STORAGE_INCORE)
+ dp = data_skip(dp, key->type);
+ else if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ }
+ return dp;
}
+}
+
+static unsigned char *
+forward_to_key(Repodata *data, Id keyid, Id *keyp, unsigned char *dp)
+{
+ Id k;
- /* Everything is free now. Read in the pages we want. */
- for (i = pstart; i <= pend; i++)
+ if (!keyid)
+ return 0;
+ while ((k = *keyp++) != 0)
{
- Attrblobpage *p = data->pages + i;
- unsigned int pnum = i - pstart + best;
- void *dest = data->blob_store + pnum * BLOB_PAGESIZE;
- if (p->mapped_at != -1)
- {
- if (p->mapped_at != pnum * BLOB_PAGESIZE)
- {
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGECOPY: %d to %d\n", i, pnum);
-#endif
- /* Still mapped somewhere else, so just copy it from there. */
- memcpy (dest, data->blob_store + p->mapped_at, BLOB_PAGESIZE);
- data->mapped[p->mapped_at / BLOB_PAGESIZE] = 0;
- }
- }
- else
- {
- unsigned int in_len = p->file_size;
- unsigned int compressed = in_len & 1;
- in_len >>= 1;
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGEIN: %d to %d", i, pnum);
-#endif
- if (pread(data->pagefd, compressed ? buf : dest, in_len, p->file_offset) != in_len)
- {
- perror ("mapping pread");
- return 0;
- }
- if (compressed)
- {
- unsigned int out_len;
- out_len = unchecked_decompress_buf(buf, in_len,
- dest, BLOB_PAGESIZE);
- if (out_len != BLOB_PAGESIZE && i < data->num_pages - 1)
- {
- fprintf(stderr, "can't decompress\n");
- return 0;
- }
-#ifdef DEBUG_PAGING
- fprintf (stderr, " (expand %d to %d)", in_len, out_len);
-#endif
- }
-#ifdef DEBUG_PAGING
- fprintf (stderr, "\n");
-#endif
+ if (k == keyid)
+ return dp;
+ if (data->keys[k].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip offset */
+ dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip length */
+ continue;
}
- p->mapped_at = pnum * BLOB_PAGESIZE;
- data->mapped[pnum] = i + 1;
+ if (data->keys[k].storage != KEY_STORAGE_INCORE)
+ continue;
+ dp = data_skip_key(data, dp, data->keys + k);
}
- return data->blob_store + best * BLOB_PAGESIZE;
+ return 0;
}
static unsigned char *
-make_vertical_available(Repodata *data, Repokey *key, Id off, Id len)
+get_vertical_data(Repodata *data, Repokey *key, Id off, Id len)
{
unsigned char *dp;
if (!len)
/* we now have the offset, go into vertical */
off += data->verticaloffset[key - data->keys];
/* fprintf(stderr, "key %d page %d\n", key->name, off / BLOB_PAGESIZE); */
- dp = load_page_range(data, off / BLOB_PAGESIZE, (off + len - 1) / BLOB_PAGESIZE);
+ dp = repodata_load_page_range(data, off / BLOB_PAGESIZE, (off + len - 1) / BLOB_PAGESIZE);
if (dp)
dp += off % BLOB_PAGESIZE;
return dp;
if (key->storage == KEY_STORAGE_INCORE)
{
/* hmm, this is a bit expensive */
- *dpp = data_skip_recursive(data, dp, key);
+ *dpp = data_skip_key(data, dp, key);
return dp;
}
else if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
dp = data_read_id(dp, &off);
dp = data_read_id(dp, &len);
*dpp = dp;
- return make_vertical_available(data, key, off, len);
+ return get_vertical_data(data, key, off, len);
+ }
+ return 0;
+}
+
+static int
+load_repodata(Repodata *data)
+{
+ if (data->loadcallback)
+ {
+ data->loadcallback(data);
+ if (data->state == REPODATA_AVAILABLE)
+ return 1;
}
+ data->state = REPODATA_ERROR;
return 0;
}
static inline int
-maybe_load_repodata(Repodata *data, Id *keyid)
+maybe_load_repodata(Repodata *data, Id keyname)
{
- if (data->state == REPODATA_STUB)
+ if (keyname && !repodata_precheck_keyname(data, keyname))
+ return 0; /* do not bother... */
+ switch(data->state)
{
- if (data->loadcallback)
+ case REPODATA_STUB:
+ if (keyname)
{
- if (keyid)
- {
- /* key order may change when loading */
- int i;
- Id name = data->keys[*keyid].name;
- Id type = data->keys[*keyid].type;
- data->loadcallback(data);
- if (data->state == REPODATA_AVAILABLE)
- {
- for (i = 1; i < data->nkeys; i++)
- if (data->keys[i].name == name && data->keys[i].type == type)
- break;
- if (i < data->nkeys)
- *keyid = i;
- else
- return 0;
- }
- }
- else
- data->loadcallback(data);
+ int i;
+ for (i = 0; i < data->nkeys; i++)
+ if (keyname == data->keys[i].name)
+ break;
+ if (i == data->nkeys)
+ return 0;
}
- else
- data->state = REPODATA_ERROR;
+ return load_repodata(data);
+ case REPODATA_ERROR:
+ return 0;
+ case REPODATA_AVAILABLE:
+ return 1;
+ default:
+ data->state = REPODATA_ERROR;
+ return 0;
}
- if (data->state == REPODATA_AVAILABLE)
- return 1;
- data->state = REPODATA_ERROR;
- return 0;
}
static inline unsigned char*
-entry2data(Repodata *data, Id entry)
+entry2data(Repodata *data, Id entry, Id *schemap)
{
- if (entry < 0)
- return data->incoredata + data->extraoffset[-1 - entry];
+ unsigned char *dp = data->incoredata;
+ if (!dp)
+ return 0;
+ if (entry == REPOENTRY_META) /* META */
+ dp += 1;
+ else if (entry == REPOENTRY_POS) /* POS */
+ {
+ *schemap = data->pos.schema;
+ return data->incoredata + data->pos.dp;
+ }
else
- return data->incoredata + data->incoreoffset[entry];
+ {
+ if (entry < data->start || entry >= data->end)
+ return 0;
+ dp += data->incoreoffset[entry - data->start];
+ }
+ return data_read_id(dp, schemap);
}
-Id
-repodata_lookup_id(Repodata *data, Id entry, Id keyid)
+/************************************************************************
+ * data lookup
+ */
+
+static inline Id
+find_schema_key(Repodata *data, Id schema, Id keyname)
{
- Id schema;
+ Id *keyp;
+ for (keyp = data->schemadata + data->schemata[schema]; *keyp; keyp++)
+ if (data->keys[*keyp].name == keyname)
+ return *keyp;
+ return 0;
+}
+
+static inline unsigned char *
+find_key_data(Repodata *data, Id entry, Id keyname, Repokey **keyp)
+{
+ unsigned char *dp, *ddp;
+ Id keyid, schema;
Repokey *key;
- Id id, *keyp;
- unsigned char *dp;
- if (!maybe_load_repodata(data, &keyid))
+ if (!maybe_load_repodata(data, keyname))
return 0;
- dp = entry2data(data, entry);
+ dp = entry2data(data, entry, &schema);
if (!dp)
return 0;
- dp = data_read_id(dp, &schema);
- /* make sure the schema of this solvable contains the key */
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
+ keyid = find_schema_key(data, schema, keyname);
+ if (!keyid)
+ return 0;
key = data->keys + keyid;
- dp = get_data(data, key, &dp);
+ *keyp = key;
+ if (key->type == REPOKEY_TYPE_VOID || key->type == REPOKEY_TYPE_CONSTANT || key->type == REPOKEY_TYPE_CONSTANTID)
+ return dp;
+ dp = forward_to_key(data, keyid, data->schemadata + data->schemata[schema], dp);
+ if (!dp)
+ return 0;
+ ddp = get_data(data, key, &dp);
+ return ddp;
+}
+
+
+Id
+repodata_lookup_id(Repodata *data, Id entry, Id keyname)
+{
+ unsigned char *dp;
+ Repokey *key;
+ Id id;
+
+ dp = find_key_data(data, entry, keyname, &key);
if (!dp)
return 0;
if (key->type == REPOKEY_TYPE_CONSTANTID)
}
const char *
-repodata_lookup_str(Repodata *data, Id entry, Id keyid)
+repodata_lookup_str(Repodata *data, Id entry, Id keyname)
{
- Id schema;
- Repokey *key;
- Id id, *keyp;
unsigned char *dp;
+ Repokey *key;
+ Id id;
- if (!maybe_load_repodata(data, &keyid))
- return 0;
-
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- /* make sure the schema of this solvable contains the key */
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
- key = data->keys + keyid;
- dp = get_data(data, key, &dp);
+ dp = find_key_data(data, entry, keyname, &key);
if (!dp)
return 0;
if (key->type == REPOKEY_TYPE_STR)
return id2str(data->repo->pool, id);
}
-int
-repodata_lookup_num(Repodata *data, Id entry, Id keyid, unsigned int *value)
-{
- Id schema;
- Repokey *key;
- Id *keyp;
- KeyValue kv;
- unsigned char *dp;
+int
+repodata_lookup_num(Repodata *data, Id entry, Id keyname, unsigned int *value)
+{
+ unsigned char *dp;
+ Repokey *key;
+ KeyValue kv;
+
+ *value = 0;
+ dp = find_key_data(data, entry, keyname, &key);
+ if (!dp)
+ return 0;
+ if (key->type == REPOKEY_TYPE_NUM
+ || key->type == REPOKEY_TYPE_U32
+ || key->type == REPOKEY_TYPE_CONSTANT)
+ {
+ dp = data_fetch(dp, &kv, key);
+ *value = kv.num;
+ return 1;
+ }
+ return 0;
+}
+
+int
+repodata_lookup_void(Repodata *data, Id entry, Id keyname)
+{
+ Id schema;
+ Id *keyp;
+ unsigned char *dp;
+
+ if (!maybe_load_repodata(data, keyname))
+ return 0;
+ dp = entry2data(data, entry, &schema);
+ if (!dp)
+ return 0;
+ /* can't use find_schema_key as we need to test the type */
+ for (keyp = data->schemadata + data->schemata[schema]; *keyp; keyp++)
+ if (data->keys[*keyp].name == keyname && data->keys[*keyp].type == REPOKEY_TYPE_VOID)
+ return 1;
+ return 0;
+}
+
+const unsigned char *
+repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyname, Id *typep)
+{
+ unsigned char *dp;
+ Repokey *key;
+
+ dp = find_key_data(data, entry, keyname, &key);
+ if (!dp)
+ return 0;
+ *typep = key->type;
+ return dp;
+}
+
+
+/************************************************************************
+ * data search
+ */
+
+struct subschema_data {
+ Solvable *s;
+ void *cbdata;
+ KeyValue *parent;
+};
+
+/* search in a specific entry */
+void
+repodata_search(Repodata *data, Id entry, Id keyname, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata)
+{
+ Id schema;
+ Repokey *key;
+ Id k, keyid, *kp, *keyp;
+ unsigned char *dp, *ddp;
+ int onekey = 0;
+ int stop;
+ KeyValue kv;
+ Solvable *s;
+
+ if (!maybe_load_repodata(data, keyname))
+ return;
+ if (entry == REPOENTRY_SUBSCHEMA)
+ {
+ struct subschema_data *subd = cbdata;
+ cbdata = subd->cbdata;
+ s = subd->s;
+ schema = subd->parent->id;
+ dp = (unsigned char *)subd->parent->str;
+ kv.parent = subd->parent;
+ }
+ else
+ {
+ schema = 0;
+ dp = entry2data(data, entry, &schema);
+ if (!dp)
+ return;
+ s = data->repo->pool->solvables + entry;
+ kv.parent = 0;
+ }
+ keyp = data->schemadata + data->schemata[schema];
+ if (keyname)
+ {
+ /* search for a specific key */
+ for (kp = keyp; (k = *kp++) != 0; )
+ if (data->keys[k].name == keyname)
+ break;
+ if (k == 0)
+ return;
+ dp = forward_to_key(data, k, data->schemadata + data->schemata[schema], dp);
+ if (!dp)
+ return;
+ keyp = kp - 1;
+ onekey = 1;
+ }
+ while ((keyid = *keyp++) != 0)
+ {
+ stop = 0;
+ key = data->keys + keyid;
+ ddp = get_data(data, key, &dp);
+
+ if (key->type == REPOKEY_TYPE_FLEXARRAY || key->type == REPOKEY_TYPE_FIXARRAY)
+ {
+ struct subschema_data subd;
+ int nentries;
+ Id schema = 0;
+
+ subd.cbdata = cbdata;
+ subd.s = s;
+ subd.parent = &kv;
+ ddp = data_read_id(ddp, &nentries);
+ kv.num = nentries;
+ kv.entry = 0;
+ while (ddp && nentries > 0)
+ {
+ if (key->type == REPOKEY_TYPE_FLEXARRAY || !kv.entry)
+ ddp = data_read_id(ddp, &schema);
+ kv.id = schema;
+ kv.str = (char *)ddp;
+ stop = callback(cbdata, s, data, key, &kv);
+ if (stop > SEARCH_NEXT_KEY)
+ return;
+ if (stop)
+ break;
+ if (!keyname)
+ repodata_search(data, REPOENTRY_SUBSCHEMA, 0, callback, &subd);
+ ddp = data_skip_schema(data, ddp, schema);
+ nentries--;
+ kv.entry++;
+ }
+ if (!nentries)
+ {
+ /* sentinel */
+ kv.eof = 1;
+ kv.str = (char *)ddp;
+ stop = callback(cbdata, s, data, key, &kv);
+ if (stop > SEARCH_NEXT_KEY)
+ return;
+ }
+ if (onekey)
+ return;
+ continue;
+ }
+ kv.entry = 0;
+ do
+ {
+ ddp = data_fetch(ddp, &kv, key);
+ if (!ddp)
+ break;
+ stop = callback(cbdata, s, data, key, &kv);
+ kv.entry++;
+ }
+ while (!kv.eof && !stop);
+ if (onekey || stop > SEARCH_NEXT_KEY)
+ return;
+ }
+}
+
+void
+repodata_set_pos_kv(Repodata *data, KeyValue *kv)
+{
+ if (!kv)
+ {
+ data->pos.dp = 0;
+ data->pos.schema = 0;
+ }
+ else
+ {
+ data->pos.dp = (unsigned char *)kv->str - data->incoredata;
+ data->pos.schema = kv->id;
+ }
+}
+
+/************************************************************************/
+
+static Repokey solvablekeys[RPM_RPMDBID - SOLVABLE_NAME + 1] = {
+ { SOLVABLE_NAME, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_ARCH, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_EVR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_VENDOR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_PROVIDES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_OBSOLETES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_CONFLICTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_REQUIRES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_RECOMMENDS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_SUGGESTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_SUPPLEMENTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_ENHANCES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { RPM_RPMDBID, REPOKEY_TYPE_U32, 0, KEY_STORAGE_SOLVABLE },
+};
+
+#if 1
+static inline Id *
+solvabledata_fetch(Solvable *s, KeyValue *kv, Id keyname)
+{
+ kv->id = keyname;
+ switch (keyname)
+ {
+ case SOLVABLE_NAME:
+ kv->eof = 1;
+ return &s->name;
+ case SOLVABLE_ARCH:
+ kv->eof = 1;
+ return &s->arch;
+ case SOLVABLE_EVR:
+ kv->eof = 1;
+ return &s->evr;
+ case SOLVABLE_VENDOR:
+ kv->eof = 1;
+ return &s->vendor;
+ case SOLVABLE_PROVIDES:
+ kv->eof = 0;
+ return s->provides ? s->repo->idarraydata + s->provides : 0;
+ case SOLVABLE_OBSOLETES:
+ kv->eof = 0;
+ return s->obsoletes ? s->repo->idarraydata + s->obsoletes : 0;
+ case SOLVABLE_CONFLICTS:
+ kv->eof = 0;
+ return s->conflicts ? s->repo->idarraydata + s->conflicts : 0;
+ case SOLVABLE_REQUIRES:
+ kv->eof = 0;
+ return s->requires ? s->repo->idarraydata + s->requires : 0;
+ case SOLVABLE_RECOMMENDS:
+ kv->eof = 0;
+ return s->recommends ? s->repo->idarraydata + s->recommends : 0;
+ case SOLVABLE_SUPPLEMENTS:
+ kv->eof = 0;
+ return s->supplements ? s->repo->idarraydata + s->supplements : 0;
+ case SOLVABLE_SUGGESTS:
+ kv->eof = 0;
+ return s->suggests ? s->repo->idarraydata + s->suggests : 0;
+ case SOLVABLE_ENHANCES:
+ kv->eof = 0;
+ return s->enhances ? s->repo->idarraydata + s->enhances : 0;
+ case RPM_RPMDBID:
+ kv->eof = 1;
+ return s->repo->rpmdbid ? s->repo->rpmdbid + (s - s->repo->pool->solvables - s->repo->start) : 0;
+ default:
+ return 0;
+ }
+}
+
+void
+datamatcher_init(Datamatcher *ma, Pool *pool, const char *match, int flags)
+{
+ ma->pool = pool;
+ ma->match = (void *)match;
+ ma->flags = flags;
+ ma->error = 0;
+ if ((flags & SEARCH_STRINGMASK) == SEARCH_REGEX)
+ {
+ ma->match = sat_calloc(1, sizeof(regex_t));
+ ma->error = regcomp((regex_t *)ma->match, match, REG_EXTENDED | REG_NOSUB | REG_NEWLINE | ((flags & SEARCH_NOCASE) ? REG_ICASE : 0));
+ if (ma->error)
+ {
+ sat_free(ma->match);
+ ma->match = (void *)match;
+ ma->flags = (flags & ~SEARCH_STRINGMASK) | SEARCH_ERROR;
+ }
+ }
+}
+
+void
+datamatcher_free(Datamatcher *ma)
+{
+ if ((ma->flags & SEARCH_STRINGMASK) == SEARCH_REGEX && ma->match)
+ {
+ regfree(ma->match);
+ ma->match = sat_free(ma->match);
+ }
+}
+
+int
+datamatcher_match(Datamatcher *ma, Repodata *data, Repokey *key, KeyValue *kv)
+{
+ switch (key->type)
+ {
+ case REPOKEY_TYPE_ID:
+ case REPOKEY_TYPE_IDARRAY:
+ if (data && data->localpool)
+ kv->str = stringpool_id2str(&data->spool, kv->id);
+ else
+ kv->str = id2str(ma->pool, kv->id);
+ break;
+ case REPOKEY_TYPE_STR:
+ break;
+ case REPOKEY_TYPE_DIRSTRARRAY:
+ if (!(ma->flags & SEARCH_FILES))
+ return 0;
+ /* Put the full filename into kv->str. */
+ kv->str = repodata_dir2str(data, kv->id, kv->str);
+ /* And to compensate for that put the "empty" directory into
+ kv->id, so that later calls to repodata_dir2str on this data
+ come up with the same filename again. */
+ kv->id = 0;
+ break;
+ default:
+ return 0;
+ }
+ /* Maybe skip the kind specifier. Do this only for SOLVABLE attributes,
+ for the others we can't know if a colon separates a kind or not. */
+ if ((ma->flags & SEARCH_SKIP_KIND) != 0 && key->storage == KEY_STORAGE_SOLVABLE)
+ {
+ const char *s = strchr(kv->str, ':');
+ if (s)
+ kv->str = s + 1;
+ }
+ switch ((ma->flags & SEARCH_STRINGMASK))
+ {
+ case SEARCH_SUBSTRING:
+ if (ma->flags & SEARCH_NOCASE)
+ {
+ if (!strcasestr(kv->str, (const char *)ma->match))
+ return 0;
+ }
+ else
+ {
+ if (!strstr(kv->str, (const char *)ma->match))
+ return 0;
+ }
+ break;
+ case SEARCH_STRING:
+ if (ma->flags & SEARCH_NOCASE)
+ {
+ if (strcasecmp((const char *)ma->match, kv->str))
+ return 0;
+ }
+ else
+ {
+ if (strcmp((const char *)ma->match, kv->str))
+ return 0;
+ }
+ break;
+ case SEARCH_GLOB:
+ if (fnmatch((const char *)ma->match, kv->str, (ma->flags & SEARCH_NOCASE) ? FNM_CASEFOLD : 0))
+ return 0;
+ break;
+ case SEARCH_REGEX:
+ if (regexec((const regex_t *)ma->match, kv->str, 0, NULL, 0))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+enum {
+ di_bye,
- *value = 0;
+ di_nextattr,
+ di_nextkey,
+ di_nextrepodata,
+ di_nextsolvable,
+ di_nextrepo,
- if (!maybe_load_repodata(data, &keyid))
- return 0;
+ di_enterrepo,
+ di_entersolvable,
+ di_enterrepodata,
+ di_enterkey,
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- /* make sure the schema of this solvable contains the key */
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
- key = data->keys + keyid;
- dp = get_data(data, key, &dp);
- if (!dp)
- return 0;
- if (key->type == REPOKEY_TYPE_NUM
- || key->type == REPOKEY_TYPE_U32
- || key->type == REPOKEY_TYPE_CONSTANT)
- {
- dp = data_fetch(dp, &kv, key);
- *value = kv.num;
- return 1;
- }
- return 0;
-}
+ di_nextarrayelement,
+ di_entersub,
+ di_leavesub,
-int
-repodata_lookup_void(Repodata *data, Id entry, Id keyid)
+ di_nextsolvableattr,
+ di_nextsolvablekey,
+ di_entersolvablekey
+};
+
+void
+dataiterator_init(Dataiterator *di, Repo *repo, Id p, Id keyname, const char *match, int flags)
{
- Id schema;
- Id *keyp;
- unsigned char *dp;
- if (!maybe_load_repodata(data, &keyid))
- return 0;
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- return 1;
+ memset(di, 0, sizeof(*di));
+ di->repo = repo;
+ di->keyname = keyname;
+ di->entry = p;
+ di->pool = repo->pool;
+ if (p)
+ flags |= SEARCH_THISENTRY;
+ di->flags = flags;
+ if (repo)
+ di->repoid = -1;
+ if (match)
+ datamatcher_init(&di->matcher, di->pool, match, flags);
+ di->state = di_enterrepo;
}
-const unsigned char *
-repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyid, Id *typep)
+void
+dataiterator_free(Dataiterator *di)
{
- Id schema;
- Id *keyp;
- Repokey *key;
- unsigned char *dp;
-
- if (!maybe_load_repodata(data, &keyid))
- return 0;
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
- key = data->keys + keyid;
- *typep = key->type;
- return get_data(data, key, &dp);
+ if (di->matcher.match)
+ datamatcher_free(&di->matcher);
}
-void
-repodata_search(Repodata *data, Id entry, Id keyname, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata)
+int
+dataiterator_step(Dataiterator *di)
{
Id schema;
- Repokey *key;
- Id k, keyid, *kp, *keyp;
- unsigned char *dp, *ddp;
- int onekey = 0;
- int stop;
- KeyValue kv;
-
- if (entry < 0
- || !maybe_load_repodata(data, 0))
- return;
- dp = entry2data(data, entry);
- if (!dp)
- return;
- dp = data_read_id(dp, &schema);
- keyp = data->schemadata + data->schemata[schema];
- if (keyname)
- {
- /* search in a specific key */
- for (kp = keyp; (k = *kp++) != 0; )
- if (data->keys[k].name == keyname)
- break;
- if (k == 0)
- return;
- dp = forward_to_key(data, k, schema, dp);
- if (!dp)
- return;
- keyp = kp - 1;
- onekey = 1;
- }
- while ((keyid = *keyp++) != 0)
+ for (;;)
{
- stop = 0;
- key = data->keys + keyid;
- ddp = get_data(data, key, &dp);
- do
+ switch (di->state)
{
- ddp = data_fetch(ddp, &kv, key);
- if (!ddp)
- break;
- if (key->type == REPOKEY_TYPE_COUNTED)
+ case di_nextattr: di_nextattr:
+ di->kv.entry++;
+ di->ddp = data_fetch(di->ddp, &di->kv, di->key);
+ if (di->kv.eof)
+ di->state = di_nextkey;
+ else
+ di->state = di_nextattr;
+ break;
+
+ case di_nextkey: di_nextkey:
+ if (!di->keyname)
+ {
+ if (*++di->keyp)
+ goto di_enterkey;
+ }
+ else if ((di->flags & SEARCH_SUB) != 0)
{
- int num = kv.num;
- int subschema = kv.id;
- Repokey *countkey = key;
- kv.eof = 0;
- callback(cbdata, data->repo->pool->solvables + data->start + entry, data, countkey, &kv);
- while (num--)
+ Id *keyp = di->keyp;
+ for (keyp++; *keyp; keyp++)
+ if (di->data->keys[*keyp].name == di->keyname ||
+ di->data->keys[*keyp].type == REPOKEY_TYPE_FIXARRAY ||
+ di->data->keys[*keyp].type == REPOKEY_TYPE_FLEXARRAY)
+ break;
+ if (*keyp && (di->dp = forward_to_key(di->data, *keyp, di->keyp, di->dp)) != 0)
{
- Id *kp = data->schemadata + data->schemata[subschema];
- for (; *kp; kp++)
- {
- key = data->keys + *kp;
- ddp = data_fetch(ddp, &kv, key);
- if (!ddp)
- exit(1);
- callback(cbdata, data->repo->pool->solvables + data->start + entry, data, key, &kv);
- }
- kv.eof = 1;
- callback(cbdata, data->repo->pool->solvables + data->start + entry, data, countkey, &kv);
+ di->keyp = keyp;
+ goto di_enterkey;
+ }
+ }
+
+ if (di->kv.parent)
+ goto di_leavesub;
+ /* FALLTHROUGH */
+
+ case di_nextrepodata: di_nextrepodata:
+ if (di->repodataid >= 0 && ++di->repodataid < di->repo->nrepodata)
+ goto di_enterrepodata;
+ /* FALLTHROUGH */
+
+ case di_nextsolvable:
+ if (!(di->flags & SEARCH_THISENTRY))
+ {
+ if (di->entry < 0)
+ di->entry = di->repo->start;
+ else
+ di->entry++;
+ for (; di->entry < di->repo->end; di->entry++)
+ {
+ if (di->pool->solvables[di->entry].repo == di->repo)
+ goto di_entersolvable;
+ }
+ }
+ /* FALLTHROUGH */
+
+ case di_nextrepo:
+ if (di->repoid >= 0)
+ {
+ di->repoid++;
+ if (di->repoid < di->pool->nrepos)
+ {
+ di->repo = di->pool->repos[di->repoid];
+ goto di_enterrepo;
+ }
+ }
+
+ /* FALLTHROUGH */
+ case di_bye:
+ di->state = di_bye;
+ return 0;
+
+ case di_enterrepo: di_enterrepo:
+ if (!(di->flags & SEARCH_THISENTRY))
+ di->entry = di->repo->start;
+ /* FALLTHROUGH */
+
+ case di_entersolvable: di_entersolvable:
+ if (di->repodataid >= 0)
+ {
+ di->repodataid = 0;
+ if (di->entry > 0 && (!di->keyname || (di->keyname >= SOLVABLE_NAME && di->keyname <= RPM_RPMDBID)))
+ {
+ di->key = solvablekeys + (di->keyname ? di->keyname - SOLVABLE_NAME : 0);
+ di->data = 0;
+ goto di_entersolvablekey;
+ }
+ }
+ /* FALLTHROUGH */
+ case di_enterrepodata: di_enterrepodata:
+ if (di->repodataid >= 0)
+ di->data = di->repo->repodata + di->repodataid;
+ if (!maybe_load_repodata(di->data, di->keyname))
+ goto di_nextrepodata;
+ di->dp = entry2data(di->data, di->entry, &schema);
+ if (!di->dp)
+ goto di_nextrepodata;
+ di->keyp = di->data->schemadata + di->data->schemata[schema];
+ if (di->keyname)
+ {
+ Id *keyp;
+ if ((di->flags & SEARCH_SUB) != 0)
+ {
+ di->keyp--;
+ goto di_nextkey;
}
- kv.eof = 2;
- stop = callback(cbdata, data->repo->pool->solvables + data->start + entry, data, countkey, &kv);
+ for (keyp = di->keyp; *keyp; keyp++)
+ if (di->data->keys[*keyp].name == di->keyname)
+ break;
+ if (!*keyp)
+ goto di_nextrepodata;
+ di->dp = forward_to_key(di->data, *keyp, di->keyp, di->dp);
+ di->keyp = keyp;
+ if (!di->dp)
+ goto di_nextrepodata;
+ }
+
+ case di_enterkey: di_enterkey:
+ di->kv.entry = -1;
+ di->key = di->data->keys + *di->keyp;
+ di->ddp = get_data(di->data, di->key, &di->dp);
+ if (!di->ddp)
+ goto di_nextkey;
+ if (di->key->type == REPOKEY_TYPE_FIXARRAY || di->key->type == REPOKEY_TYPE_FLEXARRAY)
+ {
+ di->ddp = data_read_id(di->ddp, &di->kv.num);
+ di->kv.entry = -1;
+ di->kv.eof = 0;
+ goto di_nextarrayelement;
+ }
+ goto di_nextattr;
+
+ case di_nextarrayelement: di_nextarrayelement:
+ di->kv.entry++;
+ if (di->kv.entry)
+ di->ddp = data_skip_schema(di->data, di->ddp, di->kv.id);
+ if (di->kv.entry == di->kv.num)
+ {
+ if (di->keyname && di->key->name != di->keyname)
+ goto di_nextkey;
+ di->kv.str = (char *)di->ddp;
+ di->kv.eof = 1;
+ di->state = di_nextkey;
+ break;
+ }
+ if (di->key->type == REPOKEY_TYPE_FLEXARRAY || !di->kv.entry)
+ di->ddp = data_read_id(di->ddp, &di->kv.id);
+ di->kv.str = (char *)di->ddp;
+ if (di->keyname && di->key->name != di->keyname)
+ goto di_entersub;
+ if ((di->flags & SEARCH_SUB) != 0)
+ di->state = di_entersub;
+ else
+ di->state = di_nextarrayelement;
+ break;
+
+ case di_entersub: di_entersub:
+ if (di->nparents == sizeof(di->parents)/sizeof(*di->parents) - 1)
+ goto di_nextarrayelement; /* sorry, full */
+ di->parents[di->nparents].kv = di->kv;
+ di->parents[di->nparents].dp = di->dp;
+ di->parents[di->nparents].keyp = di->keyp;
+ di->dp = (unsigned char *)di->kv.str;
+ di->keyp = di->data->schemadata + di->data->schemata[di->kv.id];
+ memset(&di->kv, 0, sizeof(di->kv));
+ di->kv.parent = &di->parents[di->nparents].kv;
+ di->nparents++;
+ di->keyp--;
+ goto di_nextkey;
+
+ case di_leavesub: di_leavesub:
+ di->nparents--;
+ di->dp = di->parents[di->nparents].dp;
+ di->kv = di->parents[di->nparents].kv;
+ di->keyp = di->parents[di->nparents].keyp;
+ di->key = di->data->keys + *di->keyp;
+ di->ddp = (unsigned char *)di->kv.str;
+ goto di_nextarrayelement;
+
+ /* special solvable attr handling follows */
+
+ case di_nextsolvableattr:
+ di->kv.id = *di->idp++;
+ di->kv.entry++;
+ if (!*di->idp)
+ {
+ di->kv.eof = 1;
+ di->state = di_nextsolvablekey;
}
+ break;
+
+ case di_nextsolvablekey: di_nextsolvablekey:
+ if (di->keyname || di->key->name == RPM_RPMDBID)
+ goto di_enterrepodata;
+ di->key++;
+ /* FALLTHROUGH */
+
+ case di_entersolvablekey: di_entersolvablekey:
+ di->idp = solvabledata_fetch(di->pool->solvables + di->entry, &di->kv, di->key->name);
+ if (!di->idp || !di->idp[0])
+ goto di_nextsolvablekey;
+ di->kv.id = di->idp[0];
+ di->kv.num = di->idp[0];
+ if (!di->kv.eof && !di->idp[1])
+ di->kv.eof = 1;
+ di->kv.entry = 0;
+ if (di->kv.eof)
+ di->state = di_nextsolvablekey;
else
- stop = callback(cbdata, data->repo->pool->solvables + data->start + entry, data, key, &kv);
+ di->state = di_nextsolvableattr;
+ break;
}
- while (!kv.eof && !stop);
- if (onekey || stop > SEARCH_NEXT_KEY)
- return;
+
+ if (di->matcher.match)
+ if (!datamatcher_match(&di->matcher, di->data, di->key, &di->kv))
+ continue;
+ /* found something! */
+ return 1;
}
}
+void
+dataiterator_skip_attribute(Dataiterator *di)
+{
+ if (di->state == di_nextsolvableattr)
+ di->state = di_nextsolvablekey;
+ else
+ di->state = di_nextkey;
+}
+
+void
+dataiterator_skip_solvable(Dataiterator *di)
+{
+ di->state = di_nextsolvable;
+}
+
+void
+dataiterator_skip_repo(Dataiterator *di)
+{
+ di->state = di_nextrepo;
+}
+
+void
+dataiterator_jump_to_solvable(Dataiterator *di, Solvable *s)
+{
+ di->repo = s->repo;
+ di->repoid = -1;
+ di->entry = s - di->pool->solvables;
+ di->state = di_entersolvable;
+}
+
+void
+dataiterator_jump_to_repo(Dataiterator *di, Repo *repo)
+{
+ di->repo = repo;
+ di->repoid = -1;
+ di->state = di_enterrepo;
+}
+
+#else
+
+/************************************************************************
+ * data search iterator
+ */
+
static void
dataiterator_newdata(Dataiterator *di)
{
return;
if (di->solvid >= 0)
dp += data->incoreoffset[di->solvid - data->start];
- else
- dp += data->extraoffset[-1 - di->solvid - data->extrastart];
dp = data_read_id(dp, &schema);
Id *keyp = data->schemadata + data->schemata[schema];
if (keyname)
break;
if (k == 0)
return;
- dp = forward_to_key(data, k, schema, dp);
+ dp = forward_to_key(data, k, keyp, dp);
if (!dp)
return;
keyp = kp - 1;
const char *match, int flags)
{
di->flags = flags;
- if (p)
+ if (p > 0)
{
di->solvid = p;
di->flags |= __SEARCH_ONESOLVABLE;
return dataiterator_match_int_real(di, flags, vmatch);
}
-static Repokey solvablekeys[RPM_RPMDBID - SOLVABLE_NAME + 1] = {
- { SOLVABLE_NAME, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_ARCH, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_EVR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_VENDOR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_PROVIDES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_OBSOLETES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_CONFLICTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_REQUIRES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_RECOMMENDS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_SUGGESTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_SUPPLEMENTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_ENHANCES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { RPM_RPMDBID, REPOKEY_TYPE_U32, 0, KEY_STORAGE_SOLVABLE },
-};
-
int
dataiterator_step(Dataiterator *di)
{
{
if (di->state)
{
+ /* we're stepping through solvable data, 1 -> SOLVABLE_NAME... */
if (di->idp)
{
+ /* we're stepping through an id array */
Id *idp = di->idp;
if (*idp)
{
/* Send end-of-element. See above for keyp[-1]. */
di->kv.eof = 1;
di->key = di->data->keys + di->keyp[-1];
- di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ if (di->subschema)
+ di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ else
+ {
+ di->dp = data_read_id(di->dp, &di->subschema);
+ di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ di->subschema = 0;
+ }
di->subnum--;
}
else
{
if (!(di->flags & SEARCH_EXTRA))
goto skiprepo;
- di->solvid = -1;
- if (di->solvid < -repo->nextra)
- goto skiprepo;
+ goto skiprepo;
}
}
else
{
- --di->solvid;
- if (di->solvid < -repo->nextra)
{
skiprepo:;
Pool *pool = di->repo->pool;
}
}
di->data = repo->repodata - 1;
- if (di->solvid < 0
- || (di->flags & SEARCH_NO_STORAGE_SOLVABLE))
+ if ((di->flags & SEARCH_NO_STORAGE_SOLVABLE))
continue;
static Id zeroid = 0;
di->keyp = &zeroid;
di->state = 1;
goto restart;
}
- if ((di->solvid < 0 && (-1 - di->solvid) >= data->extrastart && (-1 - di->solvid) < (data->extrastart + data->nextra))
- || (di->solvid >= 0 && di->solvid >= data->start && di->solvid < data->end))
+ if ((di->solvid >= 0 && di->solvid >= data->start && di->solvid < data->end))
{
dataiterator_newdata(di);
if (di->nextkeydp)
}
di->dp = data_fetch(di->dp, &di->kv, di->key);
}
- if (di->key->type == REPOKEY_TYPE_COUNTED)
+ if (di->key->type == REPOKEY_TYPE_FIXARRAY)
{
di->subnum = di->kv.num;
di->subschema = di->kv.id;
di->kv.eof = 0;
di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
}
+ if (di->key->type == REPOKEY_TYPE_FLEXARRAY)
+ {
+ di->subnum = di->kv.num;
+ di->kv.eof = 0;
+ di->dp = data_read_id(di->dp, &di->subschema);
+ di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ di->subschema = 0;
+ }
}
weg2:
if (!di->match
{
dataiterator_skip_solvable(di);
/* We're done with all solvables and all extra things for this repo. */
- di->solvid = -1 - di->repo->nextra;
+ di->solvid = -1;
}
void
di->solvid = repo->start - 1;
}
+#endif
+
+/************************************************************************
+ * data modify functions
+ */
+
/* extend repodata so that it includes solvables p */
void
repodata_extend(Repodata *data, Id p)
}
void
-repodata_extend_extra(Repodata *data, int nextra)
-{
- if (nextra <= data->nextra)
- return;
- if (data->extraattrs)
- {
- data->extraattrs = sat_extend(data->extraattrs, data->nextra, nextra - data->nextra, sizeof(Id), REPODATA_BLOCK);
- memset(data->extraattrs + data->nextra, 0, (nextra - data->nextra) * sizeof (Id));
- }
- data->extraoffset = sat_extend(data->extraoffset, data->nextra, nextra - data->nextra, sizeof(Id), REPODATA_BLOCK);
- memset(data->extraoffset + data->nextra, 0, (nextra - data->nextra) * sizeof(Id));
- data->nextra = nextra;
-}
-
-void
repodata_extend_block(Repodata *data, Id start, Id num)
{
if (!num)
#define REPODATA_ATTRDATA_BLOCK 1023
#define REPODATA_ATTRIDDATA_BLOCK 63
-static inline Id
-get_new_struct(Repodata *data)
+
+Id
+repodata_new_handle(Repodata *data)
{
- /* Make sure to never give out struct id 0. */
- if (!data->structs)
+ if (!data->nxattrs)
{
- data->structs = sat_extend(0, 0, 2, sizeof(Id *), REPODATA_BLOCK);
- data->structs[0] = 0;
- data->structs[1] = 0;
- data->nstructs = 2;
- return 1;
+ data->xattrs = sat_calloc_block(1, sizeof(Id *), REPODATA_BLOCK);
+ data->nxattrs = 2;
}
- data->structs = sat_extend(data->structs, data->nstructs, 1, sizeof(Id *), REPODATA_BLOCK);
- data->structs[data->nstructs] = 0;
- return data->nstructs++;
+ data->xattrs = sat_extend(data->xattrs, data->nxattrs, 1, sizeof(Id *), REPODATA_BLOCK);
+ data->xattrs[data->nxattrs] = 0;
+ return -(data->nxattrs++);
}
-static Id
-repodata_get_handle_int(Repodata *data, Id entry)
+static inline Id **
+repodata_get_attrp(Repodata *data, Id handle)
{
- Id *ap;
- if (!data->attrs && entry >= 0)
+ if (handle == REPOENTRY_META)
{
- data->attrs = sat_calloc_block(data->end - data->start, sizeof(Id),
- REPODATA_BLOCK);
+ if (!data->xattrs)
+ {
+ data->xattrs = sat_calloc_block(1, sizeof(Id *), REPODATA_BLOCK);
+ data->nxattrs = 2;
+ }
}
- else if (!data->extraattrs && entry < 0)
- data->extraattrs = sat_calloc_block(data->nextra, sizeof(Id), REPODATA_BLOCK);
- if (entry < 0)
- ap = &data->extraattrs[-1 - entry];
- else
- ap = &data->attrs[entry];
- if (!*ap)
- *ap = get_new_struct(data);
- return *ap;
-}
-
-Id
-repodata_get_handle(Repodata *data, Id entry)
-{
- return repodata_get_handle_int(data, entry);
+ if (handle < 0)
+ return data->xattrs - handle;
+ if (handle < data->start || handle >= data->end)
+ repodata_extend(data, handle);
+ if (!data->attrs)
+ data->attrs = sat_calloc_block(data->end - data->start, sizeof(Id *), REPODATA_BLOCK);
+ return data->attrs + (handle - data->start);
}
static void
repodata_insert_keyid(Repodata *data, Id handle, Id keyid, Id val, int overwrite)
{
Id *pp;
- Id *ap;
+ Id *ap, **app;
int i;
- ap = data->structs[handle];
+
+ app = repodata_get_attrp(data, handle);
+ ap = *app;
i = 0;
if (ap)
{
i = pp - ap;
}
ap = sat_extend(ap, i, 3, sizeof(Id), REPODATA_ATTRS_BLOCK);
- data->structs[handle] = ap;
+ *app = ap;
pp = ap + i;
*pp++ = keyid;
*pp++ = val;
*pp = 0;
}
+
void
repodata_set(Repodata *data, Id handle, Repokey *key, Id val)
{
Id keyid;
- /* find key in keys */
- for (keyid = 1; keyid < data->nkeys; keyid++)
- if (data->keys[keyid].name == key->name && data->keys[keyid].type == key->type)
- {
- if ((key->type == REPOKEY_TYPE_CONSTANT || key->type == REPOKEY_TYPE_CONSTANTID) && key->size != data->keys[keyid].size)
- continue;
- break;
- }
- if (keyid == data->nkeys)
- {
- /* allocate new key */
- data->keys = sat_realloc2(data->keys, data->nkeys + 1, sizeof(Repokey));
- data->keys[data->nkeys++] = *key;
- if (data->verticaloffset)
- {
- data->verticaloffset = sat_realloc2(data->verticaloffset, data->nkeys, sizeof(Id));
- data->verticaloffset[data->nkeys - 1] = 0;
- }
- }
+ keyid = repodata_key2id(data, key, 1);
repodata_insert_keyid(data, handle, keyid, val, 1);
}
repodata_add_array(Repodata *data, Id handle, Id keyname, Id keytype, int entrysize)
{
int oldsize;
- Id *ida, *pp;
+ Id *ida, *pp, **ppp;
if (handle == data->lasthandle && data->keys[data->lastkey].name == keyname && data->keys[data->lastkey].type == keytype && data->attriddatalen == data->lastdatalen)
{
data->lastdatalen += entrysize;
return;
}
- pp = data->structs[handle];
+ ppp = repodata_get_attrp(data, handle);
+ pp = *ppp;
if (pp)
for (; *pp; pp += 2)
if (data->keys[*pp].name == keyname && data->keys[*pp].type == keytype)
repodata_add_idarray(data, handle, keyname, id);
}
-Id
-repodata_create_struct(Repodata *data, Id handle, Id keyname)
+void
+repodata_add_fixarray(Repodata *data, Id handle, Id keyname, Id ghandle)
+{
+ repodata_add_array(data, handle, keyname, REPOKEY_TYPE_FIXARRAY, 1);
+ data->attriddata[data->attriddatalen++] = ghandle;
+ data->attriddata[data->attriddatalen++] = 0;
+}
+
+void
+repodata_add_flexarray(Repodata *data, Id handle, Id keyname, Id ghandle)
{
- Id newhandle = get_new_struct(data);
- repodata_add_array(data, handle, keyname, REPOKEY_TYPE_COUNTED, 1);
- data->attriddata[data->attriddatalen++] = newhandle;
+ repodata_add_array(data, handle, keyname, REPOKEY_TYPE_FLEXARRAY, 1);
+ data->attriddata[data->attriddatalen++] = ghandle;
data->attriddata[data->attriddatalen++] = 0;
- return newhandle;
}
void
repodata_merge_attrs(Repodata *data, Id dest, Id src)
{
Id *keyp;
- if (dest == src
- || !(keyp = data->structs[src < 0
- ? data->extraattrs[-1 - src]
- : data->attrs[src]]))
+ if (dest == src || !(keyp = data->attrs[src]))
return;
- dest = repodata_get_handle_int(data, dest);
for (; *keyp; keyp += 2)
repodata_insert_keyid(data, dest, keyp[0], keyp[1], 0);
}
-/*********************************/
+
+
+
+/**********************************************************************/
/* unify with repo_write! */
#define EXTDATA_BLOCK 1023
-#define SCHEMATA_BLOCK 31
-#define SCHEMATADATA_BLOCK 255
struct extdata {
unsigned char *buf;
/*********************************/
static void
-addschema_prepare(Repodata *data, Id *schematacache)
-{
- int h, len, i;
- Id *sp;
-
- memset(schematacache, 0, 256 * sizeof(Id));
- for (i = 0; i < data->nschemata; i++)
- {
- for (sp = data->schemadata + data->schemata[i], h = 0; *sp; len++)
- h = h * 7 + *sp++;
- h &= 255;
- schematacache[h] = i + 1;
- }
- data->schemadata = sat_extend_resize(data->schemadata, data->schemadatalen, sizeof(Id), SCHEMATADATA_BLOCK);
- data->schemata = sat_extend_resize(data->schemata, data->nschemata, sizeof(Id), SCHEMATA_BLOCK);
-}
-
-static Id
-addschema(Repodata *data, Id *schema, Id *schematacache)
-{
- int h, len;
- Id *sp, cid;
-
- for (sp = schema, len = 0, h = 0; *sp; len++)
- h = h * 7 + *sp++;
- h &= 255;
- len++;
-
- cid = schematacache[h];
- if (cid)
- {
- cid--;
- if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
- return cid;
- /* cache conflict */
- for (cid = 0; cid < data->nschemata; cid++)
- if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
- return cid;
- }
- /* a new one. make room. */
- data->schemadata = sat_extend(data->schemadata, data->schemadatalen, len, sizeof(Id), SCHEMATADATA_BLOCK);
- data->schemata = sat_extend(data->schemata, data->nschemata, 1, sizeof(Id), SCHEMATA_BLOCK);
- /* add schema */
- memcpy(data->schemadata + data->schemadatalen, schema, len * sizeof(Id));
- data->schemata[data->nschemata] = data->schemadatalen;
- data->schemadatalen += len;
- schematacache[h] = data->nschemata + 1;
-#if 0
-fprintf(stderr, "addschema: new schema\n");
-#endif
- return data->nschemata++;
-}
-
-static void
repodata_serialize_key(Repodata *data, struct extdata *newincore,
struct extdata *newvincore,
- Id *schema, Id *schematacache,
+ Id *schema,
Repokey *key, Id val)
{
/* Otherwise we have a new value. Parse it into the internal
}
switch (key->type)
{
- case REPOKEY_TYPE_VOID:
- case REPOKEY_TYPE_CONSTANT:
- case REPOKEY_TYPE_CONSTANTID:
- break;
- case REPOKEY_TYPE_STR:
- data_addblob(xd, data->attrdata + val, strlen((char *)(data->attrdata + val)) + 1);
- break;
- case REPOKEY_TYPE_MD5:
- data_addblob(xd, data->attrdata + val, SIZEOF_MD5);
- break;
- case REPOKEY_TYPE_SHA1:
- data_addblob(xd, data->attrdata + val, SIZEOF_SHA1);
- break;
- case REPOKEY_TYPE_ID:
- case REPOKEY_TYPE_NUM:
- case REPOKEY_TYPE_DIR:
- data_addid(xd, val);
- break;
- case REPOKEY_TYPE_IDARRAY:
+ case REPOKEY_TYPE_VOID:
+ case REPOKEY_TYPE_CONSTANT:
+ case REPOKEY_TYPE_CONSTANTID:
+ break;
+ case REPOKEY_TYPE_STR:
+ data_addblob(xd, data->attrdata + val, strlen((char *)(data->attrdata + val)) + 1);
+ break;
+ case REPOKEY_TYPE_MD5:
+ data_addblob(xd, data->attrdata + val, SIZEOF_MD5);
+ break;
+ case REPOKEY_TYPE_SHA1:
+ data_addblob(xd, data->attrdata + val, SIZEOF_SHA1);
+ break;
+ case REPOKEY_TYPE_SHA256:
+ data_addblob(xd, data->attrdata + val, SIZEOF_SHA256);
+ break;
+ case REPOKEY_TYPE_ID:
+ case REPOKEY_TYPE_NUM:
+ case REPOKEY_TYPE_DIR:
+ data_addid(xd, val);
+ break;
+ case REPOKEY_TYPE_IDARRAY:
+ for (ida = data->attriddata + val; *ida; ida++)
+ data_addideof(xd, ida[0], ida[1] ? 0 : 1);
+ break;
+ case REPOKEY_TYPE_DIRNUMNUMARRAY:
+ for (ida = data->attriddata + val; *ida; ida += 3)
+ {
+ data_addid(xd, ida[0]);
+ data_addid(xd, ida[1]);
+ data_addideof(xd, ida[2], ida[3] ? 0 : 1);
+ }
+ break;
+ case REPOKEY_TYPE_DIRSTRARRAY:
+ for (ida = data->attriddata + val; *ida; ida += 2)
+ {
+ data_addideof(xd, ida[0], ida[2] ? 0 : 1);
+ data_addblob(xd, data->attrdata + ida[1], strlen((char *)(data->attrdata + ida[1])) + 1);
+ }
+ break;
+ case REPOKEY_TYPE_FIXARRAY:
+ {
+ int num = 0;
+ schemaid = 0;
for (ida = data->attriddata + val; *ida; ida++)
- data_addideof(xd, ida[0], ida[1] ? 0 : 1);
- break;
- case REPOKEY_TYPE_DIRNUMNUMARRAY:
- for (ida = data->attriddata + val; *ida; ida += 3)
- {
- data_addid(xd, ida[0]);
- data_addid(xd, ida[1]);
- data_addideof(xd, ida[2], ida[3] ? 0 : 1);
- }
- break;
- case REPOKEY_TYPE_DIRSTRARRAY:
- for (ida = data->attriddata + val; *ida; ida += 2)
{
- data_addideof(xd, ida[0], ida[2] ? 0 : 1);
- data_addblob(xd, data->attrdata + ida[1], strlen((char *)(data->attrdata + ida[1])) + 1);
- }
- break;
- case REPOKEY_TYPE_COUNTED:
- {
- int num = 0;
- schemaid = 0;
- for (ida = data->attriddata + val; *ida; ida++)
- {
#if 0
- fprintf(stderr, "serialize struct %d\n", *ida);
+ fprintf(stderr, "serialize struct %d\n", *ida);
#endif
- sp = schema;
- Id *kp = data->structs[*ida];
- if (!kp)
- continue;
- num++;
- for (;*kp; kp += 2)
- {
+ sp = schema;
+ Id *kp = data->xattrs[-*ida];
+ if (!kp)
+ continue;
+ num++;
+ for (;*kp; kp += 2)
+ {
#if 0
- fprintf(stderr, " %s:%d\n", id2str(data->repo->pool, data->keys[*kp].name), kp[1]);
+ fprintf(stderr, " %s:%d\n", id2str(data->repo->pool, data->keys[*kp].name), kp[1]);
#endif
- *sp++ = *kp;
- }
- *sp = 0;
- if (!schemaid)
- schemaid = addschema(data, schema, schematacache);
- else if (schemaid != addschema(data, schema, schematacache))
- {
- fprintf(stderr, " not yet implemented: substructs with different schemas\n");
- exit(1);
- }
+ *sp++ = *kp;
+ }
+ *sp = 0;
+ if (!schemaid)
+ schemaid = repodata_schema2id(data, schema, 1);
+ else if (schemaid != repodata_schema2id(data, schema, 0))
+ {
+ fprintf(stderr, " not yet implemented: substructs with different schemas\n");
+ exit(1);
+ }
#if 0
- fprintf(stderr, " schema %d\n", schemaid);
+ fprintf(stderr, " schema %d\n", schemaid);
#endif
- }
- if (!num)
- break;
- data_addid(xd, num);
- data_addid(xd, schemaid);
- for (ida = data->attriddata + val; *ida; ida++)
- {
- Id *kp = data->structs[*ida];
- if (!kp)
- continue;
- for (;*kp; kp += 2)
- {
- repodata_serialize_key(data, newincore, newvincore,
- schema, schematacache,
- data->keys + *kp, kp[1]);
- }
- }
+ }
+ if (!num)
break;
- }
- default:
- fprintf(stderr, "don't know how to handle type %d\n", key->type);
- exit(1);
+ data_addid(xd, num);
+ data_addid(xd, schemaid);
+ for (ida = data->attriddata + val; *ida; ida++)
+ {
+ Id *kp = data->xattrs[-*ida];
+ if (!kp)
+ continue;
+ for (;*kp; kp += 2)
+ {
+ repodata_serialize_key(data, newincore, newvincore,
+ schema, data->keys + *kp, kp[1]);
+ }
+ }
+ break;
+ }
+ case REPOKEY_TYPE_FLEXARRAY:
+ {
+ int num = 0;
+ for (ida = data->attriddata + val; *ida; ida++)
+ num++;
+ data_addid(xd, num);
+ for (ida = data->attriddata + val; *ida; ida++)
+ {
+ Id *kp = data->xattrs[-*ida];
+ if (!kp)
+ {
+ data_addid(xd, 0); /* XXX */
+ continue;
+ }
+ sp = schema;
+ for (;*kp; kp += 2)
+ *sp++ = *kp;
+ *sp = 0;
+ schemaid = repodata_schema2id(data, schema, 1);
+ data_addid(xd, schemaid);
+ kp = data->xattrs[-*ida];
+ for (;*kp; kp += 2)
+ {
+ repodata_serialize_key(data, newincore, newvincore,
+ schema, data->keys + *kp, kp[1]);
+ }
+ }
+ break;
+ }
+ default:
+ fprintf(stderr, "don't know how to handle type %d\n", key->type);
+ exit(1);
}
if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
{
void
repodata_internalize(Repodata *data)
{
- Repokey *key;
+ Repokey *key, solvkey;
Id entry, nentry;
- Id schematacache[256];
- Id schemaid, *schema, *sp, oldschema, *keyp, *seen;
+ Id schemaid, *schema, *sp, oldschema, *keyp, *keypstart, *seen;
unsigned char *dp, *ndp;
int newschema, oldcount;
struct extdata newincore;
struct extdata newvincore;
+ Id solvkeyid;
- if (!data->attrs && !data->extraattrs)
+ if (!data->attrs && !data->xattrs)
return;
newvincore.buf = data->vincore;
newvincore.len = data->vincorelen;
+ /* find the solvables key, create if needed */
+ memset(&solvkey, 0, sizeof(solvkey));
+ solvkey.name = REPOSITORY_SOLVABLES;
+ solvkey.type = REPOKEY_TYPE_FLEXARRAY;
+ solvkey.size = 0;
+ solvkey.storage = KEY_STORAGE_INCORE;
+ solvkeyid = repodata_key2id(data, &solvkey, data->end != data->start ? 1 : 0);
+
schema = sat_malloc2(data->nkeys, sizeof(Id));
seen = sat_malloc2(data->nkeys, sizeof(Id));
/* Merge the data already existing (in data->schemata, ->incoredata and
friends) with the new attributes in data->attrs[]. */
nentry = data->end - data->start;
- addschema_prepare(data, schematacache);
memset(&newincore, 0, sizeof(newincore));
- data_addid(&newincore, 0);
- if (!data->attrs)
- nentry = 0;
- for (entry = data->extraattrs ? -data->nextra : 0; entry < nentry; entry++)
+ data_addid(&newincore, 0); /* start data at offset 1 */
+
+ data->mainschema = 0;
+ data->mainschemaoffsets = sat_free(data->mainschemaoffsets);
+
+ /* join entry data */
+ /* we start with the meta data, entry -1 */
+ for (entry = -1; entry < nentry; entry++)
{
- Id handle;
memset(seen, 0, data->nkeys * sizeof(Id));
- sp = schema;
- dp = entry2data(data, entry);
- if (data->incoredata)
- dp = data_read_id(dp, &oldschema);
- else
- oldschema = 0;
+ oldschema = 0;
+ dp = data->incoredata;
+ if (dp)
+ {
+ dp += entry >= 0 ? data->incoreoffset[entry] : 1;
+ dp = data_read_id(dp, &oldschema);
+ }
#if 0
fprintf(stderr, "oldschema %d\n", oldschema);
fprintf(stderr, "schemata %d\n", data->schemata[oldschema]);
/* seen: -1: old data 0: skipped >0: id + 1 */
newschema = 0;
oldcount = 0;
+ sp = schema;
for (keyp = data->schemadata + data->schemata[oldschema]; *keyp; keyp++)
{
if (seen[*keyp])
*sp++ = *keyp;
oldcount++;
}
- handle = entry < 0 ? data->extraattrs[-1 - entry] : data->attrs[entry];
- keyp = data->structs[handle];
+ if (entry >= 0)
+ keyp = data->attrs ? data->attrs[entry] : 0;
+ else
+ {
+ /* strip solvables key */
+ *sp = 0;
+ for (sp = keyp = schema; *sp; sp++)
+ if (*sp != solvkeyid)
+ *keyp++ = *sp;
+ else
+ oldcount--;
+ sp = keyp;
+ seen[solvkeyid] = 0;
+ keyp = data->xattrs ? data->xattrs[1] : 0;
+ }
if (keyp)
for (; *keyp; keyp += 2)
{
}
seen[*keyp] = keyp[1] + 1;
}
- *sp++ = 0;
+ if (entry < 0 && data->end != data->start)
+ {
+ *sp++ = solvkeyid;
+ newschema = 1;
+ }
+ *sp = 0;
if (newschema)
/* Ideally we'd like to sort the new schema here, to ensure
schema equality independend of the ordering. We can't do that
yet. For once see below (old ids need to come before new ids).
An additional difficulty is that we also need to move
the values with the keys. */
- schemaid = addschema(data, schema, schematacache);
+ schemaid = repodata_schema2id(data, schema, 1);
else
schemaid = oldschema;
(oX being the old keyids (possibly overwritten), and nX being
the new keyids). This rules out sorting the keyids in order
to ensure a small schema count. */
- if (entry < 0)
- data->extraoffset[-1 - entry] = newincore.len;
- else
- data->incoreoffset[entry] = newincore.len;
+ if (entry >= 0)
+ data->incoreoffset[entry] = newincore.len;
data_addid(&newincore, schemaid);
- for (keyp = data->schemadata + data->schemata[schemaid]; *keyp; keyp++)
+ if (entry == -1)
{
+ data->mainschema = schemaid;
+ data->mainschemaoffsets = sat_calloc(sp - schema, sizeof(Id));
+ }
+ keypstart = data->schemadata + data->schemata[schemaid];
+ for (keyp = keypstart; *keyp; keyp++)
+ {
+ if (entry == -1)
+ data->mainschemaoffsets[keyp - keypstart] = newincore.len;
+ if (*keyp == solvkeyid)
+ {
+ /* add flexarray entry count */
+ data_addid(&newincore, data->end - data->start);
+ break;
+ }
key = data->keys + *keyp;
#if 0
fprintf(stderr, "internalize %d:%s:%s\n", entry, id2str(data->repo->pool, key->name), id2str(data->repo->pool, key->type));
ndp = data_skip(ndp, REPOKEY_TYPE_ID);
}
else if (key->storage == KEY_STORAGE_INCORE)
- ndp = data_skip_recursive(data, dp, key);
+ ndp = data_skip_key(data, dp, key);
oldcount--;
}
if (seen[*keyp] == -1)
/* Otherwise we have a new value. Parse it into the internal
form. */
repodata_serialize_key(data, &newincore, &newvincore,
- schema, schematacache,
- key, seen[*keyp] - 1);
+ schema, key, seen[*keyp] - 1);
}
dp = ndp;
}
- if (data->structs[handle])
- data->structs[handle] = sat_free(data->structs[handle]);
+ if (entry >= 0 && data->attrs && data->attrs[entry])
+ data->attrs[entry] = sat_free(data->attrs[entry]);
}
- for (entry = 0; entry < data->nstructs; entry++)
- if (data->structs[entry])
- sat_free(data->structs[entry]);
- data->structs = sat_free(data->structs);
+ /* free all xattrs */
+ for (entry = 0; entry < data->nxattrs; entry++)
+ if (data->xattrs[entry])
+ sat_free(data->xattrs[entry]);
+ data->xattrs = sat_free(data->xattrs);
+ data->nxattrs = 0;
+
data->lasthandle = 0;
data->lastkey = 0;
data->lastdatalen = 0;
sat_free(schema);
sat_free(seen);
+ repodata_free_schemahash(data);
sat_free(data->incoredata);
data->incoredata = newincore.buf;
data->vincorelen = newvincore.len;
data->attrs = sat_free(data->attrs);
- data->extraattrs = sat_free(data->extraattrs);
data->attrdata = sat_free(data->attrdata);
data->attriddata = sat_free(data->attriddata);
data->attrdatalen = 0;
data->attriddatalen = 0;
}
-Id
-repodata_str2dir(Repodata *data, const char *dir, int create)
-{
- Id id, parent;
- const char *dire;
-
- parent = 0;
- while (*dir == '/' && dir[1] == '/')
- dir++;
- if (*dir == '/' && !dir[1])
- return 1;
- while (*dir)
- {
- dire = strchrnul(dir, '/');
- if (data->localpool)
- id = stringpool_strn2id(&data->spool, dir, dire - dir, create);
- else
- id = strn2id(data->repo->pool, dir, dire - dir, create);
- if (!id)
- return 0;
- parent = dirpool_add_dir(&data->dirpool, parent, id, create);
- if (!parent)
- return 0;
- if (!*dire)
- break;
- dir = dire + 1;
- while (*dir == '/')
- dir++;
- }
- return parent;
-}
-
-const char *
-repodata_dir2str(Repodata *data, Id did, const char *suf)
-{
- Pool *pool = data->repo->pool;
- int l = 0;
- Id parent, comp;
- const char *comps;
- char *p;
-
- if (!did)
- return suf ? suf : "";
- parent = did;
- while (parent)
- {
- comp = dirpool_compid(&data->dirpool, parent);
- comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
- l += strlen(comps);
- parent = dirpool_parent(&data->dirpool, parent);
- if (parent)
- l++;
- }
- if (suf)
- l += strlen(suf) + 1;
- p = pool_alloctmpspace(pool, l + 1) + l;
- *p = 0;
- if (suf)
- {
- p -= strlen(suf);
- strcpy(p, suf);
- *--p = '/';
- }
- parent = did;
- while (parent)
- {
- comp = dirpool_compid(&data->dirpool, parent);
- comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
- l = strlen(comps);
- p -= l;
- strncpy(p, comps, l);
- parent = dirpool_parent(&data->dirpool, parent);
- if (parent)
- *--p = '/';
- }
- return p;
-}
-
-unsigned int
-repodata_compress_page(unsigned char *page, unsigned int len, unsigned char *cpage, unsigned int max)
-{
- return compress_buf(page, len, cpage, max);
-}
-
-#define SOLV_ERROR_EOF 3
-
-static inline unsigned int
-read_u32(FILE *fp)
-{
- int c, i;
- unsigned int x = 0;
-
- for (i = 0; i < 4; i++)
- {
- c = getc(fp);
- if (c == EOF)
- return 0;
- x = (x << 8) | c;
- }
- return x;
-}
-
-#define SOLV_ERROR_EOF 3
-#define SOLV_ERROR_CORRUPT 6
-
-/* Try to either setup on-demand paging (using FP as backing
- file), or in case that doesn't work (FP not seekable) slurps in
- all pages and deactivates paging. */
-void
-repodata_read_or_setup_pages(Repodata *data, unsigned int pagesz, unsigned int blobsz)
-{
- FILE *fp = data->fp;
- unsigned int npages;
- unsigned int i;
- unsigned int can_seek;
- long cur_file_ofs;
- unsigned char buf[BLOB_PAGESIZE];
-
- if (pagesz != BLOB_PAGESIZE)
- {
- /* We could handle this by slurping in everything. */
- data->error = SOLV_ERROR_CORRUPT;
- return;
- }
- can_seek = 1;
- if ((cur_file_ofs = ftell(fp)) < 0)
- can_seek = 0;
- clearerr(fp);
- if (can_seek)
- data->pagefd = dup(fileno(fp));
- if (data->pagefd == -1)
- can_seek = 0;
-
-#ifdef DEBUG_PAGING
- fprintf (stderr, "can %sseek\n", can_seek ? "" : "NOT ");
-#endif
- npages = (blobsz + BLOB_PAGESIZE - 1) / BLOB_PAGESIZE;
-
- data->num_pages = npages;
- data->pages = sat_malloc2(npages, sizeof(data->pages[0]));
-
- /* If we can't seek on our input we have to slurp in everything. */
- if (!can_seek)
- data->blob_store = sat_malloc(npages * BLOB_PAGESIZE);
- for (i = 0; i < npages; i++)
- {
- unsigned int in_len = read_u32(fp);
- unsigned int compressed = in_len & 1;
- Attrblobpage *p = data->pages + i;
- in_len >>= 1;
-#ifdef DEBUG_PAGING
- fprintf (stderr, "page %d: len %d (%scompressed)\n",
- i, in_len, compressed ? "" : "not ");
-#endif
- if (can_seek)
- {
- cur_file_ofs += 4;
- p->mapped_at = -1;
- p->file_offset = cur_file_ofs;
- p->file_size = in_len * 2 + compressed;
- if (fseek(fp, in_len, SEEK_CUR) < 0)
- {
- perror ("fseek");
- fprintf (stderr, "can't seek after we thought we can\n");
- /* We can't fall back to non-seeking behaviour as we already
- read over some data pages without storing them away. */
- data->error = SOLV_ERROR_EOF;
- close(data->pagefd);
- data->pagefd = -1;
- return;
- }
- cur_file_ofs += in_len;
- }
- else
- {
- unsigned int out_len;
- void *dest = data->blob_store + i * BLOB_PAGESIZE;
- p->mapped_at = i * BLOB_PAGESIZE;
- p->file_offset = 0;
- p->file_size = 0;
- /* We can't seek, so suck everything in. */
- if (fread(compressed ? buf : dest, in_len, 1, fp) != 1)
- {
- perror("fread");
- data->error = SOLV_ERROR_EOF;
- return;
- }
- if (compressed)
- {
- out_len = unchecked_decompress_buf(buf, in_len, dest, BLOB_PAGESIZE);
- if (out_len != BLOB_PAGESIZE && i < npages - 1)
- {
- data->error = SOLV_ERROR_CORRUPT;
- return;
- }
- }
- }
- }
-}
-
void
repodata_disable_paging(Repodata *data)
{
if (maybe_load_repodata(data, 0)
&& data->num_pages)
- load_page_range (data, 0, data->num_pages - 1);
+ repodata_load_page_range(data, 0, data->num_pages - 1);
}
+
/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/
long file_size;
} Attrblobpage;
+typedef struct _Repopos {
+ Id schema;
+ Id dp;
+} Repopos;
+
typedef struct _Repodata {
struct _Repo *repo; /* back pointer to repo */
int state; /* available, stub or error */
void (*loadcallback)(struct _Repodata *);
- char *location; /* E.g. filename or the like */
- char *checksum; /* Checksum of the file */
- unsigned nchecksum; /* Length of the checksum */
- unsigned checksumtype; /* Type of checksum */
int start; /* start of solvables this repodata is valid for */
int end; /* last solvable + 1 of this repodata */
- int extrastart;
- int nextra;
FILE *fp; /* file pointer of solv file */
int error; /* corrupt solv file */
struct _Repokey *keys; /* keys, first entry is always zero */
unsigned int nkeys; /* length of keys array */
+ unsigned char keybits[32]; /* keyname hash */
Id *schemata; /* schema -> offset into schemadata */
unsigned int nschemata; /* number of schemata */
-
Id *schemadata; /* schema storage */
unsigned int schemadatalen; /* schema storage size */
+ Id *schematahash; /* unification helper */
Stringpool spool; /* local string pool */
int localpool; /* is local string pool used */
Dirpool dirpool; /* local dir pool */
+ Id mainschema;
+ Id *mainschemaoffsets;
+
unsigned char *incoredata; /* in-core data (flat_attrs) */
unsigned int incoredatalen; /* data len (attr_next_free) */
unsigned int incoredatafree; /* free data len */
Id *incoreoffset; /* offset for all entries (ent2attr) */
- Id *extraoffset; /* offset for all extra entries */
Id *verticaloffset; /* offset for all verticals, nkeys elements */
Id lastverticaloffset; /* end of verticals */
unsigned char *vincore;
unsigned int vincorelen;
- Id *attrs; /* un-internalized attributes */
- Id *extraattrs; /* Same, but for extra objects. */
+ Id **attrs; /* un-internalized attributes */
+ Id **xattrs; /* anonymous handles */
+ int nxattrs;
+
unsigned char *attrdata; /* their string data space */
unsigned int attrdatalen;
Id *attriddata; /* their id space */
unsigned int attriddatalen;
- Id **structs; /* key-value lists */
- unsigned int nstructs;
/* array cache */
Id lasthandle;
Id lastkey;
Id lastdatalen;
- Id *addedfileprovides;
+ Repopos pos;
+
} Repodata;
+#define REPOENTRY_META -1
+#define REPOENTRY_POS -2
+#define REPOENTRY_SUBSCHEMA -3 /* internal! */
/*-----
* management functions
*/
void repodata_init(Repodata *data, struct _Repo *repo, int localpool);
void repodata_extend(Repodata *data, Id p);
-void repodata_extend_extra(Repodata *data, int nextra);
void repodata_extend_block(Repodata *data, Id p, int num);
void repodata_free(Repodata *data);
/* internalize repodata into .solv, required before writing out a .solv file */
void repodata_internalize(Repodata *data);
+Id repodata_key2id(Repodata *data, struct _Repokey *key, int create);
+Id repodata_schema2id(Repodata *data, Id *schema, int create);
+
+static inline int
+repodata_precheck_keyname(Repodata *data, Id keyname)
+{
+ unsigned char x = data->keybits[(keyname >> 3) & (sizeof(data->keybits) - 1)];
+ return x && (x & (1 << (keyname & 7))) ? 1 : 0;
+}
/*----
* access functions
void repodata_search(Repodata *data, Id entry, Id keyname, int (*callback)(void *cbdata, Solvable *s, Repodata *data, struct _Repokey *key, struct _KeyValue *kv), void *cbdata);
/* lookup functions */
-Id repodata_lookup_id(Repodata *data, Id entry, Id keyid);
-const char *repodata_lookup_str(Repodata *data, Id entry, Id keyid);
-int repodata_lookup_num(Repodata *data, Id entry, Id keyid, unsigned int *value);
-int repodata_lookup_void(Repodata *data, Id entry, Id keyid);
-const unsigned char *repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyid, Id *typep);
+Id repodata_lookup_id(Repodata *data, Id entry, Id keyname);
+const char *repodata_lookup_str(Repodata *data, Id entry, Id keyname);
+int repodata_lookup_num(Repodata *data, Id entry, Id keyname, unsigned int *value);
+int repodata_lookup_void(Repodata *data, Id entry, Id keyname);
+const unsigned char *repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyname, Id *typep);
/*-----
* data assignment functions
*/
-/* Returns a handle for the attributes of ENTRY. ENTRY >= 0
- corresponds to data associated with a solvable, ENTRY < 0 is
- extra data. The returned handle is used in the various repodata_set_*
- functions to add attributes to it. */
-Id repodata_get_handle(Repodata *data, Id entry);
+/* create an anonymous handle. useful for substructures like
+ * fixarray/flexarray */
+Id repodata_new_handle(Repodata *data);
/* basic types: void, num, string, Id */
/* Arrays */
void repodata_add_idarray(Repodata *data, Id handle, Id keyname, Id id);
-void repodata_add_poolstr_array(Repodata *data, Id handle, Id keyname,
- const char *str);
-/* Creates a new substructure. Returns a handle for it (usable with the
- other repodata_{set,add}_* functions. */
-Id repodata_create_struct(Repodata *data, Id handle, Id keyname);
+void repodata_add_poolstr_array(Repodata *data, Id handle, Id keyname, const char *str);
+void repodata_add_fixarray(Repodata *data, Id handle, Id keyname, Id ghandle);
+void repodata_add_flexarray(Repodata *data, Id handle, Id keyname, Id ghandle);
+
/*-----
* data management
const char *repodata_dir2str(Repodata *data, Id did, const char *suf);
const char *repodata_chk2str(Repodata *data, Id type, const unsigned char *buf);
-/* internal */
-unsigned int repodata_compress_page(unsigned char *, unsigned int, unsigned char *, unsigned int);
-void repodata_read_or_setup_pages(Repodata *data, unsigned int pagesz, unsigned int blobsz);
-
#endif /* SATSOLVER_REPODATA_H */
dp = data_read_id(dp, &kv->id);
dp = data_read_id(dp, &kv->num);
return data_read_ideof(dp, &kv->num2, &kv->eof);
- case REPOKEY_TYPE_COUNTED:
+ case REPOKEY_TYPE_FIXARRAY:
dp = data_read_id(dp, &kv->num);
return data_read_id(dp, &kv->id);
+ case REPOKEY_TYPE_FLEXARRAY:
+ return data_read_id(dp, &kv->num);
default:
return 0;
}
return dp + 1;
dp++;
}
- case REPOKEY_TYPE_COUNTED:
- while ((*dp & 0x80) != 0)
- dp++;
- dp++;
- while ((*dp & 0x80) != 0)
- dp++;
- return dp + 1;
default:
return 0;
}
return dp + 1;
dp++;
}
- case REPOKEY_TYPE_COUNTED:
- while ((*dp & 0x80) != 0)
- dp++;
- dp++;
- while ((*dp & 0x80) != 0)
- dp++;
- return dp + 1;
default:
return 0;
}
}
-unsigned char * data_skip_recursive(Repodata *data, unsigned char *dp,
- Repokey *key);
+unsigned char *data_skip_key(Repodata *data, unsigned char *dp, Repokey *key);
#endif /* SATSOLVER_REPOPACK */
--- /dev/null
+/*
+ * Copyright (c) 2007-2008, Novell Inc.
+ *
+ * This program is licensed under the BSD license, read LICENSE.BSD
+ * for further information
+ */
+
+/*
+ * repopage.c
+ *
+ * Pageing and compression functions for the vertical repository data
+ *
+ */
+
+#define _XOPEN_SOURCE 500
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <time.h>
+
+#include "repo.h"
+#include "repopage.h"
+
+
+
+#define BLOCK_SIZE (65536*1)
+#if BLOCK_SIZE <= 65536
+typedef __uint16_t Ref;
+#else
+typedef __uint32_t Ref;
+#endif
+
+/*
+ The format is tailored for fast decompression (i.e. only byte based),
+ and skewed to ASCII content (highest bit often not set):
+
+ a 0LLLLLLL
+ - self-describing ASCII character hex L
+ b 100lllll <l+1 bytes>
+ - literal run of length l+1
+ c 101oolll <8o>
+ - back ref of length l+2, at offset -(o+1) (o < 1 << 10)
+ d 110lllll <8o>
+ - back ref of length l+2+8, at offset -(o+1) (o < 1 << 8)
+ e 1110llll <8o> <8o>
+ - back ref of length l+3, at offset -(o+1) (o < 1 << 16)
+ f1 1111llll <8l> <8o> <8o>
+ - back ref, length l+19 (l < 1<<12), offset -(o+1) (o < 1<<16)
+ f2 11110lll <8l> <8o> <8o>
+ - back ref, length l+19 (l < 1<<11), offset -(o+1) (o < 1<<16)
+ g 11111lll <8l> <8o> <8o> <8o>
+ - back ref, length l+5 (l < 1<<11), offset -(o+1) (o < 1<<24)
+
+ Generally for a literal of length L we need L+1 bytes, hence it is
+ better to encode also very short backrefs (2 chars) as backrefs if
+ their offset is small, as that only needs two bytes. Except if we
+ already have a literal run, in that case it's better to append there,
+ instead of breaking it for a backref. So given a potential backref
+ at offset O, length L the strategy is as follows:
+
+ L < 2 : encode as 1-literal
+ L == 2, O > 1024 : encode as 1-literal
+ L == 2, have already literals: encode as 1-literal
+ O = O - 1
+ L >= 2, L <= 9, O < 1024 : encode as c
+ L >= 10, L <= 41, O < 256 : encode as d
+ else we have either O >= 1024, or L >= 42:
+ O >= 65536, L < 5 : encode as 1-literal
+ O >= 65536, L >= 5 : encode as g (L capped to 2047+5)
+ L < 3 : encode as 1-literal
+ L >= 3, L <= 18, O < 65536 : encode as e
+ L >= 19, O < 65536 : encode as f2 (L capped to 2047+19)
+*/
+
+
+static unsigned int
+compress_buf(const unsigned char *in, unsigned int in_len,
+ unsigned char *out, unsigned int out_len)
+{
+ unsigned int oo = 0; //out-offset
+ unsigned int io = 0; //in-offset
+#define HS (65536)
+ Ref htab[HS];
+ Ref hnext[BLOCK_SIZE];
+ memset (htab, -1, sizeof (htab));
+ memset (hnext, -1, sizeof (hnext));
+ unsigned int litofs = 0;
+ while (io + 2 < in_len)
+ {
+ /* Search for a match of the string starting at IN, we have at
+ least three characters. */
+ unsigned int hval = in[io] | in[io + 1] << 8 | in[io + 2] << 16;
+ unsigned int try, mlen, mofs, tries;
+ hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
+ hval = hval & (HS - 1);
+ try = htab[hval];
+ hnext[io] = htab[hval];
+ htab[hval] = io;
+ mlen = 0;
+ mofs = 0;
+
+ for (tries = 0; try != -1 && tries < 12; tries++)
+ {
+ if (try < io
+ && in[try] == in[io] && in[try + 1] == in[io + 1])
+ {
+ mlen = 2;
+ mofs = (io - try) - 1;
+ break;
+ }
+ try = hnext[try];
+ }
+ for (; try != -1 && tries < 12; tries++)
+ {
+ //assert (mlen >= 2);
+ //assert (io + mlen < in_len);
+ /* Try a match starting from [io] with the strings at [try].
+ That's only sensible if TRY actually is before IO (can happen
+ with uninit hash table). If we have a previous match already
+ we're only going to take the new one if it's longer, hence
+ check the potentially last character. */
+ if (try < io && in[try + mlen] == in[io + mlen])
+ {
+ unsigned int this_len, this_ofs;
+ if (memcmp (in + try, in + io, mlen))
+ goto no_match;
+ this_len = mlen + 1;
+ /* Now try extending the match by more characters. */
+ for (;
+ io + this_len < in_len
+ && in[try + this_len] == in[io + this_len]; this_len++)
+ ;
+#if 0
+ unsigned int testi;
+ for (testi = 0; testi < this_len; testi++)
+ assert (in[try + testi] == in[io + testi]);
+#endif
+ this_ofs = (io - try) - 1;
+ /*if (this_ofs > 65535)
+ goto no_match; */
+#if 0
+ assert (this_len >= 2);
+ assert (this_len >= mlen);
+ assert (this_len > mlen || (this_len == mlen && this_ofs > mofs));
+#endif
+ mlen = this_len, mofs = this_ofs;
+ /* If our match extends up to the end of input, no next
+ match can become better. This is not just an
+ optimization, it establishes a loop invariant
+ (io + mlen < in_len). */
+ if (io + mlen >= in_len)
+ goto match_done;
+ }
+ no_match:
+ try = hnext[try];
+ /*if (io - try - 1 >= 65536)
+ break;*/
+ }
+
+match_done:
+ if (mlen)
+ {
+ //fprintf (stderr, "%d %d\n", mlen, mofs);
+ if (mlen == 2 && (litofs || mofs >= 1024))
+ mlen = 0;
+ /*else if (mofs >= 65536)
+ mlen = 0;*/
+ else if (mofs >= 65536)
+ {
+ if (mlen >= 2048 + 5)
+ mlen = 2047 + 5;
+ else if (mlen < 5)
+ mlen = 0;
+ }
+ else if (mlen < 3)
+ mlen = 0;
+ /*else if (mlen >= 4096 + 19)
+ mlen = 4095 + 19;*/
+ else if (mlen >= 2048 + 19)
+ mlen = 2047 + 19;
+ /* Skip this match if the next character would deliver a better one,
+ but only do this if we have the chance to really extend the
+ length (i.e. our current length isn't yet the (conservative)
+ maximum). */
+ if (mlen && mlen < (2048 + 5) && io + 3 < in_len)
+ {
+ unsigned int hval =
+ in[io + 1] | in[io + 2] << 8 | in[io + 3] << 16;
+ unsigned int try;
+ hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
+ hval = hval & (HS - 1);
+ try = htab[hval];
+ if (try < io + 1
+ && in[try] == in[io + 1] && in[try + 1] == in[io + 2])
+ {
+ unsigned int this_len;
+ this_len = 2;
+ for (;
+ io + 1 + this_len < in_len
+ && in[try + this_len] == in[io + 1 + this_len];
+ this_len++)
+ ;
+ if (this_len >= mlen)
+ mlen = 0;
+ }
+ }
+ }
+ if (!mlen)
+ {
+ if (!litofs)
+ litofs = io + 1;
+ io++;
+ }
+ else
+ {
+ if (litofs)
+ {
+ litofs--;
+ unsigned litlen = io - litofs;
+ //fprintf (stderr, "lit: %d\n", litlen);
+ while (litlen)
+ {
+ unsigned int easy_sz;
+ /* Emit everything we can as self-describers. As soon as
+ we hit a byte we can't emit as such we're going to emit
+ a length descriptor anyway, so we can as well include
+ bytes < 0x80 which might follow afterwards in that run. */
+ for (easy_sz = 0;
+ easy_sz < litlen && in[litofs + easy_sz] < 0x80;
+ easy_sz++)
+ ;
+ if (easy_sz)
+ {
+ if (oo + easy_sz >= out_len)
+ return 0;
+ memcpy (out + oo, in + litofs, easy_sz);
+ litofs += easy_sz;
+ oo += easy_sz;
+ litlen -= easy_sz;
+ if (!litlen)
+ break;
+ }
+ if (litlen <= 32)
+ {
+ if (oo + 1 + litlen >= out_len)
+ return 0;
+ out[oo++] = 0x80 | (litlen - 1);
+ while (litlen--)
+ out[oo++] = in[litofs++];
+ break;
+ }
+ else
+ {
+ /* Literal length > 32, so chunk it. */
+ if (oo + 1 + 32 >= out_len)
+ return 0;
+ out[oo++] = 0x80 | 31;
+ memcpy (out + oo, in + litofs, 32);
+ oo += 32;
+ litofs += 32;
+ litlen -= 32;
+ }
+ }
+ litofs = 0;
+ }
+
+ //fprintf (stderr, "ref: %d @ %d\n", mlen, mofs);
+
+ if (mlen >= 2 && mlen <= 9 && mofs < 1024)
+ {
+ if (oo + 2 >= out_len)
+ return 0;
+ out[oo++] = 0xa0 | ((mofs & 0x300) >> 5) | (mlen - 2);
+ out[oo++] = mofs & 0xff;
+ }
+ else if (mlen >= 10 && mlen <= 41 && mofs < 256)
+ {
+ if (oo + 2 >= out_len)
+ return 0;
+ out[oo++] = 0xc0 | (mlen - 10);
+ out[oo++] = mofs;
+ }
+ else if (mofs >= 65536)
+ {
+ assert (mlen >= 5 && mlen < 2048 + 5);
+ if (oo + 5 >= out_len)
+ return 0;
+ out[oo++] = 0xf8 | ((mlen - 5) >> 8);
+ out[oo++] = (mlen - 5) & 0xff;
+ out[oo++] = mofs & 0xff;
+ out[oo++] = (mofs >> 8) & 0xff;
+ out[oo++] = mofs >> 16;
+ }
+ else if (mlen >= 3 && mlen <= 18)
+ {
+ assert (mofs < 65536);
+ if (oo + 3 >= out_len)
+ return 0;
+ out[oo++] = 0xe0 | (mlen - 3);
+ out[oo++] = mofs & 0xff;
+ out[oo++] = mofs >> 8;
+ }
+ else
+ {
+ assert (mlen >= 19 && mlen <= 4095 + 19 && mofs < 65536);
+ if (oo + 4 >= out_len)
+ return 0;
+ out[oo++] = 0xf0 | ((mlen - 19) >> 8);
+ out[oo++] = (mlen - 19) & 0xff;
+ out[oo++] = mofs & 0xff;
+ out[oo++] = mofs >> 8;
+ }
+ /* Insert the hashes for the compressed run [io..io+mlen-1].
+ For [io] we have it already done at the start of the loop.
+ So it's from [io+1..io+mlen-1], and we need three chars per
+ hash, so the accessed characters will be [io+1..io+mlen-1+2],
+ ergo io+mlen+1 < in_len. */
+ mlen--;
+ io++;
+ while (mlen--)
+ {
+ if (io + 2 < in_len)
+ {
+ unsigned int hval =
+ in[io] | in[io + 1] << 8 | in[io + 2] << 16;
+ hval = (hval ^ (hval << 5) ^ (hval >> 5)) - hval * 5;
+ hval = hval & (HS - 1);
+ hnext[io] = htab[hval];
+ htab[hval] = io;
+ }
+ io++;
+ };
+ }
+ }
+ /* We might have some characters left. */
+ if (io < in_len && !litofs)
+ litofs = io + 1;
+ io = in_len;
+ if (litofs)
+ {
+ litofs--;
+ unsigned litlen = io - litofs;
+ //fprintf (stderr, "lit: %d\n", litlen);
+ while (litlen)
+ {
+ unsigned int easy_sz;
+ /* Emit everything we can as self-describers. As soon as we hit a
+ byte we can't emit as such we're going to emit a length
+ descriptor anyway, so we can as well include bytes < 0x80 which
+ might follow afterwards in that run. */
+ for (easy_sz = 0; easy_sz < litlen && in[litofs + easy_sz] < 0x80;
+ easy_sz++)
+ ;
+ if (easy_sz)
+ {
+ if (oo + easy_sz >= out_len)
+ return 0;
+ memcpy (out + oo, in + litofs, easy_sz);
+ litofs += easy_sz;
+ oo += easy_sz;
+ litlen -= easy_sz;
+ if (!litlen)
+ break;
+ }
+ if (litlen <= 32)
+ {
+ if (oo + 1 + litlen >= out_len)
+ return 0;
+ out[oo++] = 0x80 | (litlen - 1);
+ while (litlen--)
+ out[oo++] = in[litofs++];
+ break;
+ }
+ else
+ {
+ /* Literal length > 32, so chunk it. */
+ if (oo + 1 + 32 >= out_len)
+ return 0;
+ out[oo++] = 0x80 | 31;
+ memcpy (out + oo, in + litofs, 32);
+ oo += 32;
+ litofs += 32;
+ litlen -= 32;
+ }
+ }
+ litofs = 0;
+ }
+ return oo;
+}
+
+static unsigned int
+unchecked_decompress_buf(const unsigned char *in, unsigned int in_len,
+ unsigned char *out,
+ unsigned int out_len __attribute__((unused)))
+{
+ unsigned char *orig_out = out;
+ const unsigned char *in_end = in + in_len;
+ while (in < in_end)
+ {
+ unsigned int first = *in++;
+ int o;
+ switch (first >> 4)
+ {
+ default:
+ /* This default case can't happen, but GCCs VRP is not strong
+ enough to see this, so make this explicitely not fall to
+ the end of the switch, so that we don't have to initialize
+ o above. */
+ continue;
+ case 0: case 1:
+ case 2: case 3:
+ case 4: case 5:
+ case 6: case 7:
+ //a 0LLLLLLL
+ //fprintf (stderr, "lit: 1\n");
+ *out++ = first;
+ continue;
+ case 8: case 9:
+ //b 100lllll <l+1 bytes>
+ {
+ unsigned int l = first & 31;
+ //fprintf (stderr, "lit: %d\n", l);
+ do
+ *out++ = *in++;
+ while (l--);
+ continue;
+ }
+ case 10: case 11:
+ //c 101oolll <8o>
+ {
+ o = first & (3 << 3);
+ o = (o << 5) | *in++;
+ first = (first & 7) + 2;
+ break;
+ }
+ case 12: case 13:
+ //d 110lllll <8o>
+ {
+ o = *in++;
+ first = (first & 31) + 10;
+ break;
+ }
+ case 14:
+ // e 1110llll <8o> <8o>
+ {
+ o = in[0] | (in[1] << 8);
+ in += 2;
+ first = first & 31;
+ first += 3;
+ break;
+ }
+ case 15:
+ //f1 1111llll <8o> <8o> <8l>
+ //f2 11110lll <8o> <8o> <8l>
+ // g 11111lll <8o> <8o> <8o> <8l>
+ {
+ first = first & 15;
+ if (first >= 8)
+ {
+ first = (((first - 8) << 8) | in[0]) + 5;
+ o = in[1] | (in[2] << 8) | (in[3] << 16);
+ in += 4;
+ }
+ else
+ {
+ first = ((first << 8) | in[0]) + 19;
+ o = in[1] | (in[2] << 8);
+ in += 3;
+ }
+ break;
+ }
+ }
+ //fprintf (stderr, "ref: %d @ %d\n", first, o);
+ o++;
+ o = -o;
+#if 0
+ /* We know that first will not be zero, and this loop structure is
+ better optimizable. */
+ do
+ {
+ *out = *(out - o);
+ out++;
+ }
+ while (--first);
+#else
+ switch (first)
+ {
+ case 18: *out = *(out + o); out++;
+ case 17: *out = *(out + o); out++;
+ case 16: *out = *(out + o); out++;
+ case 15: *out = *(out + o); out++;
+ case 14: *out = *(out + o); out++;
+ case 13: *out = *(out + o); out++;
+ case 12: *out = *(out + o); out++;
+ case 11: *out = *(out + o); out++;
+ case 10: *out = *(out + o); out++;
+ case 9: *out = *(out + o); out++;
+ case 8: *out = *(out + o); out++;
+ case 7: *out = *(out + o); out++;
+ case 6: *out = *(out + o); out++;
+ case 5: *out = *(out + o); out++;
+ case 4: *out = *(out + o); out++;
+ case 3: *out = *(out + o); out++;
+ case 2: *out = *(out + o); out++;
+ case 1: *out = *(out + o); out++;
+ case 0: break;
+ default:
+ /* Duff duff :-) */
+ switch (first & 15)
+ {
+ do
+ {
+ case 0: *out = *(out + o); out++;
+ case 15: *out = *(out + o); out++;
+ case 14: *out = *(out + o); out++;
+ case 13: *out = *(out + o); out++;
+ case 12: *out = *(out + o); out++;
+ case 11: *out = *(out + o); out++;
+ case 10: *out = *(out + o); out++;
+ case 9: *out = *(out + o); out++;
+ case 8: *out = *(out + o); out++;
+ case 7: *out = *(out + o); out++;
+ case 6: *out = *(out + o); out++;
+ case 5: *out = *(out + o); out++;
+ case 4: *out = *(out + o); out++;
+ case 3: *out = *(out + o); out++;
+ case 2: *out = *(out + o); out++;
+ case 1: *out = *(out + o); out++;
+ }
+ while ((int)(first -= 16) > 0);
+ }
+ break;
+ }
+#endif
+ }
+ return out - orig_out;
+}
+
+/**********************************************************************/
+
+unsigned char *
+repodata_load_page_range(Repodata *data, unsigned int pstart, unsigned int pend)
+{
+/* Make sure all pages from PSTART to PEND (inclusive) are loaded,
+ and are consecutive. Return a pointer to the mapping of PSTART. */
+ unsigned char buf[BLOB_PAGESIZE];
+ unsigned int i;
+
+ /* Quick check in case all pages are there already and consecutive. */
+ for (i = pstart; i <= pend; i++)
+ if (data->pages[i].mapped_at == -1
+ || (i > pstart
+ && data->pages[i].mapped_at
+ != data->pages[i-1].mapped_at + BLOB_PAGESIZE))
+ break;
+ if (i > pend)
+ return data->blob_store + data->pages[pstart].mapped_at;
+
+ if (data->pagefd == -1)
+ return 0;
+
+ /* Ensure that we can map the numbers of pages we need at all. */
+ if (pend - pstart + 1 > data->ncanmap)
+ {
+ unsigned int oldcan = data->ncanmap;
+ data->ncanmap = pend - pstart + 1;
+ if (data->ncanmap < 4)
+ data->ncanmap = 4;
+ data->mapped = sat_realloc2(data->mapped, data->ncanmap, sizeof(data->mapped[0]));
+ memset (data->mapped + oldcan, 0, (data->ncanmap - oldcan) * sizeof (data->mapped[0]));
+ data->blob_store = sat_realloc2(data->blob_store, data->ncanmap, BLOB_PAGESIZE);
+#ifdef DEBUG_PAGING
+ fprintf (stderr, "PAGE: can map %d pages\n", data->ncanmap);
+#endif
+ }
+
+ /* Now search for "cheap" space in our store. Space is cheap if it's either
+ free (very cheap) or contains pages we search for anyway. */
+
+ /* Setup cost array. */
+ unsigned int cost[data->ncanmap];
+ for (i = 0; i < data->ncanmap; i++)
+ {
+ unsigned int pnum = data->mapped[i];
+ if (pnum == 0)
+ cost[i] = 0;
+ else
+ {
+ pnum--;
+ Attrblobpage *p = data->pages + pnum;
+ assert (p->mapped_at != -1);
+ if (pnum >= pstart && pnum <= pend)
+ cost[i] = 1;
+ else
+ cost[i] = 3;
+ }
+ }
+
+ /* And search for cheapest space. */
+ unsigned int best_cost = -1;
+ unsigned int best = 0;
+ unsigned int same_cost = 0;
+ for (i = 0; i + pend - pstart < data->ncanmap; i++)
+ {
+ unsigned int c = cost[i];
+ unsigned int j;
+ for (j = 0; j < pend - pstart + 1; j++)
+ c += cost[i+j];
+ if (c < best_cost)
+ best_cost = c, best = i;
+ else if (c == best_cost)
+ same_cost++;
+ /* A null cost won't become better. */
+ if (c == 0)
+ break;
+ }
+ /* If all places have the same cost we would thrash on slot 0. Avoid
+ this by doing a round-robin strategy in this case. */
+ if (same_cost == data->ncanmap - pend + pstart - 1)
+ best = data->rr_counter++ % (data->ncanmap - pend + pstart);
+
+ /* So we want to map our pages from [best] to [best+pend-pstart].
+ Use a very simple strategy, which doesn't make the best use of
+ our resources, but works. Throw away all pages in that range
+ (even ours) then copy around ours (in case they were outside the
+ range) or read them in. */
+ for (i = best; i < best + pend - pstart + 1; i++)
+ {
+ unsigned int pnum = data->mapped[i];
+ if (pnum--
+ /* If this page is exactly at the right place already,
+ no need to evict it. */
+ && pnum != pstart + i - best)
+ {
+ /* Evict this page. */
+#ifdef DEBUG_PAGING
+ fprintf (stderr, "PAGE: evict page %d from %d\n", pnum, i);
+#endif
+ cost[i] = 0;
+ data->mapped[i] = 0;
+ data->pages[pnum].mapped_at = -1;
+ }
+ }
+
+ /* Everything is free now. Read in the pages we want. */
+ for (i = pstart; i <= pend; i++)
+ {
+ Attrblobpage *p = data->pages + i;
+ unsigned int pnum = i - pstart + best;
+ void *dest = data->blob_store + pnum * BLOB_PAGESIZE;
+ if (p->mapped_at != -1)
+ {
+ if (p->mapped_at != pnum * BLOB_PAGESIZE)
+ {
+#ifdef DEBUG_PAGING
+ fprintf (stderr, "PAGECOPY: %d to %d\n", i, pnum);
+#endif
+ /* Still mapped somewhere else, so just copy it from there. */
+ memcpy (dest, data->blob_store + p->mapped_at, BLOB_PAGESIZE);
+ data->mapped[p->mapped_at / BLOB_PAGESIZE] = 0;
+ }
+ }
+ else
+ {
+ unsigned int in_len = p->file_size;
+ unsigned int compressed = in_len & 1;
+ in_len >>= 1;
+#ifdef DEBUG_PAGING
+ fprintf (stderr, "PAGEIN: %d to %d", i, pnum);
+#endif
+ if (pread(data->pagefd, compressed ? buf : dest, in_len, p->file_offset) != in_len)
+ {
+ perror ("mapping pread");
+ return 0;
+ }
+ if (compressed)
+ {
+ unsigned int out_len;
+ out_len = unchecked_decompress_buf(buf, in_len,
+ dest, BLOB_PAGESIZE);
+ if (out_len != BLOB_PAGESIZE && i < data->num_pages - 1)
+ {
+ fprintf(stderr, "can't decompress\n");
+ return 0;
+ }
+#ifdef DEBUG_PAGING
+ fprintf (stderr, " (expand %d to %d)", in_len, out_len);
+#endif
+ }
+#ifdef DEBUG_PAGING
+ fprintf (stderr, "\n");
+#endif
+ }
+ p->mapped_at = pnum * BLOB_PAGESIZE;
+ data->mapped[pnum] = i + 1;
+ }
+ return data->blob_store + best * BLOB_PAGESIZE;
+}
+
+unsigned char *
+repodata_fetch_vertical(Repodata *data, Repokey *key, Id off, Id len)
+{
+ unsigned char *dp;
+ if (!len)
+ return 0;
+ if (off >= data->lastverticaloffset)
+ {
+ off -= data->lastverticaloffset;
+ if (off + len > data->vincorelen)
+ return 0;
+ return data->vincore + off;
+ }
+ if (off + len > key->size)
+ return 0;
+ /* we now have the offset, go into vertical */
+ off += data->verticaloffset[key - data->keys];
+ /* fprintf(stderr, "key %d page %d\n", key->name, off / BLOB_PAGESIZE); */
+ dp = repodata_load_page_range(data, off / BLOB_PAGESIZE, (off + len - 1) / BLOB_PAGESIZE);
+ if (dp)
+ dp += off % BLOB_PAGESIZE;
+ return dp;
+}
+
+unsigned int
+repodata_compress_page(unsigned char *page, unsigned int len, unsigned char *cpage, unsigned int max)
+{
+ return compress_buf(page, len, cpage, max);
+}
+
+#define SOLV_ERROR_EOF 3
+#define SOLV_ERROR_CORRUPT 6
+
+static inline unsigned int
+read_u32(FILE *fp)
+{
+ int c, i;
+ unsigned int x = 0;
+
+ for (i = 0; i < 4; i++)
+ {
+ c = getc(fp);
+ if (c == EOF)
+ return 0;
+ x = (x << 8) | c;
+ }
+ return x;
+}
+
+/* Try to either setup on-demand paging (using FP as backing
+ file), or in case that doesn't work (FP not seekable) slurps in
+ all pages and deactivates paging. */
+void
+repodata_read_or_setup_pages(Repodata *data, unsigned int pagesz, unsigned int blobsz)
+{
+ FILE *fp = data->fp;
+ unsigned int npages;
+ unsigned int i;
+ unsigned int can_seek;
+ long cur_file_ofs;
+ unsigned char buf[BLOB_PAGESIZE];
+
+ if (pagesz != BLOB_PAGESIZE)
+ {
+ /* We could handle this by slurping in everything. */
+ data->error = SOLV_ERROR_CORRUPT;
+ return;
+ }
+ can_seek = 1;
+ if ((cur_file_ofs = ftell(fp)) < 0)
+ can_seek = 0;
+ clearerr(fp);
+ if (can_seek)
+ data->pagefd = dup(fileno(fp));
+ if (data->pagefd == -1)
+ can_seek = 0;
+
+#ifdef DEBUG_PAGING
+ fprintf (stderr, "can %sseek\n", can_seek ? "" : "NOT ");
+#endif
+ npages = (blobsz + BLOB_PAGESIZE - 1) / BLOB_PAGESIZE;
+
+ data->num_pages = npages;
+ data->pages = sat_malloc2(npages, sizeof(data->pages[0]));
+
+ /* If we can't seek on our input we have to slurp in everything. */
+ if (!can_seek)
+ data->blob_store = sat_malloc(npages * BLOB_PAGESIZE);
+ for (i = 0; i < npages; i++)
+ {
+ unsigned int in_len = read_u32(fp);
+ unsigned int compressed = in_len & 1;
+ Attrblobpage *p = data->pages + i;
+ in_len >>= 1;
+#ifdef DEBUG_PAGING
+ fprintf (stderr, "page %d: len %d (%scompressed)\n",
+ i, in_len, compressed ? "" : "not ");
+#endif
+ if (can_seek)
+ {
+ cur_file_ofs += 4;
+ p->mapped_at = -1;
+ p->file_offset = cur_file_ofs;
+ p->file_size = in_len * 2 + compressed;
+ if (fseek(fp, in_len, SEEK_CUR) < 0)
+ {
+ perror ("fseek");
+ fprintf (stderr, "can't seek after we thought we can\n");
+ /* We can't fall back to non-seeking behaviour as we already
+ read over some data pages without storing them away. */
+ data->error = SOLV_ERROR_EOF;
+ close(data->pagefd);
+ data->pagefd = -1;
+ return;
+ }
+ cur_file_ofs += in_len;
+ }
+ else
+ {
+ unsigned int out_len;
+ void *dest = data->blob_store + i * BLOB_PAGESIZE;
+ p->mapped_at = i * BLOB_PAGESIZE;
+ p->file_offset = 0;
+ p->file_size = 0;
+ /* We can't seek, so suck everything in. */
+ if (fread(compressed ? buf : dest, in_len, 1, fp) != 1)
+ {
+ perror("fread");
+ data->error = SOLV_ERROR_EOF;
+ return;
+ }
+ if (compressed)
+ {
+ out_len = unchecked_decompress_buf(buf, in_len, dest, BLOB_PAGESIZE);
+ if (out_len != BLOB_PAGESIZE && i < npages - 1)
+ {
+ data->error = SOLV_ERROR_CORRUPT;
+ return;
+ }
+ }
+ }
+ }
+}
+
+
+#ifdef STANDALONE
+
+static void
+transfer_file (FILE * from, FILE * to, int compress)
+{
+ unsigned char inb[BLOCK_SIZE];
+ unsigned char outb[BLOCK_SIZE];
+ while (!feof (from) && !ferror (from))
+ {
+ unsigned int in_len, out_len;
+ if (compress)
+ {
+ in_len = fread (inb, 1, BLOCK_SIZE, from);
+ if (in_len)
+ {
+ unsigned char *b = outb;
+ out_len = compress_buf (inb, in_len, outb, sizeof (outb));
+ if (!out_len)
+ b = inb, out_len = in_len;
+ if (fwrite (&out_len, sizeof (out_len), 1, to) != 1)
+ {
+ perror ("write size");
+ exit (1);
+ }
+ if (fwrite (b, out_len, 1, to) != 1)
+ {
+ perror ("write data");
+ exit (1);
+ }
+ }
+ }
+ else
+ {
+ if (fread (&in_len, sizeof (in_len), 1, from) != 1)
+ {
+ if (feof (from))
+ return;
+ perror ("can't read size");
+ exit (1);
+ }
+ if (fread (inb, in_len, 1, from) != 1)
+ {
+ perror ("can't read data");
+ exit (1);
+ }
+ out_len =
+ unchecked_decompress_buf (inb, in_len, outb, sizeof (outb));
+ if (fwrite (outb, out_len, 1, to) != 1)
+ {
+ perror ("can't write output");
+ exit (1);
+ }
+ }
+ }
+}
+
+/* Just for benchmarking purposes. */
+static void
+dumb_memcpy (void *dest, const void *src, unsigned int len)
+{
+ char *d = dest;
+ const char *s = src;
+ while (len--)
+ *d++ = *s++;
+}
+
+static void
+benchmark (FILE * from)
+{
+ unsigned char inb[BLOCK_SIZE];
+ unsigned char outb[BLOCK_SIZE];
+ unsigned int in_len = fread (inb, 1, BLOCK_SIZE, from);
+ unsigned int out_len;
+ if (!in_len)
+ {
+ perror ("can't read from input");
+ exit (1);
+ }
+
+ unsigned int calib_loop;
+ unsigned int per_loop;
+ unsigned int i, j;
+ clock_t start, end;
+ float seconds;
+
+#if 0
+ calib_loop = 1;
+ per_loop = 0;
+ start = clock ();
+ while ((clock () - start) < CLOCKS_PER_SEC / 4)
+ {
+ calib_loop *= 2;
+ for (i = 0; i < calib_loop; i++)
+ dumb_memcpy (outb, inb, in_len);
+ per_loop += calib_loop;
+ }
+
+ fprintf (stderr, "memcpy:\nCalibrated to %d iterations per loop\n",
+ per_loop);
+
+ start = clock ();
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < per_loop; j++)
+ dumb_memcpy (outb, inb, in_len);
+ end = clock ();
+ seconds = (end - start) / (float) CLOCKS_PER_SEC;
+ fprintf (stderr, "%.2f seconds == %.2f MB/s\n", seconds,
+ ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
+#endif
+
+ calib_loop = 1;
+ per_loop = 0;
+ start = clock ();
+ while ((clock () - start) < CLOCKS_PER_SEC / 4)
+ {
+ calib_loop *= 2;
+ for (i = 0; i < calib_loop; i++)
+ compress_buf(inb, in_len, outb, sizeof (outb));
+ per_loop += calib_loop;
+ }
+
+ fprintf(stderr, "compression:\nCalibrated to %d iterations per loop\n",
+ per_loop);
+
+ start = clock ();
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < per_loop; j++)
+ compress_buf (inb, in_len, outb, sizeof (outb));
+ end = clock ();
+ seconds = (end - start) / (float) CLOCKS_PER_SEC;
+ fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
+ ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
+
+ out_len = compress_buf(inb, in_len, outb, sizeof (outb));
+
+ calib_loop = 1;
+ per_loop = 0;
+ start = clock ();
+ while ((clock () - start) < CLOCKS_PER_SEC / 4)
+ {
+ calib_loop *= 2;
+ for (i = 0; i < calib_loop; i++)
+ unchecked_decompress_buf(outb, out_len, inb, sizeof (inb));
+ per_loop += calib_loop;
+ }
+
+ fprintf(stderr, "decompression:\nCalibrated to %d iterations per loop\n",
+ per_loop);
+
+ start = clock ();
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < per_loop; j++)
+ unchecked_decompress_buf(outb, out_len, inb, sizeof (inb));
+ end = clock();
+ seconds = (end - start) / (float) CLOCKS_PER_SEC;
+ fprintf(stderr, "%.2f seconds == %.2f MB/s\n", seconds,
+ ((long long) in_len * per_loop * 10) / (1024 * 1024 * seconds));
+}
+
+int
+main (int argc, char *argv[])
+{
+ int compress = 1;
+ if (argc > 1 && !strcmp (argv[1], "-d"))
+ compress = 0;
+ if (argc > 1 && !strcmp (argv[1], "-b"))
+ benchmark (stdin);
+ else
+ transfer_file (stdin, stdout, compress);
+ return 0;
+}
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (c) 2007-2008, Novell Inc.
+ *
+ * This program is licensed under the BSD license, read LICENSE.BSD
+ * for further information
+ */
+
+#define BLOB_PAGEBITS 15
+#define BLOB_PAGESIZE (1 << BLOB_PAGEBITS)
+
+/* load pages pstart..pend into consecutive memory, return address */
+unsigned char *repodata_load_page_range(Repodata *data, unsigned int pstart, unsigned int pend);
+
+/* compress a page, return compressed len */
+unsigned int repodata_compress_page(unsigned char *page, unsigned int len, unsigned char *cpage, unsigned int max);
+
+/* setup page data for repodata_load_page_range */
+void repodata_read_or_setup_pages(Repodata *data, unsigned int pagesz, unsigned int blobsz);
Id
solvable_lookup_id(Solvable *s, Id keyname)
{
- Repo *repo = s->repo;
- Pool *pool;
- Repodata *data;
- int i, j, n;
-
- if (!repo)
+ if (!s->repo)
return 0;
- pool = repo->pool;
- switch(keyname)
- {
- case SOLVABLE_NAME:
- return s->name;
- case SOLVABLE_ARCH:
- return s->arch;
- case SOLVABLE_EVR:
- return s->evr;
- case SOLVABLE_VENDOR:
- return s->vendor;
- }
- n = s - pool->solvables;
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- {
- if (n < data->start || n >= data->end)
- continue;
- for (j = 1; j < data->nkeys; j++)
- {
- if (data->keys[j].name == keyname && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID))
- {
- Id id = repodata_lookup_id(data, n - data->start, j);
- if (id)
- {
- if (data->localpool)
- id = repodata_globalize_id(data, id);
- return id;
- }
- }
- }
- }
- return 0;
+ return repo_lookup_id(s->repo, s - s->repo->pool->solvables, keyname);
}
const char *
solvable_lookup_str(Solvable *s, Id keyname)
{
- Repo *repo = s->repo;
- Pool *pool;
- Repodata *data;
- int i, j, n;
- const char *str;
-
- if (!repo)
+ if (!s->repo)
return 0;
- pool = repo->pool;
- switch(keyname)
- {
- case SOLVABLE_NAME:
- return id2str(pool, s->name);
- case SOLVABLE_ARCH:
- return id2str(pool, s->arch);
- case SOLVABLE_EVR:
- return id2str(pool, s->evr);
- case SOLVABLE_VENDOR:
- return id2str(pool, s->vendor);
- }
- n = s - pool->solvables;
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- {
- if (n < data->start || n >= data->end)
- continue;
- for (j = 1; j < data->nkeys; j++)
- {
- if (data->keys[j].name == keyname && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID || data->keys[j].type == REPOKEY_TYPE_STR))
- {
- str = repodata_lookup_str(data, n - data->start, j);
- if (str)
- return str;
- }
- }
- }
- return 0;
+ return repo_lookup_str(s->repo, s - s->repo->pool->solvables, keyname);
}
const char *
Id *row;
if (!s->repo)
- return repo_lookup_str(s, keyname);
+ return 0;
pool = s->repo->pool;
if (!pool->nlanguages)
- return repo_lookup_str(s, keyname);
+ return solvable_lookup_str(s, keyname);
cols = pool->nlanguages + 1;
if (!pool->languagecache)
{
*row = str2id(pool, p, 1);
sat_free(p);
}
- str = repo_lookup_str(s, *row);
+ str = solvable_lookup_str(s, *row);
if (str)
return str;
}
- return repo_lookup_str(s, keyname);
+ return solvable_lookup_str(s, keyname);
}
const char *
{
const char *str;
Id id = pool_id2langid(s->repo->pool, keyname, lang, 0);
- if (id && (str = repo_lookup_str(s, id)) != 0)
+ if (id && (str = solvable_lookup_str(s, id)) != 0)
return str;
}
- return repo_lookup_str(s, keyname);
+ return solvable_lookup_str(s, keyname);
}
unsigned int
solvable_lookup_num(Solvable *s, Id keyname, unsigned int notfound)
{
- Repo *repo = s->repo;
- Pool *pool;
- Repodata *data;
- int i, j, n;
-
- if (!repo)
+ if (!s->repo)
return 0;
- pool = repo->pool;
- if (keyname == RPM_RPMDBID)
- {
- if (repo->rpmdbid)
- return repo->rpmdbid[(s - pool->solvables) - repo->start];
- return notfound;
- }
- n = s - pool->solvables;
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- {
- if (n < data->start || n >= data->end)
- continue;
- for (j = 1; j < data->nkeys; j++)
- {
- if (data->keys[j].name == keyname
- && (data->keys[j].type == REPOKEY_TYPE_U32
- || data->keys[j].type == REPOKEY_TYPE_NUM
- || data->keys[j].type == REPOKEY_TYPE_CONSTANT))
- {
- unsigned int value;
- if (repodata_lookup_num(data, n - data->start, j, &value))
- return value;
- }
- }
- }
- return notfound;
+ return repo_lookup_num(s->repo, s - s->repo->pool->solvables, keyname, notfound);
}
int
solvable_lookup_void(Solvable *s, Id keyname)
{
- Repo *repo = s->repo;
- Pool *pool;
- Repodata *data;
- int i, j, n;
-
- if (!repo)
+ if (!s->repo)
return 0;
- pool = repo->pool;
- n = s - pool->solvables;
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- {
- if (n < data->start || n >= data->end)
- continue;
- for (j = 1; j < data->nkeys; j++)
- {
- if (data->keys[j].name == keyname
- && (data->keys[j].type == REPOKEY_TYPE_VOID))
- {
- if (repodata_lookup_void(data, n - data->start, j))
- return 1;
- }
- }
- }
- return 0;
+ return repo_lookup_void(s->repo, s - s->repo->pool->solvables, keyname);
}
int
{
if (n < data->start || n >= data->end)
continue;
- /* there are two ways of storing a bool */
+ /* there are two ways of storing a bool, as num == 1 or void */
for (j = 1; j < data->nkeys; j++)
{
- /* as a num == 1 */
if (data->keys[j].name == keyname
&& (data->keys[j].type == REPOKEY_TYPE_U32
|| data->keys[j].type == REPOKEY_TYPE_NUM
- || data->keys[j].type == REPOKEY_TYPE_CONSTANT))
+ || data->keys[j].type == REPOKEY_TYPE_CONSTANT
+ || data->keys[j].type == REPOKEY_TYPE_VOID))
{
unsigned int value;
- if (repodata_lookup_num(data, n - data->start, j, &value))
+ if (repodata_lookup_num(data, n, keyname, &value))
return value == 1;
- }
-
- /* as a void attribute, if it is there, then true */
- if (data->keys[j].name == keyname
- && (data->keys[j].type == REPOKEY_TYPE_VOID))
- {
- if (repodata_lookup_void(data, n - data->start, j))
+ if (repodata_lookup_void(data, n, keyname))
return 1;
}
}
solvable_lookup_bin_checksum(Solvable *s, Id keyname, Id *typep)
{
Repo *repo = s->repo;
- Pool *pool;
- Repodata *data;
- int i, j, n;
- *typep = 0;
if (!repo)
- return 0;
- pool = repo->pool;
- n = s - pool->solvables;
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
- if (n < data->start || n >= data->end)
- continue;
- for (j = 1; j < data->nkeys; j++)
- {
- if (data->keys[j].name == keyname
- && (data->keys[j].type == REPOKEY_TYPE_MD5
- || data->keys[j].type == REPOKEY_TYPE_SHA1
- || data->keys[j].type == REPOKEY_TYPE_SHA256))
- {
- const unsigned char *chk = repodata_lookup_bin_checksum(data, n - data->start, j, typep);
- if (chk)
- return chk;
- }
- }
+ *typep = 0;
+ return 0;
}
- return 0;
+ return repo_lookup_bin_checksum(repo, s - repo->pool->solvables, keyname, typep);
}
const char *
Id p;
int i;
- if (dq->count > 1 || inst)
- policy_filter_unwanted(solv, dq, inst, POLICY_MODE_CHOOSE);
+ /* FIXME: do we really need that inst handling? */
+ if (solv->distupgrade && inst && dq->count)
+ {
+ policy_filter_unwanted(solv, dq, 0, POLICY_MODE_CHOOSE);
+ for (i = 0; i < dq->count; i++)
+ if (solvable_identical(pool, pool->solvables + inst, pool->solvables + dq->elements[i]))
+ dq->elements[i] = inst;
+ }
+ else
+ {
+ if (dq->count > 1 || inst)
+ policy_filter_unwanted(solv, dq, inst, POLICY_MODE_CHOOSE);
+ }
i = 0;
if (dq->count > 1)
static int test_separate = 0;
+struct keyfilter_data {
+ char **languages;
+ int nlanguages;
+ int haveaddedfileprovides;
+ int haveexternal;
+};
+
static int
keyfilter_solv(Repo *data, Repokey *key, void *kfdata)
{
+ struct keyfilter_data *kd = kfdata;
int i;
const char *keyname;
+
if (test_separate && key->storage != KEY_STORAGE_SOLVABLE)
return KEY_STORAGE_DROPPED;
+ if (!kd->haveaddedfileprovides && key->name == REPOSITORY_ADDEDFILEPROVIDES)
+ return KEY_STORAGE_DROPPED;
+ if (!kd->haveexternal && key->name == REPOSITORY_EXTERNAL)
+ return KEY_STORAGE_DROPPED;
for (i = 0; verticals[i]; i++)
if (key->name == verticals[i])
return KEY_STORAGE_VERTICAL_OFFSET;
const char *keyname;
if (key->storage == KEY_STORAGE_SOLVABLE)
return KEY_STORAGE_DROPPED;
+ /* those two must only be in the main solv file */
+ if (key->name == REPOSITORY_EXTERNAL || key->name == REPOSITORY_ADDEDFILEPROVIDES)
+ return KEY_STORAGE_DROPPED;
for (i = 0; verticals[i]; i++)
if (key->name == verticals[i])
return KEY_STORAGE_VERTICAL_OFFSET;
return KEY_STORAGE_INCORE;
}
-struct keyfilter_other_data {
- char **languages;
- int nlanguages;
-};
-
static int
keyfilter_other(Repo *repo, Repokey *key, void *kfdata)
{
const char *name, *p;
- struct keyfilter_other_data *kd = kfdata;
+ struct keyfilter_data *kd = kfdata;
int i;
+ if (!kd->haveaddedfileprovides && key->name == REPOSITORY_ADDEDFILEPROVIDES)
+ return KEY_STORAGE_DROPPED;
+ if (!kd->haveexternal && key->name == REPOSITORY_EXTERNAL)
+ return KEY_STORAGE_DROPPED;
+
if (key->name == SOLVABLE_FILELIST || key->name == SOLVABLE_DISKUSAGE)
return KEY_STORAGE_DROPPED;
#define REPODATAFILE_BLOCK 15
+static void
+write_info(Repo *repo, FILE *fp, int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata), void *kfdata, Repodata *info, const char *location)
+{
+ Id h, *keyarray = 0;
+ int i;
+
+ repo_write(repo, fp, keyfilter, kfdata, &keyarray);
+ h = repodata_new_handle(info);
+ if (keyarray)
+ {
+ for (i = 0; keyarray[i]; i++)
+ repodata_add_idarray(info, h, REPOSITORY_KEYS, keyarray[i]);
+ }
+ sat_free(keyarray);
+ repodata_set_str(info, h, REPOSITORY_LOCATION, location);
+ repodata_add_flexarray(info, REPOENTRY_META, REPOSITORY_EXTERNAL, h);
+}
+
int
tool_write(Repo *repo, const char *basename, const char *attrname)
{
Repodata *data;
+ Repodata *info = 0;
Repokey *key;
- Repodatafile *fileinfos = 0;
- int nfileinfos = 0;
char **languages = 0;
int nlanguages = 0;
int i, j, k, l;
+ Id *addedfileprovides = 0;
+ struct keyfilter_data kd;
- fileinfos = sat_zextend(fileinfos, nfileinfos, 1, sizeof(Repodatafile), REPODATAFILE_BLOCK);
- pool_addfileprovides_ids(repo->pool, 0, &fileinfos[nfileinfos].addedfileprovides);
- for (i = 0; i < 32; i++)
- if (repo->rpmdbcookie[i])
- break;
- if (i < 32)
- fileinfos[nfileinfos].rpmdbcookie = repo->rpmdbcookie;
- if (fileinfos[nfileinfos].addedfileprovides || fileinfos[nfileinfos].rpmdbcookie)
- nfileinfos++;
+ memset(&kd, 0, sizeof(kd));
+ info = repo_add_repodata(repo, 0);
+ pool_addfileprovides_ids(repo->pool, 0, &addedfileprovides);
+ if (addedfileprovides && *addedfileprovides)
+ {
+ kd.haveaddedfileprovides = 1;
+ for (i = 0; addedfileprovides[i]; i++)
+ repodata_add_idarray(info, REPOENTRY_META, REPOSITORY_ADDEDFILEPROVIDES, addedfileprovides[i]);
+ }
+ sat_free(addedfileprovides);
if (basename)
{
- struct keyfilter_other_data kd;
char fn[4096];
FILE *fp;
int has_DU = 0;
languages[nlanguages++] = strdup(keyname + l);
}
}
- fileinfos = sat_zextend(fileinfos, nfileinfos, nlanguages + 2, sizeof(Repodatafile), REPODATAFILE_BLOCK);
/* write language subfiles */
for (i = 0; i < nlanguages; i++)
{
perror(fn);
exit(1);
}
- repo_write(repo, fp, keyfilter_language, languages[i], fileinfos + nfileinfos, 0);
- fileinfos[nfileinfos].location = strdup(fn);
+ write_info(repo, fp, keyfilter_language, languages[i], info, fn);
fclose(fp);
- nfileinfos++;
+ kd.haveexternal = 1;
}
/* write DU subfile */
if (has_DU)
perror(fn);
exit(1);
}
- repo_write(repo, fp, keyfilter_DU, 0, fileinfos + nfileinfos, 0);
- fileinfos[nfileinfos].location = strdup(fn);
+ write_info(repo, fp, keyfilter_DU, 0, info, fn);
fclose(fp);
- nfileinfos++;
+ kd.haveexternal = 1;
}
/* write filelist */
if (has_FL)
perror(fn);
exit(1);
}
- repo_write(repo, fp, keyfilter_FL, 0, fileinfos + nfileinfos, 0);
- fileinfos[nfileinfos].location = strdup(fn);
+ write_info(repo, fp, keyfilter_FL, 0, info, fn);
fclose(fp);
- nfileinfos++;
+ kd.haveexternal = 1;
}
/* write everything else */
sprintf(fn, "%s.solv", basename);
}
kd.languages = languages;
kd.nlanguages = nlanguages;
- repo_write(repo, fp, keyfilter_other, &kd, nfileinfos ? fileinfos : 0, nfileinfos);
+ repodata_internalize(info);
+ repo_write(repo, fp, keyfilter_other, &kd, 0);
fclose(fp);
for (i = 0; i < nlanguages; i++)
free(languages[i]);
sat_free(languages);
- for (i = 0; i < nfileinfos; i++)
- {
- sat_free(fileinfos[i].addedfileprovides);
- sat_free(fileinfos[i].location);
- sat_free(fileinfos[i].keys);
- }
- sat_free(fileinfos);
+ repodata_free(info);
+ repo->nrepodata--;
return 0;
}
if (attrname)
{
- fileinfos = sat_zextend(fileinfos, nfileinfos, 1, sizeof(Repodatafile), REPODATAFILE_BLOCK);
test_separate = 1;
- FILE *fp = fopen (attrname, "w");
- repo_write(repo, fp, keyfilter_attr, 0, fileinfos + nfileinfos, 0);
- fileinfos[nfileinfos].location = strdup(attrname);
+ FILE *fp = fopen(attrname, "w");
+ if (!fp)
+ {
+ perror(attrname);
+ exit(1);
+ }
+ write_info(repo, fp, keyfilter_attr, 0, info, attrname);
fclose(fp);
- nfileinfos++;
- }
- repo_write(repo, stdout, keyfilter_solv, 0, nfileinfos ? fileinfos : 0, nfileinfos);
- for (i = 0; i < nfileinfos; i++)
- {
- sat_free(fileinfos[i].addedfileprovides);
- sat_free(fileinfos[i].location);
- sat_free(fileinfos[i].keys);
+ kd.haveexternal = 1;
}
- sat_free(fileinfos);
+ repodata_internalize(info);
+ repo_write(repo, stdout, keyfilter_solv, &kd, 0);
+ repodata_free(info);
+ repo->nrepodata--;
return 0;
}
#include "pool.h"
#include "repo_solv.h"
+static int dump_repoattrs_cb(void *vcbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv);
+
static void
dump_repodata (Repo *repo)
{
printf("%02x", repo->rpmdbcookie[i]);
printf("\n");
}
- printf("repo refers to %d subfiles:\n", repo->nrepodata);
+ printf("repo contains %d repodata sections:\n", repo->nrepodata);
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
unsigned int j;
- printf("%s has %d keys, %d schemata\n", data->location ? data->location : "**EMBED**", data->nkeys, data->nschemata);
+ printf("\nrepodata %d has %d keys, %d schemata\n", i + 1, data->nkeys - 1, data->nschemata - 1);
for (j = 1; j < data->nkeys; j++)
printf(" %s (type %s size %d storage %d)\n", id2str(repo->pool, data->keys[j].name), id2str(repo->pool, data->keys[j].type), data->keys[j].size, data->keys[j].storage);
if (data->localpool)
printf(" localpool has %d strings, size is %d\n", data->spool.nstrings, data->spool.sstrings);
if (data->dirpool.ndirs)
printf(" localpool has %d directories\n", data->dirpool.ndirs);
- if (data->addedfileprovides)
- {
- printf(" added file provides:\n");
- for (j = 0; data->addedfileprovides[j]; j++)
- printf(" %s\n", id2str(repo->pool, data->addedfileprovides[j]));
- }
printf("\n");
+ repodata_search(data, REPOENTRY_META, 0, dump_repoattrs_cb, 0);
}
printf("\n");
}
dump_attr(Repo *repo, Repodata *data, Repokey *key, KeyValue *kv)
{
const char *keyname;
+ KeyValue *kvp;
+ int indent = 0;
keyname = id2str(repo->pool, key->name);
+ for (kvp = kv; (kvp = kvp->path) != 0; indent += 2)
+ printf(" ");
switch(key->type)
{
case REPOKEY_TYPE_ID:
printf("%s: %s\n", keyname, dep2str(repo->pool, kv->id));
break;
case REPOKEY_TYPE_IDARRAY:
+ if (!kv->entry)
+ printf("%s:\n%*s", keyname, indent, "");
if (data && data->localpool)
- printf("%s: %s\n", keyname, stringpool_id2str(&data->spool, kv->id));
+ printf(" %s\n", stringpool_id2str(&data->spool, kv->id));
else
- printf("%s: %s\n", keyname, dep2str(repo->pool, kv->id));
+ printf(" %s\n", dep2str(repo->pool, kv->id));
break;
case REPOKEY_TYPE_STR:
printf("%s: %s\n", keyname, kv->str);
case REPOKEY_TYPE_MD5:
case REPOKEY_TYPE_SHA1:
case REPOKEY_TYPE_SHA256:
- printf("%s: %s\n", keyname, repodata_chk2str(data, key->type, (unsigned char *)kv->str));
+ printf("%s: %s (%s)\n", keyname, repodata_chk2str(data, key->type, (unsigned char *)kv->str), id2str(repo->pool, key->type));
break;
case REPOKEY_TYPE_VOID:
printf("%s: (void)\n", keyname);
printf("%s: %d\n", keyname, kv->num);
break;
case REPOKEY_TYPE_DIRNUMNUMARRAY:
- printf("%s: %s %d %d\n", keyname, repodata_dir2str(data, kv->id, 0), kv->num, kv->num2);
+ if (!kv->entry)
+ printf("%s:\n%*s", keyname, indent, "");
+ printf(" %s %d %d\n", repodata_dir2str(data, kv->id, 0), kv->num, kv->num2);
break;
case REPOKEY_TYPE_DIRSTRARRAY:
- printf("%s: %s\n", keyname, repodata_dir2str(data, kv->id, kv->str));
+ if (!kv->entry)
+ printf("%s:\n%*s", keyname, indent, "");
+ printf(" %s\n", repodata_dir2str(data, kv->id, kv->str));
break;
- case REPOKEY_TYPE_COUNTED:
- printf("%s: %s\n", keyname, kv->eof == 0 ? "open" : kv->eof == 1 ? "next" : "close");
+ case REPOKEY_TYPE_FIXARRAY:
+ case REPOKEY_TYPE_FLEXARRAY:
+ if (!kv->entry)
+ printf("%s:\n", keyname);
+ else
+ printf("\n");
break;
default:
printf("%s: ?\n", keyname);
return 0;
}
-/*static int
+#if 1
+static int
dump_repoattrs_cb(void *vcbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
{
- return dump_attr(s->repo, data, key, kv);
-}*/
+ if (key->name == REPOSITORY_SOLVABLES)
+ return SEARCH_NEXT_SOLVABLE;
+ return dump_attr(data->repo, data, key, kv);
+}
+#endif
/*
* dump all attributes for Id <p>
void
dump_repoattrs(Repo *repo, Id p)
{
-#if 0
+#if 1
repo_search(repo, p, 0, 0, SEARCH_NO_STORAGE_SOLVABLE, dump_repoattrs_cb, 0);
#else
Dataiterator di;
loadcallback (Pool *pool, Repodata *data, void *vdata)
{
FILE *fp = 0;
- if (data->location && with_attr)
+ const char *location = repodata_lookup_str(data, REPOENTRY_META, REPOSITORY_LOCATION);
+ if (location && with_attr)
{
- fprintf (stderr, "Loading SOLV file %s\n", data->location);
- fp = fopen (data->location, "r");
+ fprintf (stderr, "Loading SOLV file %s\n", location);
+ fp = fopen (location, "r");
if (!fp)
- perror(data->location);
+ perror(location);
}
return fp;
}
printf("could not read repository\n");
}
printf("pool contains %d strings, %d rels, string size is %d\n", pool->ss.nstrings, pool->nrels, pool->ss.sstrings);
+
+#if 0
+{
+ Dataiterator di;
+ dataiterator_init(&di, repo, -1, 0, "oo", DI_SEARCHSUB|SEARCH_SUBSTRING);
+ while (dataiterator_step(&di))
+ dump_attr(di.repo, di.data, di.key, &di.kv);
+ exit(0);
+}
+#endif
+
for (j = 0; 1 && j < pool->nrepos; j++)
{
repo = pool->repos[j];
dump_repodata(repo);
+
printf("repo %d contains %d solvables %d non-solvables\n", j, repo->nsolvables, repo->nextra);
printf("repo start: %d end: %d\n", repo->start, repo->end);
for (i = repo->start, n = 1; i < repo->end; i++)
{
Pool *pool = pool_create();
Repo *repo = repo_create(pool, "<stdin>");
- repo_add_helix(repo, stdin);
+ repo_add_helix(repo, stdin, 0);
tool_write(repo, 0, 0);
pool_free(pool);
exit(0);
loadcallback (Pool *pool, Repodata *data, void *vdata)
{
FILE *fp = 0;
- if (data->location)
+ const char *location = repodata_lookup_str(data, 0, REPOSITORY_LOCATION);
+ if (location)
{
- fprintf(stderr, "Loading SOLV file %s\n", data->location);
- fp = fopen (data->location, "r");
+ fprintf(stderr, "Loading SOLV file %s\n", location);
+ fp = fopen (location, "r");
if (!fp)
- perror(data->location);
+ perror(location);
}
return fp;
}
if ((fp = fopen(argv[optind], "r")) == NULL)
{
perror(argv[optind]);
- exit(0);
+ exit(1);
}
repo_add_solv(repo, fp);
fclose(fp);
if (!strncmp (name, "package:", 8))
name += 8;
id = str2id(pool, name, 1);
- if (strpbrk(line, "<>=") == line) /* next(!) word is rel */
+ if (*line == '<' || *line == '>' || *line == '=') /* rel follows */
{
char *rel = splitword(&line);
char *evr = splitword(&line);
*/
void
-repo_add_content(Repo *repo, FILE *fp)
+repo_add_content(Repo *repo, FILE *fp, int flags)
{
Pool *pool = repo->pool;
char *line, *linep;
unsigned int numotherarchs = 0;
Id *otherarchs = 0;
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
+
memset(&pd, 0, sizeof(pd));
line = sat_malloc(1024);
aline = 1024;
/* create new solvable */
s = pool_id2solvable(pool, repo_add_solvable(repo));
repodata_extend(data, s - pool->solvables);
- handle = repodata_get_handle(data, s - pool->solvables - repo->start);
+ handle = s - pool->solvables;
s->name = str2id(pool, join(&pd, "product", ":", value), 1);
continue;
}
{
s = pool_id2solvable(pool, repo_add_solvable(repo));
repodata_extend(data, s - pool->solvables);
- handle = repodata_get_handle(data, s - pool->solvables - repo->start);
+ handle = s - pool->solvables;
}
if (istag ("VERSION"))
{
Solvable *p = pool_id2solvable(pool, repo_add_solvable(repo));
repodata_extend(data, p - pool->solvables);
- /*handle = repodata_get_handle(data, p - pool->solvables - repo->start);*/
p->name = s->name;
p->evr = s->evr;
p->vendor = s->vendor;
p->provides = repo_addid_dep(repo, p->provides, rel2id(pool, p->name, p->evr, REL_EQ, 1), 0);
/* now merge the attributes */
- repodata_merge_attrs(data, p - pool->solvables - repo->start, s - pool->solvables- repo->start);
+ repodata_merge_attrs(data, p - pool->solvables, s - pool->solvables);
}
- if (data)
- repodata_internalize(data);
-
if (pd.tmp)
sat_free(pd.tmp);
sat_free(line);
sat_free(otherarchs);
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
* for further information
*/
-void repo_add_content(Repo *repo, FILE *fp);
+void repo_add_content(Repo *repo, FILE *fp, int flags);
#define DISABLE_SPLIT
#include "tools_util.h"
-//#define DUMPOUT 0
+/* #define DUMPOUT 1 */
/*
* <deltainfo>
unsigned buildtime;
unsigned downloadsize, archivesize;
char *filechecksum;
-
+ int filechecksumtype;
/* Baseversion. deltarpm only has one. */
Id *bevr;
unsigned nbevr;
Pool *pool;
Repo *repo;
Repodata *data;
- int datanum;
struct stateswitch *swtab[NUMSTATES];
enum state sbtab[NUMSTATES];
Id newpkgevr;
Id newpkgname;
Id newpkgarch;
+
+ Id *handles;
+ int nhandles;
};
/*
l += strlen(r) + 1;
if (l > pd->acontent)
{
- pd->content = realloc(pd->content, l + 256);
+ pd->content = sat_realloc(pd->content, l + 256);
pd->acontent = l + 256;
}
c = pd->content;
case STATE_START:
break;
case STATE_NEWPACKAGE:
- if ( (str = find_attr("name", atts)) )
+ if ((str = find_attr("name", atts)) != 0)
{
pd->newpkgname = str2id(pool, str, 1);
}
pd->newpkgevr = makeevr_atts(pool, pd, atts);
- if ( (str = find_attr("arch", atts)) )
+ if ((str = find_attr("arch", atts)) != 0)
{
pd->newpkgarch = str2id(pool, str, 1);
}
break;
case STATE_DELTA:
- memset(&pd->delta, 0, sizeof (pd->delta));
+ memset(&pd->delta, 0, sizeof(pd->delta));
*pd->tempstr = 0;
pd->ltemp = 0;
- pd->delta.nbevr++;
- pd->delta.bevr = sat_realloc (pd->delta.bevr, pd->delta.nbevr * sizeof(Id));
- pd->delta.bevr[pd->delta.nbevr - 1] = makeevr_atts(pool, pd, atts);
- --(pd->datanum);
+ pd->delta.bevr = sat_extend(pd->delta.bevr, pd->delta.nbevr, 1, sizeof(Id), 7);
+ pd->delta.bevr[pd->delta.nbevr++] = makeevr_atts(pool, pd, atts);
break;
case STATE_FILENAME:
break;
break;
case STATE_SIZE:
break;
+ case STATE_CHECKSUM:
+ pd->delta.filechecksum = 0;
+ pd->delta.filechecksumtype = REPOKEY_TYPE_SHA1;
+ if ((str = find_attr("type", atts)) != 0)
+ {
+ if (!strcasecmp(str, "sha"))
+ pd->delta.filechecksumtype = REPOKEY_TYPE_SHA1;
+ else if (!strcasecmp(str, "sha256"))
+ pd->delta.filechecksumtype = REPOKEY_TYPE_SHA256;
+ else if (!strcasecmp(str, "md5"))
+ pd->delta.filechecksumtype = REPOKEY_TYPE_MD5;
+ else
+ fprintf(stderr, "warning: unknown checksum type: '%s'\n", str);
+ }
+ break;
case STATE_SEQUENCE:
break;
default:
break;
case STATE_DELTA:
{
+ /* read all data for a deltarpm. commit into attributes */
+ Id handle;
+ struct deltarpm *d = &pd->delta;
#ifdef DUMPOUT
int i;
#endif
- struct deltarpm *d = &pd->delta;
#ifdef DUMPOUT
fprintf (stderr, "found deltarpm for %s:\n", id2str(pool, pd->newpkgname));
#endif
- repo_set_id(pd->repo, pd->datanum, DELTA_PACKAGE_NAME, pd->newpkgname);
- repo_set_id(pd->repo, pd->datanum, DELTA_PACKAGE_EVR, pd->newpkgevr);
- repo_set_id(pd->repo, pd->datanum, DELTA_PACKAGE_ARCH, pd->newpkgarch);
- repo_set_id(pd->repo, pd->datanum, DELTA_LOCATION_NAME, d->locname);
- repo_set_id(pd->repo, pd->datanum, DELTA_LOCATION_DIR, d->locdir);
- repo_set_id(pd->repo, pd->datanum, DELTA_LOCATION_EVR, d->locevr);
- repo_set_id(pd->repo, pd->datanum, DELTA_LOCATION_SUFFIX, d->locsuffix);
-
+ handle = repodata_new_handle(pd->data);
+ /* we commit all handles later on in one go so that the
+ * repodata code doesn't need to realloc every time */
+ pd->handles = sat_extend(pd->handles, pd->nhandles, 1, sizeof(Id), 63);
+ pd->handles[pd->nhandles++] = handle;
+ repodata_set_id(pd->data, handle, DELTA_PACKAGE_NAME, pd->newpkgname);
+ repodata_set_id(pd->data, handle, DELTA_PACKAGE_EVR, pd->newpkgevr);
+ repodata_set_id(pd->data, handle, DELTA_PACKAGE_ARCH, pd->newpkgarch);
+ repodata_set_id(pd->data, handle, DELTA_LOCATION_NAME, d->locname);
+ repodata_set_id(pd->data, handle, DELTA_LOCATION_DIR, d->locdir);
+ repodata_set_id(pd->data, handle, DELTA_LOCATION_EVR, d->locevr);
+ repodata_set_id(pd->data, handle, DELTA_LOCATION_SUFFIX, d->locsuffix);
+ if (d->downloadsize)
+ repodata_set_num(pd->data, handle, DELTA_DOWNLOADSIZE, (d->downloadsize + 1023) / 1024);
+ if (d->filechecksum)
+ repodata_set_checksum(pd->data, handle, DELTA_CHECKSUM, d->filechecksumtype, d->filechecksum);
#ifdef DUMPOUT
fprintf (stderr, " loc: %s %s %s %s\n", id2str(pool, d->locdir),
id2str(pool, d->locname), id2str(pool, d->locevr),
fprintf (stderr, " chek: %s\n", d->filechecksum);
#endif
- repo_set_num(pd->repo, pd->datanum, DELTA_DOWNLOADSIZE, d->downloadsize);
- repo_set_str(pd->repo, pd->datanum, DELTA_CHECKSUM, d->filechecksum);
-
if (d->seqnum)
{
#ifdef DUMPOUT
fprintf (stderr, " %s\n",
d->seqnum);
#endif
- repo_set_id(pd->repo, pd->datanum, DELTA_BASE_EVR, d->bevr[0]);
- repo_set_id(pd->repo, pd->datanum, DELTA_SEQ_NAME, d->seqname);
- repo_set_id(pd->repo, pd->datanum, DELTA_SEQ_EVR, d->seqevr);
- repo_set_str(pd->repo, pd->datanum, DELTA_SEQ_NUM, d->seqnum);
+ repodata_set_id(pd->data, handle, DELTA_BASE_EVR, d->bevr[0]);
+ repodata_set_id(pd->data, handle, DELTA_SEQ_NAME, d->seqname);
+ repodata_set_id(pd->data, handle, DELTA_SEQ_EVR, d->seqevr);
+ /* should store as binary blob! */
+ repodata_set_str(pd->data, handle, DELTA_SEQ_NUM, d->seqnum);
#ifdef DUMPOUT
fprintf(stderr, "OK\n");
#ifdef DUMPOUT
if (d->seqevr != d->bevr[0])
fprintf (stderr, "XXXXX evr\n");
- /* Name of package ("atom:xxxx") should match the sequence info
+ /* Name of package ("xxxx") should match the sequence info
name. */
- if (strcmp(id2str(pool, d->seqname), id2str(pool, pd->newpkgname) + 5))
+ if (strcmp(id2str(pool, d->seqname), id2str(pool, pd->newpkgname)))
fprintf (stderr, "XXXXX name\n");
#endif
}
}
}
- free(pd->delta.filechecksum);
- free(pd->delta.bevr);
- free(pd->delta.seqnum);
+ pd->delta.filechecksum = sat_free(pd->delta.filechecksum);
+ pd->delta.bevr = sat_free(pd->delta.bevr);
+ pd->delta.nbevr = 0;
+ pd->delta.seqnum = sat_free(pd->delta.seqnum);
break;
case STATE_FILENAME:
parse_delta_location(pd, pd->content);
l = pd->lcontent + len + 1;
if (l > pd->acontent)
{
- pd->content = realloc(pd->content, l + 256);
+ pd->content = sat_realloc(pd->content, l + 256);
pd->acontent = l + 256;
}
c = pd->content + pd->lcontent;
char buf[BUFF_SIZE];
int i, l;
struct stateswitch *sw;
+ Repodata *data;
+
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
memset(&pd, 0, sizeof(pd));
for (i = 0, sw = stateswitches; sw->from != NUMSTATES; i++, sw++)
}
pd.pool = pool;
pd.repo = repo;
- pd.data = repo_add_repodata(pd.repo, 0);
+ pd.data = data;
- pd.content = malloc(256);
+ pd.content = sat_malloc(256);
pd.acontent = 256;
pd.lcontent = 0;
pd.tempstr = malloc(256);
pd.atemp = 256;
pd.ltemp = 0;
- pd.datanum = 0;
+
XML_Parser parser = XML_ParserCreate(NULL);
XML_SetUserData(parser, &pd);
XML_SetElementHandler(parser, startElement, endElement);
break;
}
XML_ParserFree(parser);
+ sat_free(pd.content);
+ sat_free(pd.tempstr);
+ join_freemem();
- if (pd.data)
- repodata_internalize(pd.data);
+ /* now commit all handles */
+ for (i = 0; i < pd.nhandles; i++)
+ repodata_add_flexarray(pd.data, REPOENTRY_META, REPOSITORY_DELTAINFO, pd.handles[i]);
+ sat_free(pd.handles);
- free(pd.content);
- join_freemem();
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
/* EOF */
+/*
+ * Copyright (c) 2007, Novell Inc.
+ *
+ * This program is licensed under the BSD license, read LICENSE.BSD
+ * for further information
+ */
+
void repo_add_deltainfoxml(Repo *repo, FILE *fp, int flags);
if (!strcmp(pd->kind, "patch"))
{
- pd->datanum = (pd->solvable - pool->solvables) - pd->repo->start;
- repodata_extend(pd->data, pd->solvable - pool->solvables);
- pd->datanum = repodata_get_handle(pd->data, pd->datanum);
+ pd->datanum = pd->solvable - pool->solvables;
repodata_set_num(pd->data, pd->datanum, SOLVABLE_BUILDTIME, pd->timestamp);
}
#if 0
char buf[BUFF_SIZE];
int i, l;
struct stateswitch *sw;
+ Repodata *data;
+
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
memset(&pd, 0, sizeof(pd));
for (i = 0, sw = stateswitches; sw->from != NUMSTATES; i++, sw++)
}
pd.pool = pool;
pd.repo = repo;
- pd.data = repo_add_repodata(pd.repo, 0);
+ pd.data = data;
pd.content = malloc(256);
pd.acontent = 256;
}
XML_ParserFree(parser);
- if (pd.data)
- repodata_internalize(pd.data);
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
free(pd.content);
}
* for further information
*/
-#define PATCHXML_KINDS_SEPARATELY 1
+#define PATCHXML_KINDS_SEPARATELY (1 << 2)
extern void repo_add_patchxml(Repo *repo, FILE *fp, int flags);
if (!s)
{
s = pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->repo));
- repodata_extend(pd->data, s - pool->solvables);
- pd->handle = repodata_get_handle(pd->data, (s - pool->solvables) - pd->repo->start);
+ pd->handle = s - pool->solvables;
}
break;
struct parsedata *pd = userData;
int l;
char *c;
- if (!pd->docontent) {
-#if 0
- char *dup = strndup( s, len );
- fprintf(stderr, "Content: [%d]'%s'\n", pd->state, dup );
- free( dup );
-#endif
+ if (!pd->docontent)
return;
- }
l = pd->lcontent + len + 1;
if (l > pd->acontent)
{
- pd->content = realloc(pd->content, l + 256);
+ pd->content = sat_realloc(pd->content, l + 256);
pd->acontent = l + 256;
}
c = pd->content + pd->lcontent;
*/
static void
-repo_add_product(struct parsedata *pd, Repodata *data, FILE *fp, int code11)
+repo_add_product(struct parsedata *pd, FILE *fp, int code11)
{
char buf[BUFF_SIZE];
int l;
*/
static void
-parse_dir(DIR *dir, const char *path, struct parsedata *pd, Repodata *repodata, int code11)
+parse_dir(DIR *dir, const char *path, struct parsedata *pd, int code11)
{
struct dirent *entry;
char *suffix = code11 ? ".prod" : "-release";
perror(fullpath);
break;
}
- repo_add_product(pd, repodata, fp, code11);
+ repo_add_product(pd, fp, code11);
fclose(fp);
}
}
* parse each one as a product
*/
+/* Oh joy! Three parsers for the price of one! */
+
void
-repo_add_products(Repo *repo, Repodata *repodata, const char *proddir, const char *root, const char *attribute)
+repo_add_products(Repo *repo, const char *proddir, const char *root, const char *attribute, int flags)
{
const char *fullpath = proddir;
DIR *dir;
int i;
struct parsedata pd;
struct stateswitch *sw;
+ Repodata *data;
+
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
memset(&pd, 0, sizeof(pd));
pd.repo = repo;
pd.pool = repo->pool;
- pd.data = repodata;
+ pd.data = data;
- pd.content = malloc(256);
+ pd.content = sat_malloc(256);
pd.acontent = 256;
pd.attribute = attribute;
dir = opendir(fullpath);
if (dir)
{
- parse_dir(dir, fullpath, &pd, repodata, 1); /* assume 'code11' products */
+ parse_dir(dir, fullpath, &pd, 1); /* assume 'code11' products */
closedir(dir);
}
else
dir = opendir(fullpath);
if (dir)
{
- repo_add_zyppdb_products(repo, repodata, fullpath, dir); /* assume 'code10' zypp-style products */
+ repo_add_zyppdb_products(repo, data, fullpath, dir); /* assume 'code10' zypp-style products */
closedir(dir);
}
else
dir = opendir(fullpath);
if (dir)
{
- parse_dir(dir, fullpath, &pd, repodata, 0); /* fall back to /etc/<xyz>-release parsing */
+ parse_dir(dir, fullpath, &pd, 0); /* fall back to /etc/<xyz>-release parsing */
closedir(dir);
}
else
}
sat_free((void *)pd.tmplang);
- free(pd.content);
+ sat_free(pd.content);
join_freemem();
+
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
/* EOF */
* for further information
*/
-void repo_add_products(Repo *repo, Repodata *repodata, const char *proddir, const char *root, const char *attribute);
+void repo_add_products(Repo *repo, const char *proddir, const char *root, const char *attribute, int flags);
{ NUMSTATES }
};
-/*
- * split l into m parts, store to sp[]
- * split at whitespace
- */
-
-static inline int
-split_comma(char *l, char **sp, int m)
-{
- int i;
- for (i = 0; i < m;)
- {
- while (*l == ',')
- l++;
- if (!*l)
- break;
- sp[i++] = l;
- if (i == m)
- break;
- while (*l && !(*l == ','))
- l++;
- if (!*l)
- break;
- *l++ = 0;
- }
- return i;
-}
-
struct parsedata {
int depth;
case STATE_REPOMD:
{
const char *updstr;
- char *value;
- char *fvalue;
/* this should be OBSOLETE soon */
updstr = find_attr("updates", atts);
- if ( updstr != NULL )
+ if (updstr)
{
- value = strdup(updstr);
- fvalue = value; /* save the first */
- if ( value != NULL )
- {
- char *sp[2];
- while (value)
- {
- int words = split_comma(value, sp, 2);
- if (!words)
- break;
- if (sp[0])
- repo_add_poolstr_array(pd->repo, -1, REPOSITORY_UPDATES, sp[0]);
- if (words == 1)
- break;
- value = sp[1];
- }
- free(fvalue);
- }
+ char *value = strdup(updstr);
+ char *fvalue = value; /* save the first */
+ while (value)
+ {
+ char *p = strchr(value, ',');
+ if (p)
+ *p++ = 0;
+ if (*value)
+ repo_add_poolstr_array(pd->repo, REPOENTRY_META, REPOSITORY_UPDATES, value);
+ value = p;
+ }
+ free(fvalue);
}
break;
}
{
struct parsedata *pd = userData;
/* Pool *pool = pd->pool; */
- int timestamp;
#if 0
fprintf(stderr, "end: %s\n", name);
{
case STATE_START: break;
case STATE_REPOMD:
- /* save the timestamp in the non solvable number 1 */
- if ( pd->timestamp > 0 )
- repo_set_num(pd->repo, -1, REPOSITORY_TIMESTAMP, pd->timestamp);
+ if (pd->timestamp > 0)
+ repodata_set_num(pd->data, REPOENTRY_META, REPOSITORY_TIMESTAMP, pd->timestamp);
break;
case STATE_DATA: break;
case STATE_LOCATION: break;
case STATE_TIMESTAMP:
{
/**
- * we want to look for the newer timestamp
+ * we want to look for the newest timestamp
* of all resources to save it as the time
* the metadata was generated
*/
- timestamp = atoi(pd->content);
- /** if the timestamp is invalid or just 0 ignore it */
- if ( timestamp == 0 )
- break;
- if ( timestamp > pd->timestamp )
- {
- pd->timestamp = timestamp;
- }
+ int timestamp = atoi(pd->content);
+ if (timestamp > pd->timestamp)
+ pd->timestamp = timestamp;
break;
}
case STATE_EXPIRE:
{
- int expire = 0;
- if ( pd->content )
- {
- expire = atoi(pd->content);
- if ( expire > 0 )
- {
- /* save the timestamp in the non solvable number 1 */
- repo_set_num(pd->repo, -1, REPOSITORY_EXPIRE, expire);
- }
- }
+ int expire = atoi(pd->content);
+ if (expire > 0)
+ repodata_set_num(pd->data, REPOENTRY_META, REPOSITORY_EXPIRE, expire);
break;
}
case STATE_PRODUCT:
- {
- if ( pd->content )
- repo_add_poolstr_array(pd->repo, -1, REPOSITORY_PRODUCTS, pd->content);
- break;
- }
+ if (pd->content)
+ repodata_add_poolstr_array(pd->data, REPOENTRY_META, REPOSITORY_PRODUCTS, pd->content);
+ break;
case STATE_KEYWORD:
- {
- if ( pd->content )
- repo_add_poolstr_array(pd->repo, -1, REPOSITORY_KEYWORDS, pd->content);
- break;
- }
+ if (pd->content)
+ repodata_add_poolstr_array(pd->data, REPOENTRY_META, REPOSITORY_KEYWORDS, pd->content);
+ break;
case STATE_SUSEINFO: break;
case STATE_PRODUCTS: break;
case STATE_KEYWORDS: break;
struct parsedata *pd = userData;
int l;
char *c;
- if (!pd->docontent) {
-#if 0
- char *dup = strndup( s, len );
- fprintf(stderr, "Content: [%d]'%s'\n", pd->state, dup );
- free( dup );
-#endif
+ if (!pd->docontent)
return;
- }
l = pd->lcontent + len + 1;
if (l > pd->acontent)
{
{
Pool *pool = repo->pool;
struct parsedata pd;
- pd.timestamp = 0;
-
+ Repodata *data;
char buf[BUFF_SIZE];
int i, l;
struct stateswitch *sw;
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
+
memset(&pd, 0, sizeof(pd));
+ pd.timestamp = 0;
for (i = 0, sw = stateswitches; sw->from != NUMSTATES; i++, sw++)
{
if (!pd.swtab[sw->from])
}
XML_ParserFree(parser);
- if (pd.data)
- repodata_internalize(pd.data);
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
free(pd.content);
}
+/*
+ * Copyright (c) 2007, Novell Inc.
+ *
+ * This program is licensed under the BSD license, read LICENSE.BSD
+ * for further information
+ */
+
void repo_add_repomdxml(Repo *repo, FILE *fp, int flags);
#endif
static void
-adddudata(Pool *pool, Repo *repo, Repodata *repodata, Solvable *s, RpmHead *rpmhead, char **dn, unsigned int *di, int fc, int dic)
+adddudata(Pool *pool, Repo *repo, Repodata *data, Solvable *s, RpmHead *rpmhead, char **dn, unsigned int *di, int fc, int dic)
{
Id handle, did;
int i, fszc;
sat_free(fsz);
sat_free(fm);
/* commit */
- repodata_extend(repodata, s - pool->solvables);
- handle = (s - pool->solvables) - repodata->start;
- handle = repodata_get_handle(repodata, handle);
+ handle = s - pool->solvables;
for (i = 0; i < fc; i++)
{
if (!fn[i])
continue;
if (!*dn[i] && (s->arch == ARCH_SRC || s->arch == ARCH_NOSRC))
- did = repodata_str2dir(repodata, "/usr/src", 1);
+ did = repodata_str2dir(data, "/usr/src", 1);
else
- did = repodata_str2dir(repodata, dn[i], 1);
- repodata_add_dirnumnum(repodata, handle, SOLVABLE_DISKUSAGE, did, fkb[i], fn[i]);
+ did = repodata_str2dir(data, dn[i], 1);
+ repodata_add_dirnumnum(data, handle, SOLVABLE_DISKUSAGE, did, fkb[i], fn[i]);
}
sat_free(fn);
sat_free(fkb);
/* assumes last processed array is provides! */
static unsigned int
-addfileprovides(Pool *pool, Repo *repo, Repodata *repodata, Solvable *s, RpmHead *rpmhead, unsigned int olddeps)
+addfileprovides(Pool *pool, Repo *repo, Repodata *data, Solvable *s, RpmHead *rpmhead, unsigned int olddeps)
{
char **bn;
char **dn;
int fna = 0;
#endif
- if (!repodata)
+ if (!data)
return olddeps;
bn = headstringarray(rpmhead, TAG_BASENAMES, &bnc);
if (!bn)
exit(1);
}
- if (repodata)
- adddudata(pool, repo, repodata, s, rpmhead, dn, di, bnc, dic);
+ if (data)
+ adddudata(pool, repo, data, s, rpmhead, dn, di, bnc, dic);
for (i = 0; i < bnc; i++)
{
strcat(fn, bn[i]);
olddeps = repo_addid_dep(repo, olddeps, str2id(pool, fn, 1), SOLVABLE_FILEMARKER);
#endif
- if (repodata)
+ if (data)
{
Id handle, did;
- repodata_extend(repodata, s - pool->solvables);
- handle = (s - pool->solvables) - repodata->start;
- handle = repodata_get_handle(repodata, handle);
- did = repodata_str2dir(repodata, dn[di[i]], 1);
+ handle = s - pool->solvables;
+ did = repodata_str2dir(data, dn[di[i]], 1);
if (!did)
- did = repodata_str2dir(repodata, "/", 1);
- repodata_add_dirstr(repodata, handle, SOLVABLE_FILELIST, did, bn[i]);
+ did = repodata_str2dir(data, "/", 1);
+ repodata_add_dirstr(data, handle, SOLVABLE_FILELIST, did, bn[i]);
}
}
#if 0
}
static void
-addsourcerpm(Pool *pool, Repodata *repodata, Id handle, char *sourcerpm, char *name, char *evr)
+addsourcerpm(Pool *pool, Repodata *data, Id handle, char *sourcerpm, char *name, char *evr)
{
const char *p, *sevr, *sarch;
return;
sevr = p + 1;
if (!strcmp(sarch, "src.rpm"))
- repodata_set_constantid(repodata, handle, SOLVABLE_SOURCEARCH, ARCH_SRC);
+ repodata_set_constantid(data, handle, SOLVABLE_SOURCEARCH, ARCH_SRC);
else if (!strcmp(sarch, "nosrc.rpm"))
- repodata_set_constantid(repodata, handle, SOLVABLE_SOURCEARCH, ARCH_NOSRC);
+ repodata_set_constantid(data, handle, SOLVABLE_SOURCEARCH, ARCH_NOSRC);
else
- repodata_set_constantid(repodata, handle, SOLVABLE_SOURCEARCH, strn2id(pool, sarch, strlen(sarch) - 4, 1));
+ repodata_set_constantid(data, handle, SOLVABLE_SOURCEARCH, strn2id(pool, sarch, strlen(sarch) - 4, 1));
if (!strncmp(sevr, evr, sarch - sevr - 1) && evr[sarch - sevr - 1] == 0)
- repodata_set_void(repodata, handle, SOLVABLE_SOURCEEVR);
+ repodata_set_void(data, handle, SOLVABLE_SOURCEEVR);
else
- repodata_set_id(repodata, handle, SOLVABLE_SOURCEEVR, strn2id(pool, sevr, sarch - sevr - 1, 1));
+ repodata_set_id(data, handle, SOLVABLE_SOURCEEVR, strn2id(pool, sevr, sarch - sevr - 1, 1));
if (!strncmp(sourcerpm, name, sevr - sourcerpm - 1) && name[sevr - sourcerpm - 1] == 0)
- repodata_set_void(repodata, handle, SOLVABLE_SOURCENAME);
+ repodata_set_void(data, handle, SOLVABLE_SOURCENAME);
else
- repodata_set_id(repodata, handle, SOLVABLE_SOURCENAME, strn2id(pool, sourcerpm, sevr - sourcerpm - 1, 1));
+ repodata_set_id(data, handle, SOLVABLE_SOURCENAME, strn2id(pool, sourcerpm, sevr - sourcerpm - 1, 1));
}
static int
-rpm2solv(Pool *pool, Repo *repo, Repodata *repodata, Solvable *s, RpmHead *rpmhead)
+rpm2solv(Pool *pool, Repo *repo, Repodata *data, Solvable *s, RpmHead *rpmhead)
{
char *name;
char *evr;
s->vendor = str2id(pool, headstring(rpmhead, TAG_VENDOR), 1);
s->provides = makedeps(pool, repo, rpmhead, TAG_PROVIDENAME, TAG_PROVIDEVERSION, TAG_PROVIDEFLAGS, 0);
- s->provides = addfileprovides(pool, repo, repodata, s, rpmhead, s->provides);
+ s->provides = addfileprovides(pool, repo, data, s, rpmhead, s->provides);
if (s->arch != ARCH_SRC && s->arch != ARCH_NOSRC)
s->provides = repo_addid_dep(repo, s->provides, rel2id(pool, s->name, s->evr, REL_EQ, 1), 0);
s->requires = makedeps(pool, repo, rpmhead, TAG_REQUIRENAME, TAG_REQUIREVERSION, TAG_REQUIREFLAGS, 0);
s->supplements = repo_fix_supplements(repo, s->provides, s->supplements, 0);
s->conflicts = repo_fix_conflicts(repo, s->conflicts);
- if (repodata)
+ if (data)
{
Id handle;
char *str;
unsigned int u32;
- repodata_extend(repodata, s - pool->solvables);
- handle = repodata_get_handle(repodata, (s - pool->solvables) - repodata->start);
+ handle = s - pool->solvables;
str = headstring(rpmhead, TAG_SUMMARY);
if (str)
- setutf8string(repodata, handle, SOLVABLE_SUMMARY, str);
+ setutf8string(data, handle, SOLVABLE_SUMMARY, str);
str = headstring(rpmhead, TAG_DESCRIPTION);
if (str)
{
while (l > 0 && str[l - 1] == '\n')
str[--l] = 0;
if (l)
- setutf8string(repodata, handle, SOLVABLE_DESCRIPTION, str);
+ setutf8string(data, handle, SOLVABLE_DESCRIPTION, str);
p = aut + 19;
aut = str; /* copy over */
while (*p == ' ' || *p == '\n')
aut--;
*aut = 0;
if (*str)
- setutf8string(repodata, handle, SOLVABLE_AUTHORS, str);
+ setutf8string(data, handle, SOLVABLE_AUTHORS, str);
free(str);
}
else if (*str)
- setutf8string(repodata, handle, SOLVABLE_DESCRIPTION, str);
+ setutf8string(data, handle, SOLVABLE_DESCRIPTION, str);
}
str = headstring(rpmhead, TAG_GROUP);
if (str)
- repodata_set_poolstr(repodata, handle, SOLVABLE_GROUP, str);
+ repodata_set_poolstr(data, handle, SOLVABLE_GROUP, str);
str = headstring(rpmhead, TAG_LICENSE);
if (str)
- repodata_set_poolstr(repodata, handle, SOLVABLE_LICENSE, str);
+ repodata_set_poolstr(data, handle, SOLVABLE_LICENSE, str);
str = headstring(rpmhead, TAG_URL);
if (str)
- repodata_set_str(repodata, handle, SOLVABLE_URL, str);
+ repodata_set_str(data, handle, SOLVABLE_URL, str);
str = headstring(rpmhead, TAG_DISTRIBUTION);
if (str)
- repodata_set_poolstr(repodata, handle, SOLVABLE_DISTRIBUTION, str);
+ repodata_set_poolstr(data, handle, SOLVABLE_DISTRIBUTION, str);
str = headstring(rpmhead, TAG_PACKAGER);
if (str)
- repodata_set_poolstr(repodata, handle, SOLVABLE_PACKAGER, str);
+ repodata_set_poolstr(data, handle, SOLVABLE_PACKAGER, str);
u32 = headint32(rpmhead, TAG_BUILDTIME);
if (u32)
- repodata_set_num(repodata, handle, SOLVABLE_BUILDTIME, u32);
+ repodata_set_num(data, handle, SOLVABLE_BUILDTIME, u32);
u32 = headint32(rpmhead, TAG_INSTALLTIME);
if (u32)
- repodata_set_num(repodata, handle, SOLVABLE_INSTALLTIME, u32);
+ repodata_set_num(data, handle, SOLVABLE_INSTALLTIME, u32);
u32 = headint32(rpmhead, TAG_SIZE);
if (u32)
- repodata_set_num(repodata, handle, SOLVABLE_INSTALLSIZE, (u32 + 1023) / 1024);
+ repodata_set_num(data, handle, SOLVABLE_INSTALLSIZE, (u32 + 1023) / 1024);
if (sourcerpm)
- addsourcerpm(pool, repodata, handle, sourcerpm, name, evr);
+ addsourcerpm(pool, data, handle, sourcerpm, name, evr);
}
sat_free(evr);
return 1;
/* copy all attributes */
if (!data)
return;
- repodata_extend(data, s - pool->solvables);
cbdata.data = data;
- cbdata.handle = repodata_get_handle(data, (s - pool->solvables) - data->start);
+ cbdata.handle = s - pool->solvables;
cbdata.dircache = dircache;
repo_search(fromrepo, (r - fromrepo->pool->solvables), 0, 0, SEARCH_NO_STORAGE_SOLVABLE, solvable_copy_cb, &cbdata);
}
/* only works if nothing is already internalized! */
if (data && data->attrs)
{
- Id tmpattrs = data->attrs[pa - data->start];
+ Id *tmpattrs = data->attrs[pa - data->start];
data->attrs[pa - data->start] = data->attrs[pb - data->start];
data->attrs[pb - data->start] = tmpattrs;
}
*/
void
-repo_add_rpmdb(Repo *repo, Repodata *repodata, Repo *ref, const char *rootdir)
+repo_add_rpmdb(Repo *repo, Repo *ref, const char *rootdir, int flags)
{
Pool *pool = repo->pool;
unsigned char buf[16];
DBT dbkey;
DBT dbdata;
struct stat packagesstat;
- int repodata_self = 0;
+ unsigned char newcookie[32];
+ const unsigned char *oldcookie = 0;
+ Id oldcookietype = 0;
+ Repodata *data;
memset(&dbkey, 0, sizeof(dbkey));
memset(&dbdata, 0, sizeof(dbdata));
if (!rootdir)
rootdir = "";
- if (!repodata)
- {
- repodata = repo_add_repodata(repo, 0);
- repodata_self = 1;
- }
+
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
if (ref && !(ref->nsolvables && ref->rpmdbid))
ref = 0;
perror(dbpath);
exit(1);
}
- mkrpmdbcookie(&packagesstat, repo->rpmdbcookie);
+ mkrpmdbcookie(&packagesstat, newcookie);
+ repodata_set_bin_checksum(data, REPOENTRY_META, REPOSITORY_RPMDBCOOKIE, REPOKEY_TYPE_SHA256, newcookie);
- if (!ref || memcmp(repo->rpmdbcookie, ref->rpmdbcookie, 32) != 0)
+ if (ref)
+ oldcookie = repo_lookup_bin_checksum(ref, REPOENTRY_META, REPOSITORY_RPMDBCOOKIE, &oldcookietype);
+ if (!ref || !oldcookie || oldcookietype != REPOKEY_TYPE_SHA256 || memcmp(oldcookie, newcookie, 32) != 0)
{
Id *pkgids;
if (db->open(db, 0, dbpath, 0, DB_HASH, DB_RDONLY, 0664))
memcpy(rpmhead->data, (unsigned char *)dbdata.data + 8, rpmhead->cnt * 16 + rpmhead->dcnt);
rpmhead->dp = rpmhead->data + rpmhead->cnt * 16;
repo->rpmdbid[(s - pool->solvables) - repo->start] = dbid;
- if (rpm2solv(pool, repo, repodata, s, rpmhead))
+ if (rpm2solv(pool, repo, data, s, rpmhead))
{
i++;
s = 0;
while (j < i)
j = pkgids[i - repo->start] = pkgids[j - repo->start];
if (j != i)
- swap_solvables(repo, repodata, i, j);
+ swap_solvables(repo, data, i, j);
}
sat_free(pkgids);
}
Solvable *r = ref->pool->solvables + ref->start + (id - 1);
if (r->repo == ref)
{
- solvable_copy(s, r, repodata, dircache);
+ solvable_copy(s, r, data, dircache);
continue;
}
}
memcpy(rpmhead->data, (unsigned char *)dbdata.data + 8, rpmhead->cnt * 16 + rpmhead->dcnt);
rpmhead->dp = rpmhead->data + rpmhead->cnt * 16;
- rpm2solv(pool, repo, repodata, s, rpmhead);
+ rpm2solv(pool, repo, data, s, rpmhead);
}
if (refhash)
sat_free(rpmids);
}
}
- if (repodata && repodata_self)
- repodata_internalize(repodata);
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
if (rpmhead)
sat_free(rpmhead);
if (db)
const char *name, *n1, *n2;
int l;
- repodata_extend(data, s - pool->solvables);
-
/* skip ./ prefix */
if (location[0] == '.' && location[1] == '/' && location[2] != '/')
location += 2;
}
void
-repo_add_rpms(Repo *repo, Repodata *repodata, const char **rpms, int nrpms)
+repo_add_rpms(Repo *repo, const char **rpms, int nrpms, int flags)
{
int i, sigdsize, sigcnt, l;
Pool *pool = repo->pool;
unsigned char lead[4096];
int headerstart, headerend;
struct stat stb;
+ Repodata *data;
+
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
if (nrpms <= 0)
return;
}
fclose(fp);
s = pool_id2solvable(pool, repo_add_solvable(repo));
- rpm2solv(pool, repo, repodata, s, rpmhead);
- if (repodata)
+ rpm2solv(pool, repo, data, s, rpmhead);
+ if (data)
{
- Id handle = (s - pool->solvables) - repodata->start;
- handle = repodata_get_handle(repodata, handle);
- add_location(repodata, s, handle, rpms[i]);
- repodata_set_num(repodata, handle, SOLVABLE_DOWNLOADSIZE, (unsigned int)((stb.st_size + 1023) / 1024));
- repodata_set_num(repodata, handle, SOLVABLE_HEADEREND, headerend);
+ Id handle = s - pool->solvables;
+ add_location(data, s, handle, rpms[i]);
+ repodata_set_num(data, handle, SOLVABLE_DOWNLOADSIZE, (unsigned int)((stb.st_size + 1023) / 1024));
+ repodata_set_num(data, handle, SOLVABLE_HEADEREND, headerend);
}
}
if (rpmhead)
sat_free(rpmhead);
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
/*
- * Copyright (c) 2007, Novell Inc.
+ * Copyright (c) 2007-2008, Novell Inc.
*
* This program is licensed under the BSD license, read LICENSE.BSD
* for further information
*/
-extern void repo_add_rpmdb(Repo *repo, Repodata *repodata, Repo *ref, const char *rootdir);
-extern void repo_add_rpms(Repo *repo, Repodata *repodata, const char **rpms, int nrpms);
+extern void repo_add_rpmdb(Repo *repo, Repo *ref, const char *rootdir, int flags);
+extern void repo_add_rpms(Repo *repo, const char **rpms, int nrpms, int flags);
/* this is a new package */
pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->common.repo));
pd->freshens = 0;
- repodata_extend(pd->data, pd->solvable - pool->solvables);
}
- pd->handle = repodata_get_handle(pd->data, (pd->solvable - pool->solvables) - pd->data->start);
+ pd->handle = pd->solvable - pool->solvables;
#if 0
fprintf(stderr, "package #%d\n", pd->solvable - pool->solvables);
#endif
char buf[BUFF_SIZE];
int i, l;
struct stateswitch *sw;
+ Repodata *data;
+
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
memset(&pd, 0, sizeof(pd));
for (i = 0, sw = stateswitches; sw->from != NUMSTATES; i++, sw++)
pd.common.pool = pool;
pd.common.repo = repo;
- pd.data = repo_add_repodata(repo, 0);
+ pd.data = data;
pd.content = sat_malloc(256);
pd.acontent = 256;
break;
}
XML_ParserFree(parser);
-
- if (pd.data)
- repodata_internalize(pd.data);
sat_free(pd.content);
join_freemem();
stringpool_free(&pd.cspool);
sat_free(pd.cscache);
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
* for further information
*/
-#define RPMMD_KINDS_SEPARATELY 1
+#define RPMMD_KINDS_SEPARATELY (1 << 2)
extern void repo_add_rpmmd(Repo *repo, FILE *fp, const char *language, int flags);
*/
static void
-add_location(struct parsedata *pd, char *line, Solvable *s, unsigned handle)
+add_location(struct parsedata *pd, char *line, Solvable *s, Id handle)
{
Pool *pool = s->repo->pool;
char *sp[3];
*/
static void
-add_source(struct parsedata *pd, char *line, Solvable *s, unsigned handle)
+add_source(struct parsedata *pd, char *line, Solvable *s, Id handle)
{
Repo *repo = s->repo;
Pool *pool = repo->pool;
}
static void
-set_checksum(struct parsedata *pd, Repodata *data, int handle, Id keyname, char *line)
+set_checksum(struct parsedata *pd, Repodata *data, Id handle, Id keyname, char *line)
{
char *sp[3];
int l;
*/
static void
-commit_diskusage (struct parsedata *pd, unsigned handle)
+commit_diskusage (struct parsedata *pd, Id handle)
{
unsigned i;
Dirpool *dp = &pd->data->dirpool;
*/
static void
-finish_solvable(struct parsedata *pd, Solvable *s, int handle, Offset freshens)
+finish_solvable(struct parsedata *pd, Solvable *s, Id handle, Offset freshens)
{
Pool *pool = pd->repo->pool;
char *sp[5];
struct parsedata pd;
Repodata *data = 0;
- Id blanr = -1;
Id handle = 0;
Id vendor = 0;
if ((flags & SUSETAGS_EXTEND) && repo->nrepodata)
indesc = 1;
- if (repo->nrepodata)
- /* use last repodata */
- data = repo->repodata + repo->nrepodata - 1;
- else
+
+ if (!(flags & REPO_REUSE_REPODATA))
data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
if (product)
{
- if (!strncmp (id2str(pool, pool->solvables[product].name), "product:", 8))
+ if (!strncmp(id2str(pool, pool->solvables[product].name), "product:", 8))
vendor = pool->solvables[product].vendor;
}
s = 0;
freshens = 0;
- /* XXX deactivate test code */
- blanr = 0;
/*
* read complete file
*
else
{
last_found_pack = nn - repo->start;
- handle = repodata_get_handle(data, last_found_pack);
+ handle = nn;
}
}
s = pool_id2solvable(pool, repo_add_solvable(repo));
last_found_pack = (s - pool->solvables) - repo->start;
if (data)
- {
- repodata_extend(data, s - pool->solvables);
- handle = repodata_get_handle(data, last_found_pack);
- }
+ handle = s - pool->solvables;
if (name)
s->name = name;
else if (pd.kind)
case CTAG('=', 'S', 'i', 'z'):
if (split (line + 6, sp, 3) == 2)
{
- repodata_set_num(data, handle, SOLVABLE_DOWNLOADSIZE, (atoi(sp[0]) + 1023) / 1024);
- repodata_set_num(data, handle, SOLVABLE_INSTALLSIZE, (atoi(sp[1]) + 1023) / 1024);
+ repodata_set_num(data, handle, SOLVABLE_DOWNLOADSIZE, (unsigned int)(atoi(sp[0]) + 1023) / 1024);
+ repodata_set_num(data, handle, SOLVABLE_INSTALLSIZE, (unsigned int)(atoi(sp[1]) + 1023) / 1024);
}
continue;
case CTAG('=', 'T', 'i', 'm'):
continue;
case CTAG('=', 'I', 'n', 's'):
repodata_set_str(data, handle, langtag(&pd, SOLVABLE_MESSAGEINS, language), line + 6);
- if (blanr)
- {
- /* XXX testcode */
- repo_set_str(repo, blanr, SOLVABLE_MESSAGEINS, line + 6);
- blanr--;
- }
continue;
case CTAG('=', 'D', 'e', 'l'):
repodata_set_str(data, handle, langtag(&pd, SOLVABLE_MESSAGEDEL, language), line + 6);
case CTAG('=', 'S', 'h', 'r'):
if (last_found_pack >= pd.nshare)
{
- if (pd.share_with)
- {
- pd.share_with = realloc (pd.share_with, (last_found_pack + 256) * sizeof (*pd.share_with));
- memset (pd.share_with + pd.nshare, 0, (last_found_pack + 256 - pd.nshare) * sizeof (*pd.share_with));
- }
- else
- pd.share_with = calloc (last_found_pack + 256, sizeof (*pd.share_with));
+ pd.share_with = sat_realloc2(pd.share_with, last_found_pack + 256, sizeof (*pd.share_with));
+ memset(pd.share_with + pd.nshare, 0, (last_found_pack + 256 - pd.nshare) * sizeof (*pd.share_with));
pd.nshare = last_found_pack + 256;
}
- pd.share_with[last_found_pack] = strdup (line + 6);
+ pd.share_with[last_found_pack] = strdup(line + 6);
continue;
case CTAG('=', 'D', 'i', 'r'):
add_dirline (&pd, line + 6);
break;
case CTAG('=', 'C', 'k', 's'):
set_checksum(&pd, data, handle, SOLVABLE_CHECKSUM, line + 6);
+#if 0
if (0)
{
Id sub = repodata_create_struct(data, handle, str2id(pool, "solvable:komisch", 1));
repodata_set_poolstr(data, sub, str2id(pool, "sub:key1", 1), line + 7);
repodata_set_num(data, sub, str2id(pool, "sub:key2", 1), last_found_pack+1);
}
+#endif
break;
case CTAG('=', 'L', 'a', 'n'):
language = strdup(line + 6);
}
}
if (n != repo->end)
- repodata_merge_attrs(data, i, last_found);
+ repodata_merge_attrs(data, repo->start + i, repo->start + last_found);
+ free(pd.share_with[i]);
}
- free (pd.share_with);
+ free(pd.share_with);
}
- if (data)
+ if (!(flags & REPO_NO_INTERNALIZE))
repodata_internalize(data);
if (pd.common.tmp)
* if <attrname> given, write attributes as '<attrname>.attr'
*/
-#define SUSETAGS_KINDS_SEPARATELY 1
-#define SUSETAGS_EXTEND 2
+#define SUSETAGS_KINDS_SEPARATELY (1 << 2)
+#define SUSETAGS_EXTEND (1 << 3)
extern void repo_add_susetags(Repo *repo, FILE *fp, Id product, const char *language, int flags);
* for further information
*/
-#define DO_ARRAY 1
-
#define _GNU_SOURCE
#include <sys/types.h>
#include <limits.h>
Repodata *data;
unsigned int datanum;
Solvable *solvable;
- unsigned int timestamp;
-
+ Id collhandle;
struct stateswitch *swtab[NUMSTATES];
enum state sbtab[NUMSTATES];
- char *tempstr;
- int ltemp;
- int atemp;
};
/*
* at </package> in order to keep all UPDATE_COLLECTION_* arrays in sync
*/
-static int package_filename_seen = 0;
-static int package_flags = 0; /* same for reboot/restart flags, to be written at </package> */
-
/*
* create evr (as Id) from 'epoch', 'version' and 'release' attributes
*/
solvable = pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->repo));
- pd->datanum = (pd->solvable - pool->solvables) - pd->repo->start;
- repodata_extend(pd->data, pd->solvable - pool->solvables);
- repodata_extend(pd->data, pd->solvable - pool->solvables);
- pd->datanum = repodata_get_handle(pd->data, pd->datanum);
+ pd->datanum = pd->solvable - pool->solvables;
-
solvable->vendor = str2id(pool, from, 1);
solvable->evr = str2id(pool, version, 1);
solvable->arch = ARCH_NOARCH;
- repodata_set_str(pd->data, pd->datanum, SOLVABLE_PATCHCATEGORY, type);
+ if (type)
+ repodata_set_str(pd->data, pd->datanum, SOLVABLE_PATCHCATEGORY, type);
}
break;
/* <id>FEDORA-2007-4594</id> */
if (!strcmp(*atts, "date"))
date = atts[1];
}
- repodata_set_str(pd->data, pd->datanum, SOLVABLE_BUILDTIME, date);
+ if (date)
+ {
+ if (strlen(date) == strspn(date, "0123456789"))
+ repodata_set_num(pd->data, pd->datanum, SOLVABLE_BUILDTIME, atoi(date));
+ else
+ {
+ /* FIXME: must convert to integer! */
+ repodata_set_str(pd->data, pd->datanum, SOLVABLE_BUILDTIME, date);
+ }
+ }
}
break;
case STATE_REFERENCES:
case STATE_REFERENCE:
{
const char *href = 0, *id = 0, *title = 0, *type = 0;
+ Id handle;
for (; *atts; atts += 2)
{
if (!strcmp(*atts, "href"))
else if (!strcmp(*atts, "type"))
type = atts[1];
}
-#if DO_ARRAY
- repodata_add_poolstr_array(pd->data, pd->datanum, UPDATE_REFERENCE_HREF, href);
- repodata_add_poolstr_array(pd->data, pd->datanum, UPDATE_REFERENCE_ID, id);
- repodata_add_poolstr_array(pd->data, pd->datanum, UPDATE_REFERENCE_TITLE, title);
- repodata_add_poolstr_array(pd->data, pd->datanum, UPDATE_REFERENCE_TYPE, type);
-#endif
+ handle = repodata_new_handle(pd->data);
+ repodata_set_str(pd->data, handle, UPDATE_REFERENCE_HREF, href);
+ repodata_set_str(pd->data, handle, UPDATE_REFERENCE_ID, id);
+ repodata_set_str(pd->data, handle, UPDATE_REFERENCE_TITLE, title);
+ repodata_set_poolstr(pd->data, handle, UPDATE_REFERENCE_TYPE, type);
+ repodata_add_flexarray(pd->data, pd->datanum, UPDATE_REFERENCE, handle);
}
break;
/* <description>This update ...</description> */
{
const char *arch = 0, *name = 0, *src = 0;
Id evr = makeevr_atts(pool, pd, atts); /* parse "epoch", "version", "release" */
- Id n, a, na;
+ Id n, a;
Id rel_id;
-
-
- /* reset package_* markers, to be evaluated at </package> */
- package_filename_seen = 0;
- package_flags = 0;
-
for (; *atts; atts += 2)
{
else if (!strcmp(*atts, "src"))
src = atts[1];
}
- /* generated Ids for name and arch */
+ /* generated Id for name */
n = str2id(pool, name, 1);
if (arch)
- a = str2id(pool, arch, 1);
- else
- a = ARCH_NOARCH;
- /* now combine both to a single Id */
- na = rel2id(pool, n, a, REL_ARCH, 1);
-
-
- rel_id = rel2id(pool, na, evr, REL_LT, 1);
+ {
+ /* generate Id for arch and combine with name */
+ a = str2id(pool, arch, 1);
+ n = rel2id(pool, n, a, REL_ARCH, 1);
+ }
+ rel_id = rel2id(pool, n, evr, REL_LT, 1);
solvable->conflicts = repo_addid_dep(pd->repo, solvable->conflicts, rel_id, 0);
-#if DO_ARRAY
- repodata_add_idarray(pd->data, pd->datanum, UPDATE_COLLECTION_NAME, n);
- repodata_add_idarray(pd->data, pd->datanum, UPDATE_COLLECTION_EVR, evr);
- repodata_add_idarray(pd->data, pd->datanum, UPDATE_COLLECTION_ARCH, a);
-#else
- /* _FILENAME and _FLAGS are written at </package> */
- if (1) {
- const char *evrstr = id2str(pool, evr);
- int buflen = strlen(name) + 1 + strlen(evrstr) + 1 + strlen(arch?arch:"") + 1;
- char *buf;
- if (!arch) arch = "";
- buf = (char *)malloc(buflen);
- if (!buf) exit(1);
- sprintf(buf, "%s %s %s", name, evrstr, arch);
- repodata_add_poolstr_array(pd->data, pd->datanum, UPDATE_COLLECTION, buf);
- free(buf);
- }
-#endif
+
+ /* who needs the collection anyway? */
+ pd->collhandle = repodata_new_handle(pd->data);
+ repodata_set_id(pd->data, pd->collhandle, UPDATE_COLLECTION_NAME, n);
+ repodata_set_id(pd->data, pd->collhandle, UPDATE_COLLECTION_EVR, evr);
+ repodata_set_id(pd->data, pd->collhandle, UPDATE_COLLECTION_ARCH, a);
+ break;
}
- break;
/* <filename>libntlm-0.4.2-1.fc8.x86_64.rpm</filename> */
/* <filename>libntlm-0.4.2-1.fc8.x86_64.rpm</filename> */
case STATE_FILENAME:
s->provides = repo_addid_dep(repo, s->provides, rel2id(pool, s->name, s->evr, REL_EQ, 1), 0);
break;
case STATE_ID:
- {
- if (pd->content)
- {
- s->name = str2id(pool, join2("patch", ":", pd->content), 1);
- }
- }
+ s->name = str2id(pool, join2("patch", ":", pd->content), 1);
break;
/* <title>imlib-1.9.15-6.fc8</title> */
case STATE_TITLE:
- {
- while (pd->lcontent > 0
- && *(pd->content + pd->lcontent - 1) == '\n')
- {
- --pd->lcontent;
- *(pd->content + pd->lcontent) = 0;
- }
- repodata_set_str(pd->data, pd->datanum, SOLVABLE_SUMMARY, pd->content);
- }
+ while (pd->lcontent > 0 && pd->content[pd->lcontent - 1] == '\n')
+ pd->content[--pd->lcontent] = 0;
+ repodata_set_str(pd->data, pd->datanum, SOLVABLE_SUMMARY, pd->content);
break;
/*
* <release>Fedora 8</release>
* <description>This update ...</description>
*/
case STATE_DESCRIPTION:
- {
- repodata_set_str(pd->data, pd->datanum, SOLVABLE_DESCRIPTION, pd->content);
- }
+ repodata_set_str(pd->data, pd->datanum, SOLVABLE_DESCRIPTION, pd->content);
break;
/*
* <message>Warning! ...</message>
*/
case STATE_MESSAGE:
- {
- repodata_set_str(pd->data, pd->datanum, UPDATE_MESSAGE, pd->content);
- }
+ repodata_set_str(pd->data, pd->datanum, UPDATE_MESSAGE, pd->content);
break;
case STATE_PKGLIST:
break;
case STATE_NAME:
break;
case STATE_PACKAGE:
- {
-#if DO_ARRAY
- /* write _FILENAME and _FLAGS at </package>
- * to ensure all UPDATE_COLLECTION_* arrays are filled in parallel
- */
- if (!package_filename_seen)
- {
- repodata_add_poolstr_array(pd->data, pd->datanum, UPDATE_COLLECTION_FILENAME, "");
- }
- repodata_add_idarray(pd->data, pd->datanum, UPDATE_COLLECTION_FLAGS, package_flags+1);
-#endif
- }
+ repodata_add_flexarray(pd->data, pd->datanum, UPDATE_COLLECTION, pd->collhandle);
+ pd->collhandle = 0;
break;
/* <filename>libntlm-0.4.2-1.fc8.x86_64.rpm</filename> */
/* <filename>libntlm-0.4.2-1.fc8.x86_64.rpm</filename> */
case STATE_FILENAME:
- {
-#if DO_ARRAY
- repodata_add_poolstr_array(pd->data, pd->datanum, UPDATE_COLLECTION_FILENAME, pd->content);
- package_filename_seen = 1;
-#endif
- }
+ repodata_set_str(pd->data, pd->collhandle, UPDATE_COLLECTION_FILENAME, pd->content);
break;
/* <reboot_suggested>True</reboot_suggested> */
case STATE_REBOOT:
- {
- if (pd->content
- && (pd->content[0] == 'T'
- || pd->content[0] == 't'|| pd->content[0] == '1'))
- {
- /* FIXME: this is per-package, the global flag should be computed at runtime */
- repodata_set_void(pd->data, pd->datanum, UPDATE_REBOOT);
- package_flags = 1;
- }
- }
+ if (pd->content[0] == 'T' || pd->content[0] == 't'|| pd->content[0] == '1')
+ {
+ /* FIXME: this is per-package, the global flag should be computed at runtime */
+ repodata_set_void(pd->data, pd->datanum, UPDATE_REBOOT);
+ repodata_set_void(pd->data, pd->collhandle, UPDATE_REBOOT);
+ }
break;
/* <restart_suggested>True</restart_suggested> */
case STATE_RESTART:
- {
- if (pd->content
- && (pd->content[0] == 'T'
- || pd->content[0] == 't' || pd->content[0] == '1'))
- {
- /* FIXME: this is per-package, the global flag should be computed at runtime */
- repodata_set_void(pd->data, pd->datanum, UPDATE_RESTART);
- package_flags = 2;
- }
- }
+ if (pd->content[0] == 'T' || pd->content[0] == 't'|| pd->content[0] == '1')
+ {
+ /* FIXME: this is per-package, the global flag should be computed at runtime */
+ repodata_set_void(pd->data, pd->datanum, UPDATE_RESTART);
+ repodata_set_void(pd->data, pd->collhandle, UPDATE_RESTART);
+ }
break;
/* <relogin_suggested>True</relogin_suggested> */
case STATE_RELOGIN:
- {
- if (pd->content
- && (pd->content[0] == 'T'
- || pd->content[0] == 't' || pd->content[0] == '1'))
- {
- /* FIXME: this is per-package, the global flag should be computed at runtime */
- repodata_set_void(pd->data, pd->datanum, UPDATE_RELOGIN);
- package_flags = 2;
- }
- }
+ if (pd->content[0] == 'T' || pd->content[0] == 't'|| pd->content[0] == '1')
+ {
+ /* FIXME: this is per-package, the global flag should be computed at runtime */
+ repodata_set_void(pd->data, pd->datanum, UPDATE_RELOGIN);
+ repodata_set_void(pd->data, pd->collhandle, UPDATE_RELOGIN);
+ }
break;
default:
break;
char buf[BUFF_SIZE];
int i, l;
struct stateswitch *sw;
+ Repodata *data;
+
+ if (!(flags & REPO_REUSE_REPODATA))
+ data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
memset(&pd, 0, sizeof(pd));
for (i = 0, sw = stateswitches; sw->from != NUMSTATES; i++, sw++)
}
pd.pool = pool;
pd.repo = repo;
- pd.data = repo_add_repodata(pd.repo, 0);
+ pd.data = data;
pd.content = malloc(256);
pd.acontent = 256;
pd.lcontent = 0;
- pd.tempstr = malloc(256);
- pd.atemp = 256;
- pd.ltemp = 0;
XML_Parser parser = XML_ParserCreate(NULL);
XML_SetUserData(parser, &pd);
XML_SetElementHandler(parser, startElement, endElement);
break;
}
XML_ParserFree(parser);
-
- if (pd.data)
- repodata_internalize(pd.data);
-
free(pd.content);
join_freemem();
+
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
/* EOF */
+/*
+ * Copyright (c) 2007, Novell Inc.
+ *
+ * This program is licensed under the BSD license, read LICENSE.BSD
+ * for further information
+ */
+
void repo_add_updateinfoxml(Repo *repo, FILE *fp, int flags);
#include "pool.h"
#include "util.h"
#include "repo_write.h"
+#include "repopage.h"
/*------------------------------------------------------------------*/
/* Id map optimizations */
Id *dirused;
Id vstart;
+
+ Id maxdata;
+ Id lastlen;
+
+ int doingsolvables; /* working on solvables data */
};
#define NEEDED_BLOCK 1023
Id id;
int rm;
+ if (key->name == REPOSITORY_SOLVABLES)
+ return SEARCH_NEXT_KEY; /* we do not want this one */
+ if (data != data->repo->repodata + data->repo->nrepodata - 1)
+ if (key->name == REPOSITORY_ADDEDFILEPROVIDES || key->name == REPOSITORY_EXTERNAL || key->name == REPOSITORY_LOCATION || key->name == REPOSITORY_KEYS)
+ return SEARCH_NEXT_KEY;
+
rm = cbdata->keymap[cbdata->keymapstart[data - data->repo->repodata] + (key - data->keys)];
if (!rm)
return SEARCH_NEXT_KEY; /* we do not want this one */
/* record key in schema */
- if ((key->type != REPOKEY_TYPE_COUNTED || kv->eof == 0)
+ if ((key->type != REPOKEY_TYPE_FIXARRAY || kv->eof == 0)
&& (cbdata->sp == cbdata->schema || cbdata->sp[-1] != rm))
*cbdata->sp++ = rm;
switch(key->type)
else
setdirused(cbdata, &data->dirpool, id);
break;
- case REPOKEY_TYPE_COUNTED:
+ case REPOKEY_TYPE_FIXARRAY:
if (kv->eof == 0)
{
if (cbdata->oldschema)
cbdata->oldsp = cbdata->oldschema = 0;
}
break;
+ case REPOKEY_TYPE_FLEXARRAY:
+ if (kv->entry == 0)
+ {
+ if (!kv->eof)
+ *cbdata->sp++ = 0; /* mark start */
+ }
+ else
+ {
+ /* just finished a schema, rewind */
+ Id *sp = cbdata->sp - 1;
+ *sp = 0;
+ while (sp[-1])
+ sp--;
+ cbdata->subschemata = sat_extend(cbdata->subschemata, cbdata->nsubschemata, 1, sizeof(Id), SCHEMATA_BLOCK);
+ cbdata->subschemata[cbdata->nsubschemata++] = addschema(cbdata, sp);
+ cbdata->sp = kv->eof ? sp - 1: sp;
+ }
+ break;
default:
break;
}
repo_write_cb_needed(void *vcbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
{
struct cbdata *cbdata = vcbdata;
- Repo *repo = s ? s->repo : 0;
+ Repo *repo = data->repo;
+
#if 0
- fprintf(stderr, "solvable %d (%s): key (%d)%s %d\n", s ? s - s->repo->pool->solvables : 0, s ? id2str(s->repo->pool, s->name) : "", key->name, id2str(repo->pool, key->name), key->type);
+ if (s)
+ fprintf(stderr, "solvable %d (%s): key (%d)%s %d\n", s ? s - repo->pool->solvables : 0, s ? id2str(repo->pool, s->name) : "", key->name, id2str(repo->pool, key->name), key->type);
#endif
return repo_write_collect_needed(cbdata, repo, data, key, kv);
}
unsigned char v[4];
struct extdata *xd;
+ if (key->name == REPOSITORY_SOLVABLES)
+ return SEARCH_NEXT_KEY;
+ if (data != data->repo->repodata + data->repo->nrepodata - 1)
+ if (key->name == REPOSITORY_ADDEDFILEPROVIDES || key->name == REPOSITORY_EXTERNAL || key->name == REPOSITORY_LOCATION || key->name == REPOSITORY_KEYS)
+ return SEARCH_NEXT_KEY;
+
rm = cbdata->keymap[cbdata->keymapstart[data - data->repo->repodata] + (key - data->keys)];
if (!rm)
- return 0; /* we do not want this one */
+ return SEARCH_NEXT_KEY; /* we do not want this one */
if (cbdata->mykeys[rm].storage == KEY_STORAGE_VERTICAL_OFFSET)
{
case REPOKEY_TYPE_SHA1:
data_addblob(xd, (unsigned char *)kv->str, SIZEOF_SHA1);
break;
+ case REPOKEY_TYPE_SHA256:
+ data_addblob(xd, (unsigned char *)kv->str, SIZEOF_SHA256);
+ break;
case REPOKEY_TYPE_U32:
u32 = kv->num;
v[0] = u32 >> 24;
data_addideof(xd, id, kv->eof);
data_addblob(xd, (unsigned char *)kv->str, strlen(kv->str) + 1);
break;
- case REPOKEY_TYPE_COUNTED:
+ case REPOKEY_TYPE_FIXARRAY:
if (kv->eof == 0)
{
if (kv->num)
{
}
break;
+ case REPOKEY_TYPE_FLEXARRAY:
+ if (!kv->entry)
+ data_addid(xd, kv->num);
+ if (!kv->eof)
+ data_addid(xd, cbdata->subschemata[cbdata->current_sub++]);
+ if (xd == cbdata->extdata + 0 && !kv->path && !cbdata->doingsolvables)
+ {
+ if (xd->len - cbdata->lastlen > cbdata->maxdata)
+ cbdata->maxdata = xd->len - cbdata->lastlen;
+ cbdata->lastlen = xd->len;
+ }
+ break;
default:
fprintf(stderr, "unknown type for %d: %d\n", key->name, key->type);
exit(1);
return n;
}
-#define BLOB_PAGEBITS 15
-#define BLOB_PAGESIZE (1 << BLOB_PAGEBITS)
-
static void
write_compressed_page(FILE *fp, unsigned char *page, int len)
{
}
+#if 0
static Id subfilekeys[] = {
REPODATA_INFO, REPOKEY_TYPE_VOID,
REPODATA_EXTERNAL, REPOKEY_TYPE_VOID,
REPODATA_RPMDBCOOKIE, REPOKEY_TYPE_SHA256,
0,
};
+#endif
/*
* Repo
*/
void
-repo_write(Repo *repo, FILE *fp, int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata), void *kfdata, Repodatafile *fileinfo, int nsubfiles)
+repo_write(Repo *repo, FILE *fp, int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata), void *kfdata, Id **keyarrayp)
{
Pool *pool = repo->pool;
int i, j, k, n;
Stringpool ownspool, *spool;
Dirpool owndirpool, *dirpool;
- int setfileinfo = 0;
Id *repodataschemata = 0;
+ Id mainschema;
struct extdata *xd;
- int entrysize;
- Id maxentrysize;
Id type_constantid = 0;
- /* If we're given a fileinfo structure, but have no subfiles, then we're
- writing a subfile and our callers wants info about it. */
- if (fileinfo && nsubfiles == 0)
- setfileinfo = 1;
- if (nsubfiles)
- repodataschemata = sat_calloc(nsubfiles, sizeof(Id));
-
memset(&cbdata, 0, sizeof(cbdata));
cbdata.repo = repo;
}
cbdata.nmykeys = i;
+ if (repo->nsolvables)
+ {
+ key = cbdata.mykeys + cbdata.nmykeys;
+ key->name = REPOSITORY_SOLVABLES;
+ key->type = REPOKEY_TYPE_FLEXARRAY;
+ key->size = 0;
+ key->storage = KEY_STORAGE_INCORE;
+ cbdata.keymap[key->name] = cbdata.nmykeys++;
+ }
+
+#if 0
/* If we store subfile info, generate the necessary keys. */
if (nsubfiles)
{
cbdata.keymap[key->name] = cbdata.nmykeys++;
}
}
+#endif
dirpoolusage = 0;
cbdata.schema = sat_calloc(cbdata.nmykeys, sizeof(Id));
cbdata.sp = cbdata.schema;
cbdata.solvschemata = sat_calloc(repo->nsolvables, sizeof(Id));
+#if 0
cbdata.extraschemata = sat_calloc(repo->nextra, sizeof(Id));
+#endif
idarraydata = repo->idarraydata;
+ cbdata.doingsolvables = 1;
for (i = repo->start, s = pool->solvables + i, n = 0; i < repo->end; i++, s++)
{
if (s->repo != repo)
continue;
if (i < data->start || i >= data->end)
continue;
- repodata_search(data, i - data->start, 0, repo_write_cb_needed, &cbdata);
+ repodata_search(data, i, 0, repo_write_cb_needed, &cbdata);
needid = cbdata.needid;
}
}
cbdata.solvschemata[n] = addschema(&cbdata, cbdata.schema);
n++;
}
+ cbdata.doingsolvables = 0;
+ assert(n == repo->nsolvables);
+
+ /* create main schema */
+ cbdata.sp = cbdata.schema;
+ /* collect all other data from all repodatas */
+ /* XXX: merge arrays of equal keys? */
+ for (j = 0, data = repo->repodata; j < repo->nrepodata; j++, data++)
+ repodata_search(data, REPOENTRY_META, 0, repo_write_cb_needed, &cbdata);
+ sp = cbdata.sp;
+ /* add solvables if needed */
+ if (repo->nsolvables)
+ {
+ *sp++ = cbdata.keymap[REPOSITORY_SOLVABLES];
+ cbdata.mykeys[cbdata.keymap[REPOSITORY_SOLVABLES]].size++;
+ }
+ *sp = 0;
+ mainschema = addschema(&cbdata, cbdata.schema);
+
+#if 0
if (repo->nextra && anyrepodataused)
for (i = -1; i >= -repo->nextra; i--)
{
needid[fileinfo[i].keys[j].name].need++;
}
}
+#endif
/********************************************************************/
/********************************************************************/
cbdata.extdata = sat_calloc(cbdata.nmykeys, sizeof(struct extdata));
-
- cbdata.current_sub = 0;
+
xd = cbdata.extdata;
- maxentrysize = 0;
+ cbdata.current_sub = 0;
+ /* write main schema */
+ cbdata.lastlen = 0;
+ data_addid(xd, mainschema);
+
+#if 1
+ for (j = 0, data = repo->repodata; j < repo->nrepodata; j++, data++)
+ repodata_search(data, REPOENTRY_META, 0, repo_write_cb_adddata, &cbdata);
+#endif
+
+ if (xd->len - cbdata.lastlen > cbdata.maxdata)
+ cbdata.maxdata = xd->len - cbdata.lastlen;
+ cbdata.lastlen = xd->len;
+
+ if (repo->nsolvables)
+ data_addid(xd, repo->nsolvables); /* FLEXARRAY nentries */
+ cbdata.doingsolvables = 1;
for (i = repo->start, s = pool->solvables + i, n = 0; i < repo->end; i++, s++)
{
if (s->repo != repo)
continue;
- entrysize = xd->len;
data_addid(xd, cbdata.solvschemata[n]);
if (cbdata.keymap[SOLVABLE_NAME])
data_addid(xd, needid[s->name].need);
continue;
if (i < data->start || i >= data->end)
continue;
- repodata_search(data, i - data->start, 0, repo_write_cb_adddata, &cbdata);
+ repodata_search(data, i, 0, repo_write_cb_adddata, &cbdata);
}
}
- entrysize = xd->len - entrysize;
- if (entrysize > maxentrysize)
- maxentrysize = entrysize;
+ if (xd->len - cbdata.lastlen > cbdata.maxdata)
+ cbdata.maxdata = xd->len - cbdata.lastlen;
+ cbdata.lastlen = xd->len;
n++;
}
+ cbdata.doingsolvables = 0;
+
+ assert(cbdata.current_sub == cbdata.nsubschemata);
+ if (cbdata.subschemata)
+ {
+ cbdata.subschemata = sat_free(cbdata.subschemata);
+ cbdata.nsubschemata = 0;
+ }
+#if 0
if (repo->nextra && anyrepodataused)
for (i = -1; i >= -repo->nextra; i--)
{
if (entrysize > maxentrysize)
maxentrysize = entrysize;
}
+#endif
/********************************************************************/
/* write file header */
write_u32(fp, 'S' << 24 | 'O' << 16 | 'L' << 8 | 'V');
- write_u32(fp, repo->nextra ? SOLV_VERSION_7 : SOLV_VERSION_6);
+ write_u32(fp, SOLV_VERSION_8);
/* write counts */
write_u32(fp, repo->nsolvables);
write_u32(fp, cbdata.nmykeys);
write_u32(fp, cbdata.nmyschemata);
- write_u32(fp, nsubfiles); /* info blocks. */
- if (repo->nextra)
- {
- write_u32(fp, repo->nextra);
- write_u32(fp, SOLV_CONTENT_VERSION);
- }
solv_flags = 0;
solv_flags |= SOLV_FLAG_PREFIX_POOL;
write_u32(fp, solv_flags);
/*
* write keys
*/
- if (setfileinfo)
- {
- fileinfo->nkeys = cbdata.nmykeys;
- fileinfo->keys = sat_calloc(fileinfo->nkeys, sizeof (*fileinfo->keys));
- }
+ if (keyarrayp)
+ *keyarrayp = sat_calloc(2 * cbdata.nmykeys + 1, sizeof(Id));
for (i = 1; i < cbdata.nmykeys; i++)
{
write_id(fp, needid[cbdata.mykeys[i].name].need);
else
write_id(fp, cbdata.extdata[i].len);
write_id(fp, cbdata.mykeys[i].storage);
- if (setfileinfo)
- fileinfo->keys[i] = cbdata.mykeys[i];
+ if (keyarrayp)
+ {
+ (*keyarrayp)[2 * i - 2] = cbdata.mykeys[i].name;
+ (*keyarrayp)[2 * i - 1] = cbdata.mykeys[i].type;
+ }
}
/*
write_idarray(fp, pool, 0, cbdata.myschemadata + cbdata.myschemata[i]);
}
+#if 0
/*
* write info block
*/
write_blob(fp, xd.buf, xd.len);
sat_free(xd.buf);
}
+#endif
/********************************************************************/
+ write_id(fp, cbdata.maxdata);
+ write_id(fp, cbdata.extdata[0].len);
+ if (cbdata.extdata[0].len)
+ write_blob(fp, cbdata.extdata[0].buf, cbdata.extdata[0].len);
+ sat_free(cbdata.extdata[0].buf);
+#if 0
/*
* write Solvable data
*/
write_blob(fp, cbdata.extdata[0].buf, cbdata.extdata[0].len);
}
sat_free(cbdata.extdata[0].buf);
+#endif
/* write vertical data */
for (i = 1; i < cbdata.nmykeys; i++)
for (i = 1; i < cbdata.nmykeys; i++)
if (cbdata.extdata[i].len)
write_blob(fp, cbdata.extdata[i].buf, cbdata.extdata[i].len);
-#endif
/* Fill fileinfo for our caller. */
if (setfileinfo)
fileinfo->checksumtype = 0;
fileinfo->location = 0;
}
+#endif
for (i = 1; i < cbdata.nmykeys; i++)
sat_free(cbdata.extdata[i].buf);
unsigned char *rpmdbcookie;
} Repodatafile;
-void repo_write(Repo *repo, FILE *fp, int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata), void *kfdata, Repodatafile *fileinfo, int nsubfiles);
+void repo_write(Repo *repo, FILE *fp, int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata), void *kfdata, Id **keyarrayp);
#endif
const char *type = find_attr("type", atts, 0);
s = pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->repo));
repodata_extend(pd->data, s - pool->solvables);
- pd->handle = repodata_get_handle(pd->data, (s - pool->solvables) - pd->repo->start);
+ pd->handle = s - pool->solvables;
if (type)
{
repodata_set_str(pd->data, pd->handle, PRODUCT_TYPE, type);
{
Pool *pool = pool_create();
Repo *repo, *ref = 0;
- Repodata *repodata;
+ Repodata *data;
FILE *fp;
Pool *refpool;
int c;
int nopacks = 0;
const char *root = 0;
const char *basefile = 0;
- const char *proddir = 0;
+ char *proddir = 0;
const char *attribute = 0;
/*
*/
repo = repo_create(pool, "installed");
- repodata = repo_add_repodata(repo, 0);
+ data = repo_add_repodata(repo, 0);
if (!nopacks)
- repo_add_rpmdb(repo, repodata, ref, root);
+ repo_add_rpmdb(repo, ref, root, REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
if (proddir && *proddir)
{
+ char *buf = proddir;
/* if <root> given, not '/', and proddir does not start with <root> */
if (root && *root)
{
int rootlen = strlen(root);
if (strncmp(root, proddir, rootlen))
{
- char *buf;
buf = (char *)sat_malloc(rootlen + strlen(proddir) + 2); /* + '/' + \0 */
strcpy(buf, root);
- if (root[rootlen-1] != '/'
- && *proddir != '/')
- {
- strcpy(buf+rootlen, "/");
- ++rootlen;
- }
- strcpy(buf+rootlen, proddir);
- proddir = buf;
+ if (root[rootlen - 1] != '/' && *proddir != '/')
+ buf[rootlen++] = '/';
+ strcpy(buf + rootlen, proddir);
}
}
-
- repo_add_products(repo, repodata, proddir, root, attribute);
+ repo_add_products(repo, proddir, root, attribute, REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
+ if (buf != proddir)
+ sat_free(buf);
}
- if (repodata)
- repodata_internalize(repodata);
+ repodata_internalize(data);
if (ref)
{
#include "repo_solv.h"
#include "common_write.h"
+/*
+ * fgets0 - like fgets(), but for NUL-terminated records (as produced by
+ * e.g. "find -print0"): read bytes from stream into s until a 0 byte is
+ * seen, at most size - 1 data bytes.
+ *
+ * Returns s when a record (possibly empty) was read, 0 when EOF was hit
+ * before any byte could be read.  If EOF interrupts a partial record,
+ * the bytes read so far are returned with a 0 terminator appended.
+ * If the buffer fills up before the terminator, s is 0-terminated and
+ * returned; the rest of the record stays in the stream.
+ */
+static char *
+fgets0(char *s, int size, FILE *stream)
+{
+  char *p = s;
+  int c;
+
+  while (--size > 0)
+    {
+      c = getc(stream);
+      if (c == EOF)
+	{
+	  /* EOF with nothing read at all: signal end-of-input */
+	  if (p == s)
+	    return 0;
+	  /* EOF mid-record: terminate what we have and return it */
+	  c = 0;
+	}
+      *p++ = c;
+      if (!c)
+	return s;
+    }
+  /* buffer exhausted before the 0 terminator: truncate the record */
+  *p = 0;
+  return s;
+}
+
int
main(int argc, char **argv)
{
const char **rpms = 0;
char *manifest = 0;
+ int manifest0 = 0;
int c, nrpms = 0;
Pool *pool = pool_create();
Repo *repo;
- Repodata *repodata;
FILE *fp;
char buf[4096], *p;
const char *basefile = 0;
- while ((c = getopt(argc, argv, "b:m:")) >= 0)
+ while ((c = getopt(argc, argv, "0b:m:")) >= 0)
{
switch(c)
{
case 'm':
manifest = optarg;
break;
+ case '0':
+ manifest0 = 1;
+ break;
default:
exit(1);
}
perror(manifest);
exit(1);
}
- while(fgets(buf, sizeof(buf), fp))
+ for (;;)
{
- if ((p = strchr(buf, '\n')) != 0)
- *p = 0;
+ if (manifest0)
+ {
+ if (!fgets0(buf, sizeof(buf), fp))
+ break;
+ }
+ else
+ {
+ if (!fgets(buf, sizeof(buf), fp))
+ break;
+ if ((p = strchr(buf, '\n')) != 0)
+ *p = 0;
+ }
rpms = sat_extend(rpms, nrpms, 1, sizeof(char *), 15);
rpms[nrpms++] = strdup(buf);
}
rpms[nrpms++] = strdup(argv[optind++]);
}
repo = repo_create(pool, "rpms2solv");
- repodata = repo_add_repodata(repo, 0);
- repo_add_rpms(repo, repodata, rpms, nrpms);
- if (repodata)
- repodata_internalize(repodata);
+ repo_add_rpms(repo, rpms, nrpms, 0);
tool_write(repo, basefile, 0);
pool_free(pool);
for (c = 0; c < nrpms; c++)
}
Pool *pool = pool_create();
Repo *repo = repo_create(pool, "<susetags>");
+
+ repo_add_repodata(repo, 0);
+
if (contentfile)
{
FILE *fp = fopen (contentfile, "r");
perror(contentfile);
exit(1);
}
- repo_add_content(repo, fp);
+ repo_add_content(repo, fp, REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
product = repo->start;
-
fclose (fp);
}
+
if (attrname)
{
/* ensure '.attr' suffix */
perror(fn);
exit(1);
}
- repo_add_susetags(repo, fp, product, 0, flags);
+ repo_add_susetags(repo, fp, product, 0, flags | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
fclose(fp);
}
else if (!strcmp(fn, "packages.DU") || !strcmp(fn, "packages.DU.gz"))
perror(fn);
exit(1);
}
- repo_add_susetags(repo, fp, product, 0, flags | SUSETAGS_EXTEND);
+ repo_add_susetags(repo, fp, product, 0, flags | SUSETAGS_EXTEND | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
fclose(fp);
}
else if (!strcmp(fn, "packages.FL") || !strcmp(fn, "packages.FL.gz"))
perror(fn);
exit(1);
}
- repo_add_susetags(repo, fp, product, 0, flags | SUSETAGS_EXTEND);
+ repo_add_susetags(repo, fp, product, 0, flags | SUSETAGS_EXTEND | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
fclose(fp);
#else
/* ignore for now. reactivate when filters work */
perror(fn);
exit(1);
}
- repo_add_susetags(repo, fp, product, lang, flags | SUSETAGS_EXTEND);
+ repo_add_susetags(repo, fp, product, lang, flags | SUSETAGS_EXTEND | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
fclose(fp);
}
}
free(files[i]);
free(files);
free(fnp);
+ repo_internalize(repo);
}
else
/* read data from stdin */
- repo_add_susetags(repo, stdin, product, 0, flags);
-
+ repo_add_susetags(repo, stdin, product, 0, REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
+ repo_internalize(repo);
tool_write(repo, basefile, attrname);
pool_free(pool);
exit(0);