}
static void
+incore_map_idarray(Repodata *data, unsigned char *dp, Id *map, Id max)
+{
+ /* Re-encode the Id array starting at dp into data's incore section,
+ translating every Id through 'map'.  We have to map the IDs,
+ which might also change
+ the necessary number of bytes, so we can't just copy
+ over the blob and adjust it.
+ 'max' is the size of the mapping table; 0 disables range checking. */
+ for (;;)
+ {
+ Id id;
+ int eof;
+ /* fetch next Id together with its end-of-array flag */
+ dp = data_read_ideof(dp, &id, &eof);
+ if (max && id >= max)
+ {
+ pool_debug(mypool, SAT_ERROR, "incore_map_idarray: id too large (%u/%u)\n", id, max);
+ data->error = SOLV_ERROR_ID_RANGE;
+ break;
+ }
+ id = map[id];
+ /* free up bit 6 for the "more entries follow" marker: ids >= 64
+ are shifted so the low 6 bits stay and the rest moves up one bit */
+ if (id >= 64)
+ id = (id & 63) | ((id & ~63) << 1);
+ /* bit 6 set means another entry follows; clear on the last one */
+ incore_add_id(data, eof ? id : id | 64);
+ if (eof)
+ break;
+ }
+}
+
+static void
incore_add_u32(Repodata *data, unsigned int x)
{
unsigned char *dp;
dps = dp;
dp = data_skip(dp, REPOKEY_TYPE_IDARRAY);
if (keys[key].storage == KEY_STORAGE_INCORE && idmap)
- abort();
- if (keys[key].storage == KEY_STORAGE_INCORE)
+ incore_map_idarray(&data, dps, idmap, numid);
+ else if (keys[key].storage == KEY_STORAGE_INCORE)
incore_add_blob(&data, dps, dp - dps);
break;
}
}
void
+repodata_add_idarray(Repodata *data, Id entry, Id keyname, Id id)
+{
+ /* Append 'id' to the IDARRAY attribute 'keyname' of 'entry',
+ creating the attribute if it does not exist yet.  Array contents
+ live in data->attriddata as a 0-terminated run of Ids. */
+ Id *ida, *pp;
+ Repokey key;
+
+#if 0
+fprintf(stderr, "repodata_add_idarray %d %d (%d)\n", entry, id, data->attriddatalen);
+#endif
+ if (data->attrs && data->attrs[entry])
+ {
+ /* scan the entry's (key, value) pairs for an existing IDARRAY
+ attribute with this keyname */
+ for (pp = data->attrs[entry]; *pp; pp += 2)
+ if (data->keys[*pp].name == keyname
+ && data->keys[*pp].type == REPOKEY_TYPE_IDARRAY)
+ break;
+ if (*pp)
+ {
+ int oldsize = 0;
+ /* count existing elements (excluding the 0 terminator) */
+ for (ida = data->attriddata + pp[1]; *ida; ida++)
+ oldsize++;
+ if (ida + 1 == data->attriddata + data->attriddatalen)
+ {
+ /* this was the last entry, just append it */
+ data->attriddata = sat_extend(data->attriddata, data->attriddatalen, 1, sizeof(Id), REPODATA_ATTRIDDATA_BLOCK);
+ data->attriddatalen--; /* overwrite terminating 0 */
+ }
+ else
+ {
+ /* too bad. move to back.  The old copy is abandoned in place;
+ pp[1] is redirected to the new location. */
+ data->attriddata = sat_extend(data->attriddata, data->attriddatalen, oldsize + 2, sizeof(Id), REPODATA_ATTRIDDATA_BLOCK);
+ memcpy(data->attriddata + data->attriddatalen, data->attriddata + pp[1], oldsize * sizeof(Id));
+ pp[1] = data->attriddatalen;
+ data->attriddatalen += oldsize;
+ }
+ data->attriddata[data->attriddatalen++] = id;
+ data->attriddata[data->attriddatalen++] = 0;
+ return;
+ }
+ }
+ /* no such attribute yet: create a fresh one-element array */
+ key.name = keyname;
+ key.type = REPOKEY_TYPE_IDARRAY;
+ key.size = 0;
+ key.storage = KEY_STORAGE_INCORE;
+ data->attriddata = sat_extend(data->attriddata, data->attriddatalen, 2, sizeof(Id), REPODATA_ATTRIDDATA_BLOCK);
+ repodata_set(data, entry, &key, data->attriddatalen);
+ data->attriddata[data->attriddatalen++] = id;
+ data->attriddata[data->attriddatalen++] = 0;
+}
+
+void
+repodata_add_poolstr_array(Repodata *data, Id entry, Id keyname,
+ const char *str)
+{
+ /* Convenience wrapper: intern 'str' in the appropriate string pool
+ (the data-local pool when data->localpool is set, otherwise the
+ repo's global pool) and append the resulting Id to the IDARRAY
+ attribute 'keyname' of 'entry'. */
+ Id id;
+ if (data->localpool)
+ id = stringpool_str2id(&data->spool, str, 1);
+ else
+ id = str2id(data->repo->pool, str, 1);
+ repodata_add_idarray(data, entry, keyname, id);
+}
+
+void
repodata_merge_attrs(Repodata *data, Id dest, Id src)
{
Id *keyp;
case REPOKEY_TYPE_DIR:
data_addid(xd, id);
break;
+ case REPOKEY_TYPE_IDARRAY:
+ for (ida = data->attriddata + id; *ida; ida++)
+ data_addideof(xd, ida[0], ida[1] ? 0 : 1);
+ break;
case REPOKEY_TYPE_DIRNUMNUMARRAY:
for (ida = data->attriddata + id; *ida; ida += 3)
{
void repodata_set_str(Repodata *data, Id entry, Id keyname, const char *str);
void repodata_add_dirnumnum(Repodata *data, Id entry, Id keyname, Id dir, Id num, Id num2);
void repodata_add_dirstr(Repodata *data, Id entry, Id keyname, Id dir, const char *str);
+void repodata_add_idarray(Repodata *data, Id entry, Id keyname, Id id);
+void repodata_add_poolstr_array(Repodata *data, Id entry, Id keyname,
+ const char *str);
void repodata_merge_attrs (Repodata *data, Id dest, Id src);
void repodata_internalize(Repodata *data);