-int load_xml_metadata(GHashTable *hashtable, const char *primary_xml_path, const char *filelists_xml_path, const char *other_xml_path)
+int load_xml_files(GHashTable *hashtable, const char *primary_xml_path, const char *filelists_xml_path, const char *other_xml_path)
{
CompressionType compression_type;
CW_FILE *pri_xml_cwfile, *fil_xml_cwfile, *oth_xml_cwfile;
-int locate_and_load_xml_metadata(GHashTable *hashtable, const char *repopath, HashTableKey key)
+int load_xml_metadata(GHashTable *hashtable, struct MetadataLocation *ml, HashTableKey key)
{
- if (!hashtable || !repopath) {
+ if (!hashtable || !ml) {
return LOAD_METADATA_ERR;
}
-
- // Get paths of old metadata files from repomd
-
- struct MetadataLocation *ml;
- ml = get_metadata_location(repopath);
- if (!ml) {
- return LOAD_METADATA_ERR;
- }
-
-
if (!ml->pri_xml_href || !ml->fil_xml_href || !ml->oth_xml_href) {
// Some file(s) is/are missing
free_metadata_location(ml);
GHashTable *intern_hashtable; // key is checksum (pkgId)
intern_hashtable = new_metadata_hashtable();
- result = load_xml_metadata(intern_hashtable, ml->pri_xml_href, ml->fil_xml_href, ml->oth_xml_href);
+ result = load_xml_files(intern_hashtable, ml->pri_xml_href, ml->fil_xml_href, ml->oth_xml_href);
if (result == LOAD_METADATA_ERR) {
g_critical(MODULE"%s: Error encountered while parsing", __func__);
// Cleanup
destroy_metadata_hashtable(intern_hashtable);
+
+ return LOAD_METADATA_OK;
+}
+
+
+
+int locate_and_load_xml_metadata(GHashTable *hashtable, const char *repopath, HashTableKey key)
+{
+ if (!hashtable || !repopath) {
+ return LOAD_METADATA_ERR;
+ }
+
+ int ret;
+ struct MetadataLocation *ml;
+
+ ml = get_metadata_location(repopath);
+ ret = load_xml_metadata(hashtable, ml, key);
free_metadata_location(ml);
- return result;
+ return ret;
}
g_free(ml->groupfile_href);
g_free(ml->cgroupfile_href);
g_free(ml->repomd);
+ g_free(ml->original_url);
+ g_free(ml->local_path);
g_free(ml->tmp_dir);
g_free(ml);
}
struct MetadataLocation *mdloc;
mdloc = g_malloc0(sizeof(struct MetadataLocation));
mdloc->repomd = g_strdup(repomd_path);
+ mdloc->local_path = g_strdup(repopath);
xmlChar *data_type = NULL;
xmlChar *location_href = NULL;
ret = get_local_metadata(tmp_dir);
if (ret) {
ret->tmp_dir = g_strdup(tmp_dir);
+ ret->original_url = g_strdup(repopath);
}
if (ml->groupfile_href) list = g_slist_prepend(list, (gpointer) ml->groupfile_href);
if (ml->cgroupfile_href) list = g_slist_prepend(list, (gpointer) ml->cgroupfile_href);
if (ml->repomd) list = g_slist_prepend(list, (gpointer) ml->repomd);
+ if (ml->original_url) list = g_slist_prepend(list, (gpointer) ml->original_url);
if (ml->tmp_dir) list = g_slist_prepend(list, (gpointer) ml->tmp_dir);
return list;
gboolean ret = TRUE;
if (options->outputdir){
- if (!g_file_test(options->outputdir, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_REGULAR)) {
- g_warning("Specified outputdir \"%s\" is regular file.", options->outputdir);
+ if (!g_file_test(options->outputdir, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) {
+ g_warning("Specified outputdir \"%s\" is not a directory.", options->outputdir);
ret = FALSE;
}
options->out_dir = normalize_dir_path(options->outputdir);
char *normalized = normalize_dir_path(options->repos[x]);
if (normalized) {
options->repo_list = g_slist_prepend(options->repo_list, normalized);
-/* if (!g_file_test(normalized, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) {
- g_critical("Path \"%s\" doesn't exists", normalized);
- ret = FALSE;
- } else {
- g_debug("Using repo: %s", normalized);
- }*/
}
x++;
}
- options->repo_list = g_slist_reverse (options->repo_list);
+ // Reversing of the list now happens during repo downloading,
+ // options->repo_list = g_slist_reverse (options->repo_list);
// Process archlist
options->arch_list = NULL;
// Merged table structure: {"package_name": [pkg, pkg, pkg, ...], ...}
-int add_package(Package *pkg, gchar *repopath, GHashTable *merged, struct CmdOptions *cmd_options)
+int add_package(Package *pkg, gchar *repopath, GHashTable *merged, GSList *arch_list)
{
GSList *list, *element;
// Check if the package meet the command line architecture constraints
- if (cmd_options->arch_list) {
+ if (arch_list) {
gboolean right_arch = FALSE;
- for (element=cmd_options->arch_list; element; element=g_slist_next(element)) {
+ for (element=arch_list; element; element=g_slist_next(element)) {
if (!g_strcmp0(pkg->arch, (gchar *) element->data)) {
right_arch = TRUE;
}
-long merge_repos(GHashTable *merged, struct CmdOptions *cmd_options) {
+long merge_repos(GHashTable *merged, GSList *repo_list, GSList *arch_list) {
long loaded_packages = 0;
// Load all repos
GSList *element = NULL;
- for (element = cmd_options->repo_list; element; element = g_slist_next(element)) {
+ for (element = repo_list; element; element = g_slist_next(element)) {
GHashTable *tmp_hashtable;
- gchar *repopath;
tmp_hashtable = new_metadata_hashtable();
- repopath = (gchar *) element->data;
- g_debug("Processing: %s", repopath);
+ struct MetadataLocation *ml = (struct MetadataLocation *) element->data;
+ gchar *repopath = ml->local_path;
+ g_debug("Processing: %s", ml->repomd);
- if (locate_and_load_xml_metadata(tmp_hashtable, repopath, HT_KEY_HASH) == LOAD_METADATA_ERR) {
- g_critical("Cannot load repo: \"%s\"", repopath);
+ if (load_xml_metadata(tmp_hashtable, ml, HT_KEY_HASH) == LOAD_METADATA_ERR) {
+ g_critical("Cannot load repo: \"%s\"", ml->repomd);
destroy_metadata_hashtable(tmp_hashtable);
break;
}
g_hash_table_iter_init (&iter, tmp_hashtable);
while (g_hash_table_iter_next (&iter, &key, &value)) {
Package *pkg = (Package *) value;
- if (add_package(pkg, repopath, merged, cmd_options)) {
+ if (add_package(pkg, repopath, merged, arch_list)) {
// Package was added - remove only record from hashtable
g_hash_table_iter_steal(&iter);
repo_loaded_packages++;
}
- // Load metadata
-
- long loaded_packages;
- GHashTable *merged_hashtable = new_merged_metadata_hashtable();
- loaded_packages = merge_repos(merged_hashtable, cmd_options);
-
-
- // Get paths of groupfiles
+ // Download repos
+ GSList *local_repos = NULL;
GSList *element = NULL;
gchar *groupfile = NULL;
+
for (element = cmd_options->repo_list; element; element = g_slist_next(element)) {
- gchar *repopath = (gchar *) element->data;
- struct MetadataLocation *loc = get_metadata_location(repopath);
- if (!loc || !loc->groupfile_href) {
- free_metadata_location(loc);
- break;
- }
- if (copy_file(loc->groupfile_href, cmd_options->out_repo) == CR_COPY_OK) {
- groupfile = g_strconcat(cmd_options->out_repo, get_filename(loc->groupfile_href), NULL);
+ struct MetadataLocation *loc = get_metadata_location((gchar *) element->data);
+ local_repos = g_slist_prepend(local_repos, loc);
+ }
+
+
+ // Get first groupfile
+
+ for (element = local_repos; element; element = g_slist_next(element)) {
+ struct MetadataLocation *loc = (struct MetadataLocation *) element->data;
+ if (!groupfile && loc->groupfile_href) {
+ if (copy_file(loc->groupfile_href, cmd_options->out_repo) == CR_COPY_OK) {
+ groupfile = g_strconcat(cmd_options->out_repo, get_filename(loc->groupfile_href), NULL);
+ break;
+ }
}
- free_metadata_location(loc);
- break;
}
+ // Load metadata
+
+ long loaded_packages;
+ GHashTable *merged_hashtable = new_merged_metadata_hashtable();
+ loaded_packages = merge_repos(merged_hashtable, local_repos, cmd_options->arch_list);
+
+
// Dump metadata
dump_merged_metadata(merged_hashtable, loaded_packages, groupfile, cmd_options);
+ // Remove downloaded repos and free repo location structures
+
+ for (element = local_repos; element; element = g_slist_next(element)) {
+ struct MetadataLocation *loc = (struct MetadataLocation *) element->data;
+ free_metadata_location(loc);
+ }
+
+
// Cleanup
g_free(groupfile);