+# "Garbage" from compilation
*.o
*.so
*.pyc
*_wrap.c
snippets/
-notes
-Makefile
+
+# Binary files
mergerepo_c
createrepo_c
-tests/testcompression_wrapper
-tests/testload_metadata
+modifyrepo_c
+sqliterepo_c
+
+# Packages and tarball
createrepo*.rpm
+python-createrepo*.rpm
+createrepo_c-*.tar.xz
+
+# Makefile generated files
+Makefile
cmake_install.cmake
install_manifest.txt
CMakeCache.txt
CPackConfig.cmake
CPackSourceConfig.cmake
_CPack_Packages/
-*_
src/version.h
src/createrepo_c.pc
+src/deltarpms.h
+
+# Devel stuff
+notes
build/
tags
-createrepo_c-*.tar.xz
+*_
+
+# Deltarepo acceptance tests
+deltarepo/acceptance_tests/repos/repo*/repodata
+
+# Bug fixing and feature request temp directories
+BUG*/
+JIRA*/
+ISSUE*/
Tomas Mlcoch <tmlcoch at redhat dot com>
+Mathieu Bridon <bochecha at fedoraproject dot org>
+Tom Prince <tom.prince at clusterhq dot com>
+Scott K Logan <logans at cottsay dot net>
+Ville Skyttä <ville.skytta at iki dot fi>
+Luke Macken <lmacken at redhat dot com>
+Jarek Polok <Jaroslaw.Polok@cern.ch>
+Neal Gompa <ngompa13@gmail.com>
+Ralph Bean <rbean@redhat.com>
-PROJECT (createrepo_c)
+PROJECT (createrepo_c C)
CMAKE_MINIMUM_REQUIRED (VERSION 2.6)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
-set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS} -Wall -Wextra")
+set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS} -ggdb -g -Wall -Wextra -Og")
IF(NOT CMAKE_BUILD_TYPE)
SET(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
ENDIF(NOT CMAKE_BUILD_TYPE)
IF(CMAKE_BUILD_TYPE AND CMAKE_BUILD_TYPE STREQUAL "DEBUG")
- MESSAGE("Build type is set do DEBUG!")
+    MESSAGE("Build type is set to DEBUG! (Used flags: \"${CMAKE_C_FLAGS_DEBUG}\")")
ENDIF()
include_directories(${LIBXML2_INCLUDE_DIR})
-# rpm:
-
-FIND_LIBRARY (RPMDB_LIBRARY NAMES rpmdb)
-IF (NOT RPMDB_LIBRARY)
- FIND_LIBRARY (RPMDB_LIBRARY NAMES rpm)
+IF (RPM_PATH)
+    # e.g. cmake -DRPM_PATH="/home/tmlcoch/git/rpm" ..
+    include_directories("${RPM_PATH}/include/")
+ SET(RPMDB_LIBRARY "${RPM_PATH}/rpmio/.libs/librpmio.so"
+ "${RPM_PATH}/lib/.libs/librpm.so")
+ message("Using custom RPM: ${RPMDB_LIBRARY}")
+ELSE (RPM_PATH)
+ # rpm:
+ FIND_LIBRARY (RPMDB_LIBRARY NAMES rpmdb)
IF (NOT RPMDB_LIBRARY)
- MESSAGE(FATAL_ERROR "No Rpm library installed")
+ FIND_LIBRARY (RPMDB_LIBRARY NAMES rpm)
+ IF (NOT RPMDB_LIBRARY)
+ MESSAGE(FATAL_ERROR "No Rpm library installed")
+ ENDIF (NOT RPMDB_LIBRARY)
ENDIF (NOT RPMDB_LIBRARY)
-ENDIF (NOT RPMDB_LIBRARY)
-
-# rpmio:
-
-FIND_LIBRARY (RPMIO_LIBRARY NAMES rpmio)
-IF (RPMIO_LIBRARY)
- SET(RPMDB_LIBRARY ${RPMIO_LIBRARY} ${RPMDB_LIBRARY})
-ELSE (RPMIO_LIBRARY)
- MESSAGE(FATAL_ERROR "No Rpmio library installed")
-ENDIF (RPMIO_LIBRARY)
+ # rpmio:
+ FIND_LIBRARY (RPMIO_LIBRARY NAMES rpmio)
+ IF (RPMIO_LIBRARY)
+ SET(RPMDB_LIBRARY ${RPMIO_LIBRARY} ${RPMDB_LIBRARY})
+ ELSE (RPMIO_LIBRARY)
+ MESSAGE(FATAL_ERROR "No Rpmio library installed")
+ ENDIF (RPMIO_LIBRARY)
+
+ message("Using system RPM: ${RPMDB_LIBRARY}")
+ENDIF (RPM_PATH)
+
+# drpm
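+# (CR_DELTA_RPM_SUPPORT is presumably written into the generated
+# src/deltarpms.h listed in .gitignore, toggling the deltarpm code paths.)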
+if (DRPM_PATH)
+ include_directories (${DRPM_PATH}/)
+ find_library (DRPM_LIBRARY NAMES drpm PATHS ${DRPM_PATH}/ NO_DEFAULT_PATH)
+ set(CR_DELTA_RPM_SUPPORT "1")
+ message("Using custom DRPM: ${DRPM_LIBRARY}")
+ELSE (DRPM_PATH)
+ FIND_LIBRARY (DRPM_LIBRARY NAMES drpm libdrpm.so.0)
+ IF (NOT DRPM_LIBRARY)
+ MESSAGE("No DRPM library installed")
+ ELSE (NOT DRPM_LIBRARY)
+ MESSAGE("Using DRPM library: ${DRPM_LIBRARY}")
+ set(CR_DELTA_RPM_SUPPORT "1")
+ ENDIF (NOT DRPM_LIBRARY)
+endif (DRPM_PATH)
# Get package version
INCLUDE (${CMAKE_SOURCE_DIR}/VERSION.cmake)
message("Package version: ${VERSION}")
+# Default python version
+if (NOT PYTHON_DESIRED)
+ set (PYTHON_DESIRED "2")
+endif()
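+# (Override at configure time, e.g.: cmake -DPYTHON_DESIRED=3 .)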
+
# Other files
-INSTALL(FILES createrepo_c.bash
- DESTINATION "/etc/bash_completion.d")
+pkg_check_modules(BASHCOMP bash-completion)
+if (BASHCOMP_FOUND)
+ execute_process(COMMAND ${PKG_CONFIG_EXECUTABLE} --variable=completionsdir bash-completion OUTPUT_VARIABLE BASHCOMP_DIR OUTPUT_STRIP_TRAILING_WHITESPACE)
+ message("Bash completion directory: ${BASHCOMP_DIR}")
+ INSTALL(FILES createrepo_c.bash DESTINATION ${BASHCOMP_DIR} RENAME createrepo_c)
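+    # bash-completion loads a completion file by command name, so symlinks
+    # let mergerepo_c, modifyrepo_c and sqliterepo_c share the same script.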
+ INSTALL(CODE "
+ execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink createrepo_c \$ENV{DESTDIR}${BASHCOMP_DIR}/mergerepo_c)
+ execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink createrepo_c \$ENV{DESTDIR}${BASHCOMP_DIR}/modifyrepo_c)
+ execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink createrepo_c \$ENV{DESTDIR}${BASHCOMP_DIR}/sqliterepo_c)
+ ")
+ELSE (BASHCOMP_FOUND)
+ INSTALL(FILES createrepo_c.bash DESTINATION "/etc/bash_completion.d")
+ message("Bash completion directory: /etc/bash_completion.d")
+endif (BASHCOMP_FOUND)
# Gen manpage
* sqlite3 (https://sqlite.org/) - sqlite-devel/libsqlite3-dev
* xz (http://tukaani.org/xz/) - xz-devel/liblzma-dev
* zlib (http://www.zlib.net/) - zlib-devel/zlib1g-dev
-* *Optional:* doxygen (http://doxygen.org/) - doxygen/doxygen
+* *Documentation:* doxygen (http://doxygen.org/) - doxygen/doxygen
+* *Documentation:* sphinx (http://sphinx-doc.org/) - python-sphinx/python-sphinx
* **Test requires:** check (http://check.sourceforge.net/) - check-devel/check
* **Test requires:** python-nose (https://nose.readthedocs.org/) - python-nose/python-nose
-
+* **Experimental support:** drpm (https://git.fedorahosted.org/git/drpm.git)
From your checkout dir:
cmake -DCMAKE_BUILD_TYPE:STRING=DEBUG .. && make
+## Building from an rpm checkout
+
+For example, when you want to try out weak and rich dependencies.
+
+ cmake -DRPM_PATH="/home/tmlcoch/git/rpm" .. && make
+
+**Note:** The RPM must be built in that directory
+
+Commands I use for building the RPM:
+
+ cd /home/tmlcoch/git/rpm
+ CPPFLAGS='-I/usr/include/nss3/ -I/usr/include/nspr4/' ./autogen.sh --rpmconfigure --with-vendor=redhat --with-external-db --with-lua --with-selinux --with-cap --with-acl --enable-python
+ make clean && make
+
+## Building with delta rpm support (drpm)
+
+First, check out the drpm library from
+https://git.fedorahosted.org/git/drpm.git and build it:
+
+ git clone ssh://git.fedorahosted.org/git/drpm.git
+ cd drpm/
+ make
+
+Then run ``cmake`` for createrepo_c with the parameter ``-DDRPM_PATH="/home/tmlcoch/git/drpm"``,
+where the path points to your build of the drpm library.
+
+ cmake -DDRPM_PATH="/home/tmlcoch/git/drpm" .. && make
+
+## Building for a different Python version
+
+By default, cmake should set things up to build for Python 2, but you can build for Python 3 like this:
+
+ cmake -DPYTHON_DESIRED=3 .
+
## Build tarball
utils/make_tarball.sh [git revision]
PYTHONPATH=`readlink -f ./build/src/python/` nosetests -s tests/python/tests/
+Or, for an alternative python version, specify the appropriate nosetests executable:
+
+ PYTHONPATH=`readlink -f ./build/src/python/` nosetests-3.4 -s tests/python/tests/
+
### Links
[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=MODIFIED&bug_status=VERIFIED&component=createrepo_c&query_format=advanced)
SET(CR_MAJOR "0")
-SET(CR_MINOR "2")
-SET(CR_PATCH "1")
+SET(CR_MINOR "10")
+SET(CR_PATCH "0")
--- /dev/null
+#!/bin/bash
+
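+# Resolve the directory of this script and the sibling build/ directory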
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+BUILDDIR="$( cd "$CURDIR/../build" && pwd )"
+
+# All use cases
+PATH="$BUILDDIR/src/:/home/tmlcoch/git/yum-metadata-diff/:$PATH" LD_LIBRARY_PATH=$BUILDDIR/src/ PYTHONPATH=$BUILDDIR/src/python/ nosetests -s -v ./tests/ --processes 4 --process-timeout=300
+
+# Single module:
+#PATH="$BUILDDIR/src/:/home/tmlcoch/git/repodiff/:$PATH" LD_LIBRARY_PATH=$BUILDDIR/src/ PYTHONPATH=$BUILDDIR/src/python/ nosetests -s -v --processes 4 --process-timeout=300 tests/test_sqliterepo.py
+
+# Single test:
+# PATH="$BUILDDIR/src/:/home/tmlcoch/git/repodiff/:$PATH" LD_LIBRARY_PATH=$BUILDDIR/src/ PYTHONPATH=$BUILDDIR/src/python/ nosetests -s -v --processes 4 --process-timeout=300 tests/test_createrepo.py:TestCaseCreaterepo_emptyrepo.test_01_createrepo
--- /dev/null
+import os
+import re
+import time
+import shutil
+import pprint
+import filecmp
+import os.path
+import tempfile
+import unittest
+import threading
+import subprocess
+
+from .fixtures import PACKAGESDIR
+
+OUTPUTDIR = None
+OUTPUTDIR_LOCK = threading.Lock()
+
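+# Lazily create one timestamped results directory shared by all tests in
+# this process; the lock keeps concurrent (threaded) callers safe.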
+def get_outputdir():
+ global OUTPUTDIR
+ if OUTPUTDIR:
+ return OUTPUTDIR
+ OUTPUTDIR_LOCK.acquire()
+ if not OUTPUTDIR:
+ prefix = time.strftime("./testresults_%Y%m%d_%H%M%S_")
+ OUTPUTDIR = tempfile.mkdtemp(prefix=prefix, dir="./")
+ OUTPUTDIR_LOCK.release()
+ return OUTPUTDIR
+
+
+class _Result(object):
+ def __str__(self):
+ return pprint.pformat(self.__dict__)
+
+
+class CrResult(_Result):
+ def __init__(self):
+ self.rc = None # Return code
+ self.out = None # stdout + stderr
+ self.dir = None # Directory that was processed
+ self.prog = None # Program name
+ self.cmd = None # Complete command
+ self.outdir = None # Output directory
+ self.logfile = None # Log file where the out was logged
+
+
+class RepoDiffResult(_Result):
+ def __init__(self):
+ self.rc = None
+ self.out = None
+ self.repo1 = None
+ self.repo2 = None
+ self.cmd = None # Complete command
+ self.logfile = None # Log file where the out was logged
+
+
+class RepoSanityCheckResult(_Result):
+ def __init__(self):
+ self.rc = None
+ self.out = None
+ self.repo = None
+ self.cmd = None
+ self.logfile = None
+
+
+class BaseTestCase(unittest.TestCase):
+
+ def __init__(self, *args, **kwargs):
+ self.outdir = get_outputdir()
+ self.tcdir = os.path.join(self.outdir, self.__class__.__name__)
+ if not os.path.exists(self.tcdir):
+ os.mkdir(self.tcdir)
+ if self.__class__.__doc__:
+ description_fn = os.path.join(self.tcdir, "description")
+            with open(description_fn, "w") as f:
+                f.write(self.__class__.__doc__+'\n')
+ unittest.TestCase.__init__(self, *args, **kwargs)
+
+ self.tdir = None # Test dir for the current test
+ self.indir = None # Input dir for the current test
+ self._currentResult = None # Result of the current test
+
+ # Prevent use of a first line from test docstring as its name in output
+ self.shortDescription_orig = self.shortDescription
+ self.shortDescription = self._shortDescription
+ self.main_cwd = os.getcwd()
+
+ def _shortDescription(self):
+ return ".".join(self.id().split('.')[-2:])
+
+ def run(self, result=None):
+ # Hacky
+ self._currentResult = result # remember result for use in tearDown
+ unittest.TestCase.run(self, result)
+
+ def setUp(self):
+ os.chdir(self.main_cwd) # In case of TimedOutException in Nose test... the tearDown is not called :-/
+ caller = self.id().split(".", 3)[-1]
+ self.tdir = os.path.abspath(os.path.join(self.tcdir, caller))
+ os.mkdir(self.tdir)
+ self.indir = os.path.join(self.tdir, "input")
+ os.mkdir(self.indir)
+ description = self.shortDescription_orig()
+ if description:
+ fn = os.path.join(self.tdir, "description")
+            with open(fn, "w") as f:
+                f.write(description+'\n')
+ #self.log = # TODO
+ os.chdir(self.tdir)
+ self.setup()
+
+ def setup(self):
+ pass
+
+ def tearDown(self):
+ if self.tdir and self._currentResult:
+ if not len(self._currentResult.errors) + len(self._currentResult.failures):
+ self.set_success()
+ self.teardown()
+ os.chdir(self.main_cwd)
+
+ def teardown(self):
+ pass
+
+ def runcmd(self, cmd, logfile=None, workdir=None, stdin_data=None):
+ """Stolen from the kobo library.
+ Author of the original function is dmach@redhat.com"""
+
+ # TODO: Add time how long the command takes
+
+ if type(cmd) in (list, tuple):
+ import pipes
+ cmd = " ".join(pipes.quote(i) for i in cmd)
+
+ if logfile is not None:
+ already_exists = False
+ if os.path.exists(logfile):
+ already_exists = True
+ logfile = open(logfile, "a")
+ if already_exists:
+ logfile.write("\n{0}\n Another run\n{0}\n".format('='*79))
+ logfile.write("cd %s\n" % os.getcwd())
+ for var in ("PATH", "PYTHONPATH", "LD_LIBRARY_PATH"):
+ logfile.write('export %s="%s"\n' % (var, os.environ.get(var,"")))
+ logfile.write("\n")
+ logfile.write(cmd+"\n")
+ logfile.write("\n+%s+\n\n" % ('-'*77))
+
+ stdin = None
+ if stdin_data is not None:
+ stdin = subprocess.PIPE
+
+        # stdin must be a pipe so stdin_data can be written by the thread below
+        proc = subprocess.Popen(cmd, shell=True, stdin=stdin,
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.STDOUT, cwd=workdir)
+
+ if stdin_data is not None:
+ class StdinThread(threading.Thread):
+ def run(self):
+ proc.stdin.write(stdin_data)
+ proc.stdin.close()
+ stdin_thread = StdinThread()
+ stdin_thread.daemon = True
+ stdin_thread.start()
+
+ output = ""
+ while True:
+ lines = proc.stdout.readline().decode('utf-8')
+ if lines == "":
+ break
+ if logfile:
+ logfile.write(lines)
+ output += lines
+ proc.wait()
+
+ if logfile:
+ logfile.write("\n+%s+\n\n" % ('-'*77))
+ logfile.write("RC: %s (%s)\n" % (proc.returncode, "Success" if not proc.returncode else "Failure"))
+ logfile.close()
+
+ if stdin_data is not None:
+ stdin_thread.join()
+
+ return proc.returncode, output
+
+ def set_success(self):
+ """Create a SUCCESS file in directory of the current test"""
+ fn = os.path.join(self.tdir, "SUCCESS")
+        open(fn, "w").close()
+
+ def copy_pkg(self, name, dst):
+ """Copy package from testdata into specified destination"""
+ src = os.path.join(PACKAGESDIR, name)
+ shutil.copy(src, dst)
+ return os.path.join(dst, name)
+
+ def indir_addpkg(self, name):
+ """Add package into input dir for the current test"""
+        # copy_pkg() already joins the name with PACKAGESDIR
+        return self.copy_pkg(name, self.indir)
+
+ def indir_makedirs(self, path):
+ """Make a directory in input dir of the current test"""
+ path = path.lstrip('/')
+ final_path = os.path.join(self.indir, path)
+ os.makedirs(final_path)
+ return final_path
+
+ def indir_mkfile(self, name, content=""):
+ """Create a file in input dir of the current test"""
+ fn = os.path.join(self.indir, name)
+ with open(fn, "w") as f:
+ f.write(content)
+ return fn
+
+ def tdir_makedirs(self, path):
+ """Make a directory in test dir of the current test"""
+ path = path.lstrip('/')
+ final_path = os.path.join(self.tdir, path)
+ os.makedirs(final_path)
+ return final_path
+
+ def run_prog(self, prog, dir, args=None, outdir=None):
+ res = CrResult()
+ res.dir = dir
+ res.prog = prog
+ res.outdir = outdir
+ res.logfile = os.path.join(self.tdir, "out_%s" % res.prog)
+ res.cmd = "%(prog)s --verbose %(args)s %(dir)s" % {
+ "prog": res.prog,
+ "dir": res.dir,
+ "args": args or "",
+ }
+ res.rc, res.out = self.runcmd(res.cmd, logfile=res.logfile)
+ return res
+
+ def run_cr(self, dir, args=None, c=False, outdir=None):
+ """Run createrepo and return CrResult object with results
+
+ :returns: Result of the createrepo run
+ :rtype: CrResult
+ """
+ prog = "createrepo_c" if c else "createrepo"
+
+ if not outdir:
+ outdir = os.path.join(self.tdir, prog)
+ else:
+ outdir = os.path.join(self.tdir, outdir)
+ if not os.path.exists(outdir):
+ os.mkdir(outdir)
+
+ args = " -o %s %s" % (outdir, args if args else "")
+
+ res = self.run_prog(prog, dir, args, outdir)
+ return res
+
+ def run_sqlr(self, dir, args=None):
+ """"""
+ res = self.run_prog("sqliterepo_c", dir, args)
+ return res
+
+ def compare_repos(self, repo1, repo2):
+ """Compare two repos
+
+ :returns: Difference between two repositories
+ :rtype: RepoDiffResult
+ """
+ res = RepoDiffResult()
+ res.repo1 = os.path.join(repo1, "repodata")
+ res.repo2 = os.path.join(repo2, "repodata")
+ res.logfile = os.path.join(self.tdir, "out_cmp")
+ res.cmd = "yum-metadata-diff --verbose --compare %s %s" % (res.repo1, res.repo2)
+ res.rc, res.out = self.runcmd(res.cmd, logfile=res.logfile)
+ return res
+
+ def check_repo_sanity(self, repo):
+ """Check if a repository is sane
+
+ :returns: Result of the sanity check
+ :rtype: RepoSanityCheckResult
+ """
+ res = RepoSanityCheckResult()
+ res.repo = os.path.join(repo, "repodata")
+ res.logfile = os.path.join(self.tdir, "out_sanity")
+ res.cmd = "yum-metadata-diff --verbose --check %s" % res.repo
+ res.rc, res.out = self.runcmd(res.cmd, logfile=res.logfile)
+ return res
+
+ def assert_run_cr(self, *args, **kwargs):
+ """Run createrepo and assert that it finished with return code 0
+
+ :returns: Result of the createrepo run
+ :rtype: CrResult
+ """
+ res = self.run_cr(*args, **kwargs)
+ self.assertFalse(res.rc)
+ return res
+
+ def assert_run_sqlr(self, *args, **kwargs):
+ """Run sqliterepo and assert that it finished with return code 0
+
+ :returns: Result of the sqliterepo run
+ :rtype: CrResult
+ """
+ res = self.run_sqlr(*args, **kwargs)
+ self.assertFalse(res.rc)
+ return res
+
+ def assert_same_results(self, indir, args=None):
+ """Run both createrepo and createrepo_c and assert that results are same
+
+ :returns: (result of comparison, createrepo result, createrepo_c result)
+ :rtype: (RepoDiffResult, CrResult, CrResult)
+ """
+ crres = self.run_cr(indir, args)
+ crcres = self.run_cr(indir, args, c=True)
+ self.assertFalse(crres.rc) # Error while running createrepo
+ self.assertFalse(crcres.rc) # Error while running createrepo_c
+ cmpres = self.compare_repos(crres.outdir, crcres.outdir)
+        self.assertFalse(cmpres.rc) # Repos are not the same
+ return (cmpres, crres, crcres)
+
+ def assert_repo_sanity(self, repo):
+ """Run repo sanity check and assert it's sane
+
+ :returns: Result of the sanity check
+ :rtype: RepoSanityCheckResult
+ """
+ res = self.check_repo_sanity(repo)
+ self.assertFalse(res.rc)
+ return res
+
+ def assert_repo_files(self, repo, file_patterns, additional_files_allowed=True):
+ """Assert that files (defined by re) are in the repo
+ """
+        compiled_patterns = [re.compile(p) for p in file_patterns]  # a list, so it can be iterated more than once
+ fns = os.listdir(os.path.join(repo, "repodata/"))
+ used_patterns = []
+ for pattern in compiled_patterns:
+ for fn in fns[:]:
+ if pattern.match(fn):
+ fns.remove(fn)
+ used_patterns.append(pattern)
+
+ if not additional_files_allowed:
+ self.assertEqual(fns, []) # Unexpected additional files
+
+        unused_patterns = [x.pattern for x in (set(compiled_patterns) - set(used_patterns))]
+        self.assertEqual(unused_patterns, []) # Some patterns weren't used
+
+ def assert_same_dir_content(self, a, b):
+ """Assert identical content of two directories
+ (Not recursive yet)
+ """
+ # TODO: Recursive
+ self.assertTrue(os.path.isdir(a))
+ self.assertTrue(os.path.isdir(b))
+
+        _, logfn = tempfile.mkstemp(prefix="out_dircmp_%s_" % int(time.time()), dir=self.tdir)
+ logfile = open(logfn, "w")
+ logfile.write("A: %s\n" % a)
+ logfile.write("B: %s\n" % b)
+ logfile.write("\n")
+
+ dircmpobj = filecmp.dircmp(a, b)
+ if dircmpobj.left_only or dircmpobj.right_only or dircmpobj.diff_files:
+ logfile.write("A only:\n%s\n\n" % dircmpobj.left_only)
+ logfile.write("B only:\n%s\n\n" % dircmpobj.right_only)
+ logfile.write("Diff files:\n%s\n\n" % dircmpobj.diff_files)
+ logfile.close()
+            self.fail("Directory contents differ")
+ logfile.write("OK\n")
+ logfile.close()
--- /dev/null
+import os.path
+
+TESTDATADIR = os.path.normpath(os.path.join(__file__, "../../testdata"))
+PACKAGESDIR = os.path.join(TESTDATADIR, "packages")
+
+PACKAGES = [
+ "Archer-3.4.5-6.x86_64.rpm",
+ "fake_bash-1.1.1-1.x86_64.rpm",
+ "super_kernel-6.0.1-2.x86_64.rpm",
+]
\ No newline at end of file
--- /dev/null
+import os
+import os.path
+
+from .fixtures import PACKAGES
+from .base import BaseTestCase
+
+
+class TestCaseCreaterepo_badparams(BaseTestCase):
+ """Use case with bad commandline arguments"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+
+ def test_01_createrepo_noinputdir(self):
+ """No directory to index was specified"""
+ res = self.run_cr("", c=True)
+ self.assertTrue(res.rc)
+
+ def test_02_createrepo_badinputdir(self):
+ """Directory specified to index doesn't exist"""
+ res = self.run_cr("somenonexistingdirectorytoindex/", c=True)
+ self.assertTrue(res.rc)
+
+ def test_03_createrepo_unknownparam(self):
+ """Unknown param is specified"""
+ res = self.run_cr(self.indir, "--someunknownparam", c=True)
+ self.assertTrue(res.rc)
+
+ def test_04_createrepo_badchecksumtype(self):
+ """Unknown checksum type is specified"""
+ res = self.run_cr(self.indir, "--checksum foobarunknownchecksum", c=True)
+ self.assertTrue(res.rc)
+
+ def test_05_createrepo_badcompressiontype(self):
+ """Unknown compressin type is specified"""
+ res = self.run_cr(self.indir, "--compress-type foobarunknowncompression", c=True)
+ self.assertTrue(res.rc)
+
+ def test_06_createrepo_badgroupfile(self):
+ """Bad groupfile file specified"""
+ res = self.run_cr(self.indir, "--groupfile badgroupfile", c=True)
+ self.assertTrue(res.rc)
+
+ def test_07_createrepo_badpkglist(self):
+ """Bad pkglist file specified"""
+ res = self.run_cr(self.indir, "--pkglist badpkglist", c=True)
+ self.assertTrue(res.rc)
+
+ def test_08_createrepo_retainoldmdbyagetogetherwithretainoldmd(self):
+ """Both --retain-old-md-by-age and --retain-old-md are specified"""
+ res = self.run_cr(self.indir, "--retain-old-md-by-age 1 --retain-old-md", c=True)
+ self.assertTrue(res.rc)
+
+ def test_09_createrepo_retainoldmdbyagewithbadage(self):
+ """Both --retain-old-md-by-age and --retain-old-md are specified"""
+ res = self.run_cr(self.indir, "--retain-old-md-by-age 55Z", c=True)
+ self.assertTrue(res.rc)
+
+
+class TestCaseCreaterepo_emptyrepo(BaseTestCase):
+ """Empty input repository"""
+
+ def setup(self):
+ self.fn_comps = self.indir_mkfile("comps.xml", '<?xml version="1.0"?><comps/>')
+
+ def test_01_createrepo(self):
+ """Repo from empty directory"""
+ res = self.assert_run_cr(self.indir, c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{64}-primary.xml.gz$",
+ "[a-z0-9]{64}-filelists.xml.gz$",
+ "[a-z0-9]{64}-other.xml.gz$",
+ "[a-z0-9]{64}-primary.sqlite.bz2$",
+ "[a-z0-9]{64}-filelists.sqlite.bz2$",
+ "[a-z0-9]{64}-other.sqlite.bz2$",
+ ],
+ additional_files_allowed=False)
+
+ def test_02_createrepo_database(self):
+ """--database"""
+ res = self.assert_run_cr(self.indir, "--database", c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]+-primary.xml.gz$",
+ "[a-z0-9]+-filelists.xml.gz$",
+ "[a-z0-9]+-other.xml.gz$",
+ "[a-z0-9]+-primary.sqlite.bz2$",
+ "[a-z0-9]+-filelists.sqlite.bz2$",
+ "[a-z0-9]+-other.sqlite.bz2$",
+ ],
+ additional_files_allowed=False)
+
+ def test_03_createrepo_nodatabase(self):
+ """--database"""
+ res = self.assert_run_cr(self.indir, "--no-database", c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]+-primary.xml.gz$",
+ "[a-z0-9]+-filelists.xml.gz$",
+ "[a-z0-9]+-other.xml.gz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_04_createrepo_groupfile(self):
+ """--groupfile"""
+ res = self.assert_run_cr(self.indir, "--groupfile %s" % self.fn_comps, c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]+-primary.xml.gz$",
+ "[a-z0-9]+-filelists.xml.gz$",
+ "[a-z0-9]+-other.xml.gz$",
+ "[a-z0-9]+-primary.sqlite.bz2$",
+ "[a-z0-9]+-filelists.sqlite.bz2$",
+ "[a-z0-9]+-other.sqlite.bz2$",
+ "[a-z0-9]+-comps.xml$",
+ "[a-z0-9]+-comps.xml.gz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_05_createrepo_checksum(self):
+ """--checksum sha and --groupfile"""
+ res = self.assert_run_cr(self.indir,
+ "--checksum %(checksum)s --groupfile %(groupfile)s" % {
+ 'checksum': "sha1",
+ 'groupfile': self.fn_comps },
+ c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{40}-primary.xml.gz$",
+ "[a-z0-9]{40}-filelists.xml.gz$",
+ "[a-z0-9]{40}-other.xml.gz$",
+ "[a-z0-9]{40}-primary.sqlite.bz2$",
+ "[a-z0-9]{40}-filelists.sqlite.bz2$",
+ "[a-z0-9]{40}-other.sqlite.bz2$",
+ "[a-z0-9]{40}-comps.xml$",
+ "[a-z0-9]{40}-comps.xml.gz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_06_createrepo_simplemdfilenames(self):
+ """--simple-md-filenames and --groupfile"""
+ res = self.assert_run_cr(self.indir,
+ "--simple-md-filenames --groupfile %(groupfile)s" % {
+ 'groupfile': self.fn_comps },
+ c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "primary.xml.gz$",
+ "filelists.xml.gz$",
+ "other.xml.gz$",
+ "primary.sqlite.bz2$",
+ "filelists.sqlite.bz2$",
+ "other.sqlite.bz2$",
+ "comps.xml$",
+ "comps.xml.gz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_07_createrepo_xz(self):
+ """--xz and --groupfile"""
+ res = self.assert_run_cr(self.indir,
+ "--xz --groupfile %(groupfile)s" % {
+ 'groupfile': self.fn_comps },
+ c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{64}-primary.xml.gz$",
+ "[a-z0-9]{64}-filelists.xml.gz$",
+ "[a-z0-9]{64}-other.xml.gz$",
+ "[a-z0-9]{64}-primary.sqlite.xz$",
+ "[a-z0-9]{64}-filelists.sqlite.xz$",
+ "[a-z0-9]{64}-other.sqlite.xz$",
+ "[a-z0-9]{64}-comps.xml$",
+ "[a-z0-9]{64}-comps.xml.xz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_08_createrepo_compresstype_bz2(self):
+ """--compress-type bz2"""
+ res = self.assert_run_cr(self.indir, "--compress-type bz2", c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{64}-primary.xml.gz$",
+ "[a-z0-9]{64}-filelists.xml.gz$",
+ "[a-z0-9]{64}-other.xml.gz$",
+ "[a-z0-9]{64}-primary.sqlite.bz2$",
+ "[a-z0-9]{64}-filelists.sqlite.bz2$",
+ "[a-z0-9]{64}-other.sqlite.bz2$",
+ ],
+ additional_files_allowed=False)
+
+ def test_09_createrepo_compresstype_gz(self):
+ """--compress-type bz2"""
+ res = self.assert_run_cr(self.indir, "--compress-type gz", c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{64}-primary.xml.gz$",
+ "[a-z0-9]{64}-filelists.xml.gz$",
+ "[a-z0-9]{64}-other.xml.gz$",
+ "[a-z0-9]{64}-primary.sqlite.gz$",
+ "[a-z0-9]{64}-filelists.sqlite.gz$",
+ "[a-z0-9]{64}-other.sqlite.gz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_10_createrepo_compresstype_xz(self):
+ """--compress-type bz2"""
+ res = self.assert_run_cr(self.indir, "--compress-type xz", c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{64}-primary.xml.gz$",
+ "[a-z0-9]{64}-filelists.xml.gz$",
+ "[a-z0-9]{64}-other.xml.gz$",
+ "[a-z0-9]{64}-primary.sqlite.xz$",
+ "[a-z0-9]{64}-filelists.sqlite.xz$",
+ "[a-z0-9]{64}-other.sqlite.xz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_11_createrepo_repomd_checksum(self):
+ """--checksum sha and --groupfile"""
+ res = self.assert_run_cr(self.indir,
+ "--repomd-checksum %(checksum)s --groupfile %(groupfile)s" % {
+ 'checksum': "sha1",
+ 'groupfile': self.fn_comps },
+ c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{40}-primary.xml.gz$",
+ "[a-z0-9]{40}-filelists.xml.gz$",
+ "[a-z0-9]{40}-other.xml.gz$",
+ "[a-z0-9]{40}-primary.sqlite.bz2$",
+ "[a-z0-9]{40}-filelists.sqlite.bz2$",
+ "[a-z0-9]{40}-other.sqlite.bz2$",
+ "[a-z0-9]{40}-comps.xml$",
+ "[a-z0-9]{40}-comps.xml.gz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_12_createrepo_repomd_checksum(self):
+ """--checksum sha and --groupfile"""
+ res = self.assert_run_cr(self.indir,
+ "--checksum md5 --repomd-checksum %(checksum)s --groupfile %(groupfile)s" % {
+ 'checksum': "sha1",
+ 'groupfile': self.fn_comps },
+ c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]{40}-primary.xml.gz$",
+ "[a-z0-9]{40}-filelists.xml.gz$",
+ "[a-z0-9]{40}-other.xml.gz$",
+ "[a-z0-9]{40}-primary.sqlite.bz2$",
+ "[a-z0-9]{40}-filelists.sqlite.bz2$",
+ "[a-z0-9]{40}-other.sqlite.bz2$",
+ "[a-z0-9]{40}-comps.xml$",
+ "[a-z0-9]{40}-comps.xml.gz$",
+ ],
+ additional_files_allowed=False)
+
+ def test_13_createrepo_general_compress_type(self):
+ """--checksum sha and --groupfile"""
+ res = self.assert_run_cr(self.indir,
+ "--general-compress-type %(compress_type)s --groupfile %(groupfile)s" % {
+ 'compress_type': "xz",
+ 'groupfile': self.fn_comps },
+ c=True)
+ self.assert_repo_sanity(res.outdir)
+ self.assert_repo_files(res.outdir,
+ ["repomd.xml$",
+ "[a-z0-9]+-primary.xml.xz$",
+ "[a-z0-9]+-filelists.xml.xz$",
+ "[a-z0-9]+-other.xml.xz$",
+ "[a-z0-9]+-primary.sqlite.xz$",
+ "[a-z0-9]+-filelists.sqlite.xz$",
+ "[a-z0-9]+-other.sqlite.xz$",
+ "[a-z0-9]+-comps.xml$",
+ "[a-z0-9]+-comps.xml.xz$",
+ ],
+ additional_files_allowed=False)
\ No newline at end of file
--- /dev/null
+import os
+import os.path
+
+from .fixtures import PACKAGES
+from .base import BaseTestCase
+
+
+class TestCaseCreaterepoComparative_emptyrepo(BaseTestCase):
+ """Empty input repository"""
+
+ def test_01_createrepo(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir)
+
+ def test_02_createrepo_relativepath(self):
+ """Repo from empty directory - specified by relative path"""
+ self.assert_same_results(os.path.relpath(self.indir))
+
+ def test_03_createrepo_distrotag(self):
+ """--distro"""
+ self.assert_same_results(self.indir, "--distro DISTRO-TAG")
+
+ def test_04_createrepo_distrotag(self):
+ """--distro"""
+ self.assert_same_results(self.indir, "--distro CPEID,Footag")
+
+ def test_05_createrepo_distrotag(self):
+ """--distro"""
+ self.assert_same_results(self.indir, "--distro cpeid,tag_a --distro tag_b")
+
+ def test_06_createrepo_contenttag(self):
+ """--content"""
+ self.assert_same_results(self.indir, "--content contenttag_a")
+
+ def test_07_createrepo_contenttag(self):
+ """--content"""
+ self.assert_same_results(self.indir, "--content contenttag_a --content contettag_b")
+
+ def test_08_createrepo_repotag(self):
+ """--repo"""
+ self.assert_same_results(self.indir, "--repo repotag_a")
+
+ def test_09_createrepo_repotag(self):
+ """--repo"""
+ self.assert_same_results(self.indir, "--repo repotag_a --repo repotag_b")
+
+ def test_10_createrepo_nodatabase(self):
+ """--no-database"""
+ self.assert_same_results(self.indir, "--no-database")
+
+ def test_11_createrepo_uniquemdfilenames(self):
+ """--unique-md-filenames"""
+ self.assert_same_results(self.indir, "--unique-md-filenames")
+
+ def test_12_createrepo_simplemdfilenames(self):
+ """--simple-md-filenames"""
+ self.assert_same_results(self.indir, "--simple-md-filenames")
+
+ def test_13_createrepo_revision(self):
+ """--revision"""
+ self.assert_same_results(self.indir, "--revision XYZ")
+
+ def test_14_createrepo_skipsymlinks(self):
+ """--skip-symlinks"""
+ self.assert_same_results(self.indir, "--skip-symlinks")
+
+
+class TestCaseCreaterepoComparative_regularrepo(BaseTestCase):
+ """Repo with 3 packages"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+ self.indir_addpkg(PACKAGES[1])
+ self.indir_addpkg(PACKAGES[2])
+
+ def test_01_createrepo(self):
+ """Regular createrepo"""
+ self.assert_same_results(self.indir)
+
+ def test_02_createrepo_relativepath(self):
+ """Regular createrepo"""
+ self.assert_same_results(os.path.relpath(self.indir))
+
+ def test_03_createrepo_excludes(self):
+ """--excludes * param"""
+ self.assert_same_results(self.indir, "--excludes '*'")
+
+ def test_04_createrepo_excludes(self):
+ """--excludes"""
+ self.assert_same_results(self.indir, "--excludes 'Archer-3.4.5-6.x86_64.rpm'")
+
+ def test_05_createrepo_excludes(self):
+ """--excludes"""
+ self.assert_same_results(self.indir, "--excludes 'Archer-*.rpm'")
+
+ def test_06_createrepo_skipsymlinks(self):
+ """--skip-symlinks"""
+ self.assert_same_results(self.indir, "--skip-symlinks")
+
+ def test_07_createrepo_pkglist(self):
+ """--pkglist"""
+ fn_pkglist = self.indir_mkfile("pkglist", "%s\n" % PACKAGES[0])
+ self.assert_same_results(self.indir, "--pkglist %s" % fn_pkglist)
+
+ def test_08_createrepo_pkglist(self):
+ """--pkglist"""
+ fn_pkglist = self.indir_mkfile("pkglist", "%s\n%s\n" % (PACKAGES[0], PACKAGES[1]))
+ self.assert_same_results(self.indir, "--pkglist %s" % fn_pkglist)
+
+ def test_09_createrepo_pkglist(self):
+ """--pkglist"""
+ fn_pkglist = self.indir_mkfile("pkglist", "%s\n\n%s\n\nfoobar.rpm\n\n" % (PACKAGES[0], PACKAGES[1]))
+ self.assert_same_results(self.indir, "--pkglist %s" % fn_pkglist)
+
+class TestCaseCreaterepoComparative_regularrepowithsubdirs(BaseTestCase):
+ """Repo with 3 packages, each in its own subdir"""
+
+ def setup(self):
+ subdir_a = self.indir_makedirs("a")
+ self.copy_pkg(PACKAGES[0], subdir_a)
+ subdir_b = self.indir_makedirs("b")
+ self.copy_pkg(PACKAGES[1], subdir_b)
+ subdir_c = self.indir_makedirs("c")
+ self.copy_pkg(PACKAGES[2], subdir_c)
+
+ def test_01_createrepo(self):
+ """Regular createrepo"""
+ self.assert_same_results(self.indir)
+
+ def test_02_createrepo_skipsymlinks(self):
+ """--skip-symlinks"""
+ self.assert_same_results(self.indir, "--skip-symlinks")
+
+ def test_03_createrepo_excludes(self):
+ """--excludes"""
+ self.assert_same_results(self.indir, "--excludes 'Archer-3.4.5-6.x86_64.rpm'")
+
+ def test_04_createrepo_excludes(self):
+ """--excludes"""
+ self.assert_same_results(self.indir, "--excludes 'Archer-*.rpm'")
+
+
+class TestCaseCreaterepoComparative_repowithsymlinks(BaseTestCase):
+ """Repo with 2 packages and 1 symlink to a package"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+ self.indir_addpkg(PACKAGES[1])
+ pkg_in_tdir = self.copy_pkg(PACKAGES[2], self.tdir)
+ os.symlink(pkg_in_tdir, os.path.join(self.indir, os.path.basename(pkg_in_tdir)))
+
+ def test_01_createrepo(self):
+ """Regular createrepo"""
+ self.assert_same_results(self.indir)
+
+ def test_02_createrepo_skipsymlinks(self):
+ """--skip-symlinks"""
+ self.assert_same_results(self.indir, "--skip-symlinks")
+
+
+class TestCaseCreaterepoComparative_repowithbadpackages(BaseTestCase):
+ """Repo with 1 regular package and few broken packages"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+ self.indir_makedirs("adirthatlookslike.rpm")
+ self.indir_mkfile("emptyfilethatlookslike.rpm")
+ self.indir_mkfile("afilethatlookslike.rpm", content="foobar")
+
+ def test_01_createrepo(self):
+ """Regular createrepo"""
+ self.assert_same_results(self.indir)
+
+
+class TestCaseCreaterepoComparative_cachedir(BaseTestCase):
+ """Repo with 3 packages and cachedir used"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+ self.indir_addpkg(PACKAGES[1])
+ self.indir_addpkg(PACKAGES[2])
+
+ def test_01_createrepo_owncachedir(self):
+ """Each createrepo has its own cachedir"""
+ # Gen cache
+ self.assert_same_results(self.indir, "--cachedir cache")
+ # Run again and use the cache
+ _, crres, crcres = self.assert_same_results(self.indir, "--cachedir cache")
+
+        # Compare the cached files (they should be identical)
+ cr_cache = os.path.join(crres.outdir, "cache")
+ crc_cache = os.path.join(crcres.outdir, "cache")
+ self.assert_same_dir_content(cr_cache, crc_cache)
+
+ def test_02_createrepo_sharedcachedir(self):
+ """Use cache mutually"""
+ cache_cr = os.path.abspath(os.path.join(self.indir, "cache_cr"))
+ cache_crc = os.path.abspath(os.path.join(self.indir, "cache_crc"))
+
+ # Gen cache by the cr then use it by cr_c
+ self.assert_run_cr(self.indir, "--cachedir %s" % cache_cr)
+ self.assert_run_cr(self.indir, "--cachedir %s" % cache_cr, c=True)
+
+        # Gen cache by the cr_c then use it by cr
+ self.assert_run_cr(self.indir, "--cachedir %s" % cache_crc, c=True)
+ self.assert_run_cr(self.indir, "--cachedir %s" % cache_crc)
+
+        # Compare the cached files (they should be identical)
+ self.assert_same_dir_content(cache_cr, cache_crc)
+
--- /dev/null
+import os
+import os.path
+
+from .fixtures import PACKAGES
+from .base import BaseTestCase
+
+
+class TestCaseCreaterepoUpdateComparative_emptyrepo(BaseTestCase):
+ """Empty input repository"""
+
+ def test_01_createrepoupdate(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir)
+ self.assert_same_results(self.indir, "--update")
+
+ def test_02_createrepoupdate_double(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir)
+ self.assert_same_results(self.indir, "--update")
+ self.assert_same_results(self.indir, "--update")
+
+ def test_03_createrepoupdate_relativepath(self):
+ """Repo from empty directory - specified by relative path"""
+ self.assert_same_results(os.path.relpath(self.indir))
+ self.assert_same_results(os.path.relpath(self.indir), "--update")
+
+ def test_04_createrepoupdate_simplemdfilenames(self):
+ """Repo from empty directory - specified by relative path"""
+ self.assert_same_results(os.path.relpath(self.indir))
+ self.assert_same_results(os.path.relpath(self.indir), "--update --simple-md-filenames")
+
+
+class TestCaseCreaterepoUpdateComparative_regularrepo(BaseTestCase):
+ """Repo with 3 packages"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+ self.indir_addpkg(PACKAGES[1])
+ self.indir_addpkg(PACKAGES[2])
+
+ def test_01_createrepoupdate(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir)
+ self.assert_same_results(self.indir, "--update")
+
+ def test_02_createrepoupdate_double(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir)
+ self.assert_same_results(self.indir, "--update")
+ self.assert_same_results(self.indir, "--update")
+
+ def test_03_createrepoupdate_relativepath(self):
+ """Repo from empty directory - specified by relative path"""
+ self.assert_same_results(os.path.relpath(self.indir))
+ self.assert_same_results(os.path.relpath(self.indir), "--update")
+
+ def test_04_createrepoupdate_simplemdfilenames(self):
+ """Repo from empty directory - specified by relative path"""
+ self.assert_same_results(os.path.relpath(self.indir))
+ self.assert_same_results(os.path.relpath(self.indir), "--update --simple-md-filenames")
+
+
+class TestCaseCreaterepoUpdateComparative_regularrepoandupdaterepo(BaseTestCase):
+ """Repo with 3 packages and repo that will be used as --update-md-path"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+ self.indir_addpkg(PACKAGES[1])
+ self.indir_addpkg(PACKAGES[2])
+ res = self.assert_run_cr(self.indir)
+ self.update_md_path = os.path.join(self.tdir, "update_md_path")
+ os.rename(res.outdir, self.update_md_path)
+ os.rename(res.logfile, os.path.join(self.tdir, "out_createrepo-update_md_path"))
+
+ def test_01_createrepoupdate_updatemdpath(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path)
+
+ def test_02_createrepoupdate_updatemdpathandupdate(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path)
+ self.assert_same_results(self.indir, "--update")
+
+ def test_03_createrepoupdate_updatemdpathdouble(self):
+ """Repo from empty directory"""
+ self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path)
+ self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path)
--- /dev/null
+import os
+import time
+import os.path
+
+from .fixtures import PACKAGES
+from .base import BaseTestCase
+
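+# Expected repodata filename patterns: the *_SIMPLE lists match
+# --simple-md-filenames output (plain names), the *_UNIQUE lists match
+# --unique-md-filenames output (checksum-prefixed names).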
+BASE_XML_PATTERNS_SIMPLE = [r"repomd\.xml", r"primary\.xml\..+", r"filelists\.xml\..+", r"other\.xml\..+"]
+BASE_XML_PATTERNS_UNIQUE = [r"repomd\.xml", r".*-primary\.xml\..+", r".*-filelists\.xml\..+", r".*-other\.xml\..+"]
+
+DBS_PATTERNS = [r".*primary\.sqlite\..+", r".*filelists\.sqlite\..+", r".*other\.sqlite\..+"]
+DBS_PATTERNS_SIMPLE = [r"primary\.sqlite\..+", r"filelists\.sqlite\..+", r"other\.sqlite\..+"]
+DBS_PATTERNS_UNIQUE = [r".*-primary\.sqlite\..+", r".*-filelists\.sqlite\..+", r".*-other\.sqlite\..+"]
+
+DBS_PATTERNS_UNIQUE_MD5 = [r"[0-9a-z]{32}-primary\.sqlite\..+", r".*-filelists\.sqlite\..+", r".*-other\.sqlite\..+"]
+
+DBS_PATTERNS_SIMPLE_GZ = [r"primary\.sqlite\.gz", r"filelists\.sqlite\.gz", r"other\.sqlite\.gz"]
+DBS_PATTERNS_SIMPLE_BZ2 = [r"primary\.sqlite\.bz2", r"filelists\.sqlite\.bz2", r"other\.sqlite\.bz2"]
+DBS_PATTERNS_SIMPLE_XZ = [r"primary\.sqlite\.xz", r"filelists\.sqlite\.xz", r"other\.sqlite\.xz"]
+
+
+class TestCaseSqliterepo_badparams(BaseTestCase):
+ """Use case with bad commandline arguments"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+
+
+class TestCaseSqliterepo(BaseTestCase):
+ """Base use cases"""
+
+ def setup(self):
+ self.indir_addpkg(PACKAGES[0])
+
+ def test_01_sqliterepo(self):
+ """Sqlitedbs already exists, sqliterepo without --force should fail"""
+ cr_res = self.run_cr(self.indir, c=True)
+ sq_res = self.run_sqlr(cr_res.outdir)
+ self.assertTrue(sq_res.rc)
+ self.assertTrue("already has sqlitedb present" in sq_res.out)
+
+ def test_02_sqliterepo(self):
+ """Sqlitedbs should be created"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--no-database", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir)
+        # Check that DBs really exist
+ self.assert_repo_files(outdir, DBS_PATTERNS, additional_files_allowed=True)
+
+ def test_03_sqliterepo(self):
+ """Sqlitedbs with simple md filenames should be created"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--no-database --simple-md-filenames", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir)
+        # Check that DBs really exist
+ self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE, additional_files_allowed=True)
+
+ def test_04_sqliterepo(self):
+ """Sqlitedbs with unique md filenames should be created"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--no-database --unique-md-filenames", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir)
+        # Check that DBs really exist
+ self.assert_repo_files(outdir, DBS_PATTERNS_UNIQUE, additional_files_allowed=True)
+
+ def test_05_sqliterepo(self):
+ """--xz is used (old bz2 DBs should be removed)"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir, args="--force --xz")
+
+        # Check that DBs really exist
+ self.assert_repo_files(outdir,
+ DBS_PATTERNS_SIMPLE_XZ+BASE_XML_PATTERNS_SIMPLE,
+ additional_files_allowed=False)
+
+ def test_06_sqliterepo(self):
+ """DBs already exists but --force is used sqlitedbs should be created"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir)
+ old_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2"))
+
+ self.assert_run_sqlr(outdir, args="--force")
+ new_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2"))
+
+        # Check that DBs really exist
+ self.assert_repo_files(outdir, DBS_PATTERNS, additional_files_allowed=True)
+
+ # Check that DBs are newer than the old ones
+ self.assertTrue(old_primary_ts < new_primary_ts)
+
+ def test_07_sqliterepo(self):
+ """--force and --keep-old and --xz are used (old bz2 DBs should be keeped)"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir, args="--force --keep-old --xz")
+
+        # Check that DBs really exist
+ self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_BZ2, additional_files_allowed=True)
+ self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_XZ, additional_files_allowed=True)
+
+ def test_08_sqliterepo(self):
+ """--local-sqlite is used and old DBs exists (--force is also used)"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir)
+ old_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2"))
+
+ self.assert_run_sqlr(outdir, args="--local-sqlite --force")
+ new_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2"))
+
+        # Check that DBs really exist
+ expected_files = DBS_PATTERNS_SIMPLE_BZ2 + BASE_XML_PATTERNS_SIMPLE
+ self.assert_repo_files(outdir, expected_files, additional_files_allowed=False)
+
+ # Check that DBs are newer than the old ones
+ self.assertTrue(old_primary_ts < new_primary_ts)
+
+ def test_09_sqliterepo(self):
+ """--local-sqlite is used with --xz and --force and --keep-old"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir, args="--local-sqlite --force --keep-old --xz")
+
+        # Check that DBs really exist
+ self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_BZ2, additional_files_allowed=True)
+ self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_XZ, additional_files_allowed=True)
+
+ def test_10_sqliterepo(self):
+ """--compress-type used"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir, args="--compress-type gz --force")
+
+        # Check that DBs really exist
+ expected_files = DBS_PATTERNS_SIMPLE_GZ + BASE_XML_PATTERNS_SIMPLE
+ self.assert_repo_files(outdir, expected_files, additional_files_allowed=False)
+
+ def test_11_sqliterepo(self):
+ """--checksum used"""
+ outdir = self.tdir_makedirs("repository")
+ self.assert_run_cr(self.indir, args="--database --unique-md-filenames", c=True, outdir=outdir)
+ self.assert_run_sqlr(outdir, args="--checksum md5 --force")
+
+        # Check that DBs really exist
+ expected_files = DBS_PATTERNS_UNIQUE_MD5 + BASE_XML_PATTERNS_UNIQUE
+ self.assert_repo_files(outdir, expected_files, additional_files_allowed=False)
\ No newline at end of file
COMPREPLY=()
case $3 in
- -V|--version|-h|--help|-u|--baseurl|--content|--repo|\
- -x|--excludes|--changelog-limit|\
- -q|--quiet|-v|--verbose|--skip-stat)
+ -V|--version|-h|--help)
return 0
;;
- --update-md-path|-o|--outputdir)
+ --update-md-path|-o|--outputdir|--oldpackagedirs)
COMPREPLY=( $( compgen -d -- "$2" ) )
return 0
;;
# COMPREPLY=( $( compgen -W '0 1 2 3 4 5 6 7 8 9' -- "$2" ) )
# return 0
# ;;
-# --num-deltas)
+# --num-deltas|--max-delta-rpm-size)
# COMPREPLY=( $( compgen -W '1 2 3 4 5 6 7 8 9' -- "$2" ) )
# return 0
# ;;
--skip-stat --pkglist --includepkg --outputdir
--skip-symlinks --changelog-limit --unique-md-filenames
--simple-md-filenames --retain-old-md --distro --content --repo
- --revision --read-pkgs-list --update --workers --xz
- --compress-type --keep-all-metadata' -- "$2" ) )
+ --revision --read-pkgs-list --workers --xz
+ --compress-type --keep-all-metadata --compatibility
+ --retain-old-md-by-age --cachedir --local-sqlite
+ --cut-dirs --location-prefix
+ --deltas --oldpackagedirs
+ --num-deltas --max-delta-rpm-size' -- "$2" ) )
else
COMPREPLY=( $( compgen -d -- "$2" ) )
fi
COMPREPLY=()
case $3 in
- --version|-h|--help|-a|--archlist)
+ --version|-h|--help)
return 0
;;
-g|--groupfile|--blocked)
COMPREPLY=( $( compgen -W '--version --help --repo --archlist --database
--no-database --verbose --outputdir --nogroups --noupdateinfo
--compress-type --method --all --noarch-repo --unique-md-filenames
- --simple-md-filenames --koji --groupfile
+ --simple-md-filenames --omit-baseurl --koji --groupfile
--blocked' -- "$2" ) )
else
COMPREPLY=( $( compgen -d -- "$2" ) )
COMPREPLY=()
case $3 in
- --version|-h|--help|-a|--archlist|--unique-md-filenames|--simple-md-filenames|--verbose)
+ --version|-h|--help)
return 0
;;
-f|--batchfile)
} &&
complete -F _cr_modifyrepo -o filenames modifyrepo_c
+_cr_sqliterepo()
+{
+ COMPREPLY=()
+
+ case $3 in
+ -h|--help|-V|--version)
+ return 0
+ ;;
+ --compress-type)
+ _cr_compress_type "" "$2"
+ return 0
+ ;;
+ -s|--checksum)
+ _cr_checksum_type "$1" "$2"
+ return 0
+ ;;
+ esac
+
+ if [[ $2 == -* ]] ; then
+ COMPREPLY=( $( compgen -W '--help --version --quiet --verbose
+ --force --keep-old --xz --compress-type --checksum
+ --local-sqlite ' -- "$2" ) )
+ else
+ COMPREPLY=( $( compgen -f -- "$2" ) )
+ fi
+} &&
+complete -F _cr_sqliterepo -o filenames sqliterepo_c
+
# Local variables:
# mode: shell-script
# sh-basic-offset: 4
+++ /dev/null
-# Examples of usage
-
- ./deltarepo.py repo1 repo2 -v
-
- ./deltarepo.py --apply repo1 delta/
+++ /dev/null
-#!/usr/bin/env python
-
-import sys
-import os.path
-import hashlib
-import logging
-from optparse import OptionParser, OptionGroup
-import deltarepo
-
-LOG_FORMAT = "%(message)s"
-
-# TODO:
-# - Support for different type of compression (?)
-
-
-def parse_options():
- parser = OptionParser("usage: %prog [options] <first_repo> <second_repo>\n" \
- " %prog --apply <repo> <delta_repo>")
- parser.add_option("--version", action="store_true",
- help="Show version number and quit.")
- parser.add_option("-q", "--quiet", action="store_true",
- help="Run in quiet mode.")
- parser.add_option("-v", "--verbose", action="store_true",
- help="Run in verbose mode.")
- #parser.add_option("-l", "--list-datatypes", action="store_true",
- # help="List datatypes for which delta is supported.")
- parser.add_option("-o", "--outputdir", action="store", metavar="DIR",
- help="Output directory.", default="./")
-
- group = OptionGroup(parser, "Delta generation")
- #group.add_option("--skip", action="append", metavar="DATATYPE",
- # help="Skip delta on the DATATYPE. Could be specified "\
- # "multiple times. (E.g., --skip=comps)")
- #group.add_option("--do-only", action="append", metavar="DATATYPE",
- # help="Do delta only for the DATATYPE. Could be specified "\
- # "multiple times. (E.g., --do-only=primary)")
- group.add_option("-t", "--id-type", action="store", metavar="HASHTYPE",
- help="Hash function for the ids (RepoId and DeltaRepoId). " \
- "Default is sha256.", default="sha256")
- parser.add_option_group(group)
-
- group = OptionGroup(parser, "Delta application")
- group.add_option("-a", "--apply", action="store_true",
- help="Enable delta application mode.")
- #group.add_option("-d", "--database", action="store_true",
- # help="Gen database.")
- parser.add_option_group(group)
-
- options, args = parser.parse_args()
-
- # Error checks
-
- if options.version:
- return (options.args)
-
- if len(args) != 2:
- parser.error("Two repository paths have to be specified!")
-
- if options.id_type not in hashlib.algorithms:
- parser.error("Unsupported hash algorithm %s" % options.id_type)
-
- if options.quiet and options.verbose:
- parser.error("Cannot use quiet and verbose simultaneously!")
-
- if not os.path.isdir(args[0]) or \
- not os.path.isdir(os.path.join(args[0], "repodata")) or \
- not os.path.isfile(os.path.join(args[0], "repodata", "repomd.xml")):
- parser.error("Not a repository: %s" % args[0])
-
- if not os.path.isdir(args[1]) or \
- not os.path.isdir(os.path.join(args[1], "repodata")) or \
- not os.path.isfile(os.path.join(args[1], "repodata", "repomd.xml")):
- parser.error("Not a repository: %s" % args[1])
-
- if not os.path.isdir(options.outputdir):
- parser.error("Not a directory: %s" % options.outputdir)
-
- return (options, args)
-
-def print_version():
- print "DeltaRepo: %s" % deltarepo.VERBOSE_VERSION
-
-def setup_logging(quiet, verbose):
- logger = logging.getLogger("deltarepo_logger")
- formatter = logging.Formatter(LOG_FORMAT)
- logging.basicConfig(format=LOG_FORMAT)
- if quiet:
- logger.setLevel(logging.ERROR)
- elif verbose:
- logger.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
- return logger
-
-if __name__ == "__main__":
- options, args = parse_options()
-
- if options.version:
- print_version()
- sys.exit(0)
-
- logger = setup_logging(options.quiet, options.verbose)
-
- if options.apply:
- # Applying delta
- da = deltarepo.DeltaRepoApplicator(args[0],
- args[1],
- out_path=options.outputdir,
- logger=logger)
- da.apply()
- else:
- # Do delta
- dg = deltarepo.DeltaRepoGenerator(args[0],
- args[1],
- out_path=options.outputdir,
- logger=logger,
- repoid_type=options.id_type)
- dg.gen()
+++ /dev/null
-"""
-DeltaRepo package for Python.
-This is the library for generation, application and handling of
-DeltaRepositories.
-The library is builded on the Createrepo_c library and its a part of it.
-
-Copyright (C) 2013 Tomas Mlcoch
-
-"""
-
-import createrepo_c as cr
-from deltarepo.common import LoggingInterface, Metadata, RemovedXml
-from deltarepo.applicator import DeltaRepoApplicator
-from deltarepo.generator import DeltaRepoGenerator
-from deltarepo.delta_plugins import PLUGINS
-from deltarepo.errors import DeltaRepoError, DeltaRepoPluginError
-
-__all__ = ['VERSION', 'VERBOSE_VERSION', 'DeltaRepoError',
- 'DeltaRepoPluginError', 'DeltaRepoGenerator'
- 'DeltaRepoApplicator']
-
-VERSION = "0.0.1"
-VERBOSE_VERSION = "%s (createrepo_c: %s)" % (VERSION, cr.VERSION)
+++ /dev/null
-"""
-DeltaRepo package for Python.
-This is the library for generation, application and handling of
-DeltaRepositories.
-The library is builded on the Createrepo_c library and its a part of it.
-
-Copyright (C) 2013 Tomas Mlcoch
-
-"""
-
-import os
-import shutil
-import createrepo_c as cr
-from deltarepo.common import LoggingInterface, Metadata, RemovedXml
-from deltarepo.delta_plugins import PLUGINS
-from deltarepo.errors import DeltaRepoError, DeltaRepoPluginError
-
-__all__ = ['DeltaRepoApplicator']
-
-class DeltaRepoApplicator(LoggingInterface):
-
- def __init__(self,
- old_repo_path,
- delta_repo_path,
- out_path=None,
- logger=None):
-
- # Initialization
-
- LoggingInterface.__init__(self, logger)
-
- self.repoid_type = None
- self.unique_md_filenames = False
- self.databases = False
- self.removedxmlobj = RemovedXml()
-
- self.out_path = out_path or "./"
-
- self.final_path = os.path.join(self.out_path, "repodata")
-
- self.new_repo_path = out_path
- self.new_repodata_path = os.path.join(self.new_repo_path, ".repodata/")
- self.new_repomd_path = os.path.join(self.new_repodata_path, "repomd.xml")
-
- self.old_repo_path = old_repo_path
- self.old_repodata_path = os.path.join(self.old_repo_path, "repodata/")
- self.old_repomd_path = os.path.join(self.old_repodata_path, "repomd.xml")
-
- self.delta_repo_path = delta_repo_path
- self.delta_repodata_path = os.path.join(self.delta_repo_path, "repodata/")
- self.delta_repomd_path = os.path.join(self.delta_repodata_path, "repomd.xml")
-
- # Prepare repomd objects
- self.old_repomd = cr.Repomd(self.old_repomd_path)
- self.delta_repomd = cr.Repomd(self.delta_repomd_path)
- self.new_repomd = cr.Repomd()
-
- # Check if delta repo id correspond with the old repo id
- if not self.delta_repomd.repoid or \
- len(self.delta_repomd.repoid.split('-')) != 2:
- raise DeltaRepoError("Bad DeltaRepoId")
-
- self.repoid_type_str = self.delta_repomd.repoid_type
- self.old_id, self.new_id = self.delta_repomd.repoid.split('-')
- self._debug("Delta %s -> %s" % (self.old_id, self.new_id))
-
- if self.old_repomd.repoid_type == self.delta_repomd.repoid_type:
- if self.old_repomd.repoid and self.old_repomd.repoid != self.old_id:
- raise DeltaRepoError("Not suitable delta for current repo " \
- "(Expected: {0} Real: {1})".format(
- self.old_id, self.old_repomd.repoid))
- else:
- self._debug("Different repoid types repo: {0} vs delta: {1}".format(
- self.old_repomd.repoid_type, self.delta_repomd.repoid_type))
-
- # Use revision and tags
- self.new_repomd.set_revision(self.delta_repomd.revision)
- for tag in self.delta_repomd.distro_tags:
- self.new_repomd.add_distro_tag(tag[1], tag[0])
- for tag in self.delta_repomd.repo_tags:
- self.new_repomd.add_repo_tag(tag)
- for tag in self.delta_repomd.content_tags:
- self.new_repomd.add_content_tag(tag)
-
- # Load records
- self.old_records = {}
- self.delta_records = {}
- for record in self.old_repomd.records:
- self.old_records[record.type] = record
- for record in self.delta_repomd.records:
- self.delta_records[record.type] = record
-
- old_record_types = set(self.old_records.keys())
- delta_record_types = set(self.delta_records.keys())
-
- self.deleted_repomd_record_types = old_record_types - delta_record_types
- self.added_repomd_record_types = delta_record_types - old_record_types
-
- # Important sanity checks (repo without primary is definitely bad)
- if not "primary" in self.old_records:
- raise DeltaRepoError("Missing \"primary\" metadata in old repo")
-
- if not "primary" in self.delta_records:
- raise DeltaRepoError("Missing \"primary\" metadata in delta repo")
-
- # Detect type of checksum in the delta repomd.xml
- self.checksum_type = cr.checksum_type(self.delta_records["primary"].checksum_type)
- if self.checksum_type == cr.UNKNOWN_CHECKSUM:
- raise DeltaRepoError("Unknown checksum type used in delta repo: %s" % \
- self.delta_records["primary"].checksum_type)
-
- # Detection if use unique md filenames
- if self.delta_records["primary"].location_href.split("primary")[0] != "":
- self.unique_md_filenames = True
-
- # Load removedxml
- self.removedxml_path = None
- if "removed" in self.delta_records:
- self.removedxml_path = os.path.join(self.delta_repo_path,
- self.delta_records["removed"].location_href)
- self.removedxmlobj.xml_parse(self.removedxml_path)
- else:
- self._warning("\"removed\" record is missing in repomd.xml "\
- "of delta repo")
-
- # Prepare bundle
- self.bundle = {}
- self.bundle["repoid_type_str"] = self.repoid_type_str
- self.bundle["removed_obj"] = self.removedxmlobj
- self.bundle["unique_md_filenames"] = self.unique_md_filenames
-
- def _new_metadata(self, metadata_type):
- """Return Metadata Object for the metadata_type or None"""
-
- if metadata_type not in self.delta_records:
- return None
-
- metadata = Metadata(metadata_type)
-
- # Build delta filename
- metadata.delta_fn = os.path.join(self.delta_repo_path,
- self.delta_records[metadata_type].location_href)
-
- # Build old filename
- if metadata_type in self.old_records:
- metadata.old_fn = os.path.join(self.old_repo_path,
- self.old_records[metadata_type].location_href)
-
- # Set output directory
- metadata.out_dir = self.new_repodata_path
-
- # Determine checksum type
- metadata.checksum_type = cr.checksum_type(
- self.delta_records[metadata_type].checksum_type)
-
- # Determine compression type
- metadata.compression_type = cr.detect_compression(metadata.delta_fn)
- if (metadata.compression_type == cr.UNKNOWN_COMPRESSION):
- raise DeltaRepoError("Cannot detect compression type for {0}".format(
- metadata.delta_fn))
-
- return metadata
-
- def apply(self):
-
- # Prepare output path
- os.mkdir(self.new_repodata_path)
-
- processed_metadata = set()
- used_plugins = set()
- plugin_used = True
-
- while plugin_used:
-            # Keep iterating while at least one plugin was used in the last pass
- plugin_used = False
-
- for plugin in PLUGINS:
-
- # Use only plugins that haven't been already used
- if plugin in used_plugins:
- continue
-
-                # Check which metadata this plugin wants to process
- conflicting_metadata = set(plugin.METADATA) & processed_metadata
- if conflicting_metadata:
-                    message = "Plugin {0}: Error - Plugin wants to process " \
-                              "already processed metadata {1}".format(
-                              plugin.NAME, conflicting_metadata)
- self._error(message)
- raise DeltaRepoError(message)
-
- # Prepare metadata for the plugin
- metadata_objects = {}
- for metadata_name in plugin.METADATA:
- metadata_object = self._new_metadata(metadata_name)
- if metadata_object is not None:
- metadata_objects[metadata_name] = metadata_object
-
- # Skip plugin if no supported metadata available
- if not metadata_objects:
-                    self._debug("Plugin {0}: Skipped - None of the supported " \
-                                "metadata {1} is available".format(
-                                plugin.NAME, plugin.METADATA))
- used_plugins.add(plugin)
- continue
-
-                # Check if the bundle contains everything the plugin needs
- required_bundle_keys = set(plugin.APPLY_REQUIRED_BUNDLE_KEYS)
- bundle_keys = set(self.bundle.keys())
- if not required_bundle_keys.issubset(bundle_keys):
- self._debug("Plugin {0}: Skipped - Bundle keys {1} "\
- "are not available".format(plugin.NAME,
- (required_bundle_keys - bundle_keys)))
- continue
-
- # Use the plugin
- self._debug("Plugin {0}: Active".format(plugin.NAME))
- plugin_instance = plugin()
- plugin_instance.apply(metadata_objects, self.bundle)
-
-                # Check which bundle keys were added by the plugin
- new_bundle_keys = set(self.bundle.keys())
- diff = new_bundle_keys - bundle_keys
- if diff != set(plugin.APPLY_BUNDLE_CONTRIBUTION):
-                    message = "Plugin {0}: Error - Plugin should have added " \
-                              "bundle items {1} but added {2}".format(
-                              plugin.NAME, plugin.APPLY_BUNDLE_CONTRIBUTION,
-                              list(diff))
- self._error(message)
- raise DeltaRepoError(message)
-
-                # Put repomd records from the processed metadata into the repomd
- for md in metadata_objects.values():
- self._debug("Plugin {0}: Processed \"{1}\" delta record "\
- "which produced:".format(
- plugin.NAME, md.metadata_type))
- for repomd_record in md.generated_repomd_records:
- self._debug(" - {0}".format(repomd_record.type))
- self.new_repomd.set_record(repomd_record)
-
- # Organization stuff
- processed_metadata.update(set(metadata_objects.keys()))
- used_plugins.add(plugin)
- plugin_used = True
-
- # TODO:
- # Process rest of the metadata files
-
- self._debug("Used plugins: {0}".format([p.NAME for p in used_plugins]))
-
- # Check if calculated repoids match
- self._debug("Checking expected repoids")
-
- if "old_repoid" in self.bundle:
- if self.old_id != self.bundle["old_repoid"]:
- message = "Repoid of the \"{0}\" repository doesn't match "\
- "the real repoid ({1} != {2}).".format(
- self.old_repo_path, self.old_id,
- self.bundle["old_repoid"])
- self._error(message)
- raise DeltaRepoError(message)
- else:
- self._debug("Repoid of the old repo matches ({0})".format(
- self.old_id))
- else:
- self._warning("\"old_repoid\" item is missing in bundle.")
-
- if "new_repoid" in self.bundle:
- if self.new_id != self.bundle["new_repoid"]:
- message = "Repoid of the \"{0}\" repository doesn't match "\
- "the real repoid ({1} != {2}).".format(
- self.new_repo_path, self.new_id,
- self.bundle["new_repoid"])
- self._error(message)
- raise DeltaRepoError(message)
- else:
- self._debug("Repoid of the new repo matches ({0})".format(
- self.new_id))
- else:
- self._warning("\"new_repoid\" item is missing in bundle.")
-
- # Prepare and write out the new repomd.xml
- self._debug("Preparing repomd.xml ...")
- self.new_repomd.set_repoid(self.new_id, self.repoid_type_str)
- self.new_repomd.sort_records()
- new_repomd_xml = self.new_repomd.xml_dump()
-
- self._debug("Writing repomd.xml ...")
- open(self.new_repomd_path, "w").write(new_repomd_xml)
-
- # Final move
- if os.path.exists(self.final_path):
- self._warning("Destination dir already exists! Removing %s" % \
- self.final_path)
- shutil.rmtree(self.final_path)
- self._debug("Moving %s -> %s" % (self.new_repodata_path, self.final_path))
- os.rename(self.new_repodata_path, self.final_path)
-
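For review context: the apply() pipeline above was driven from a tiny entry point. A minimal sketch, assuming the enclosing class is exported as deltarepo.DeltaRepoApplicator and takes the constructor arguments referenced above (old_repo_path, delta_repo_path, out_path, logger); the module and class names are assumptions, since the class header lies outside this hunk.

    # Hypothetical driver for the applicator above (names assumed).
    import logging

    from deltarepo import DeltaRepoApplicator  # class name not shown in this hunk

    logging.basicConfig(level=logging.DEBUG)

    applicator = DeltaRepoApplicator("old_repo/",    # repo the delta starts from
                                     "delta_repo/",  # repo holding the delta metadata
                                     out_path="new_repo/",
                                     logger=logging.getLogger())
    applicator.apply()  # creates new_repo/repodata/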
+++ /dev/null
-import os
-import tempfile
-import logging
-import xml.dom.minidom
-import createrepo_c as cr
-from lxml import etree
-
-class LoggingInterface(object):
- def __init__(self, logger=None):
- if logger is None:
- logger = logging.getLogger()
- logger.disabled = True
- self.logger = logger
-
- def _debug(self, msg):
- self.logger.debug(msg)
-
- def _info(self, msg):
- self.logger.info(msg)
-
- def _warning(self, msg):
- self.logger.warning(msg)
-
- def _error(self, msg):
- self.logger.error(msg)
-
- def _critical(self, msg):
- self.logger.critical(msg)
-
-class RemovedXml(object):
- def __init__(self):
- self.packages = {} # { location_href: location_base }
- self.files = {} # { location_href: location_base or Null }
-
-    def __str__(self):
-        return "{0}\n{1}".format(self.packages, self.files)
-
- def add_pkg(self, pkg):
- self.packages[pkg.location_href] = pkg.location_base
-
- def add_pkg_locations(self, location_href, location_base):
- self.packages[location_href] = location_base
-
- def add_record(self, rec):
- self.files[rec.location_href] = rec.location_base
-
- def xml_dump(self):
- xmltree = etree.Element("removed")
- packages = etree.SubElement(xmltree, "packages")
- for href, base in self.packages.iteritems():
- attrs = {}
- if href: attrs['href'] = href
- if base: attrs['base'] = base
- if not attrs: continue
- etree.SubElement(packages, "location", attrs)
- files = etree.SubElement(xmltree, "files")
- for href, base in self.files.iteritems():
- attrs = {}
- if href: attrs['href'] = href
- if base: attrs['base'] = base
- if not attrs: continue
- etree.SubElement(files, "location", attrs)
- return etree.tostring(xmltree,
- pretty_print=True,
- encoding="UTF-8",
- xml_declaration=True)
-
- def xml_parse(self, path):
-
- _, tmp_path = tempfile.mkstemp()
- cr.decompress_file(path, tmp_path, cr.AUTO_DETECT_COMPRESSION)
- dom = xml.dom.minidom.parse(tmp_path)
- os.remove(tmp_path)
-
- packages = dom.getElementsByTagName("packages")
- if packages:
- for loc in packages[0].getElementsByTagName("location"):
- href = loc.getAttribute("href")
- base = loc.getAttribute("base")
- if not href:
- continue
- if not base:
- base = None
- self.packages[href] = base
-
- files = dom.getElementsByTagName("files")
- if files:
- for loc in files[0].getElementsByTagName("location"):
- href = loc.getAttribute("href")
- base = loc.getAttribute("base")
- if not href:
- continue
- if not base:
- base = None
- self.files[href] = base
-
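The removed.xml document handled by RemovedXml above round-trips in a few lines. A minimal sketch; it assumes cr.decompress_file() passes an uncompressed file through when auto-detection finds no compression:

    # Round-trip sketch for the RemovedXml helper above.
    removed = RemovedXml()
    removed.add_pkg_locations("Packages/foo-1.0-1.noarch.rpm", None)
    removed.add_pkg_locations("Packages/bar-2.0-1.x86_64.rpm",
                              "http://example.com/repo/")

    open("removed.xml", "w").write(removed.xml_dump())

    parsed = RemovedXml()
    parsed.xml_parse("removed.xml")
    assert parsed.packages == removed.packages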
-class Metadata(object):
- """Metadata file"""
-
- def __init__(self, metadata_type):
-
- self.metadata_type = metadata_type
-
- # Paths
- self.old_fn = None
- self.delta_fn = None
- self.new_fn = None
-
- self.out_dir = None
-
- # Settings
- self.checksum_type = None
- self.compression_type = None
-
- # Records
-
- # List of new repomd records related to this delta file
-        # A list, because some delta files may lead to more than one output
-        # file. E.g.: a delta of primary.xml generates a new primary.xml
-        # and a primary.sqlite as well.
- self.generated_repomd_records = []
+++ /dev/null
-import os
-import os.path
-import hashlib
-import createrepo_c as cr
-from deltarepo.errors import DeltaRepoError, DeltaRepoPluginError
-
-PLUGINS = []
-
-class DeltaRepoPlugin(object):
-
- # Plugin name
- NAME = ""
-
- # Plugin version (integer number!)
- VERSION = 1
-
-    # List of Metadata this plugin takes care of.
-    # The plugin HAS TO do deltas for each of the listed metadata and
-    # be able to apply deltas to them!
-    METADATA = []
-
-    # It is highly recommended for a plugin to be maximally independent of
-    # other plugins and of metadata not specified in METADATA.
-    # But some kind of dependency mechanism can be implemented via
-    # *_REQUIRED_BUNDLE_KEYS and *_BUNDLE_CONTRIBUTION.
-
- # List of bundle keys that have to be filled before
- # apply() method of this plugin should be called
- APPLY_REQUIRED_BUNDLE_KEYS = []
-
-    # List of bundle keys this plugin adds during the apply() method call
- APPLY_BUNDLE_CONTRIBUTION = []
-
- # List of bundle keys that have to be filled before
- # gen() method of this plugin should be called
- GEN_REQUIRED_BUNDLE_KEYS = []
-
-    # List of bundle keys this plugin adds during the gen() method call
- GEN_BUNDLE_CONTRIBUTION = []
-
-    # If two plugins want to add the same key to the bundle,
-    # an exception is raised.
-    # If a plugin requires a key that isn't provided by any of the registered
-    # plugins, an exception is raised.
-    # If a plugin adds a key to the bundle that is not listed in its
-    # *_BUNDLE_CONTRIBUTION, an exception is raised.
-
- def __init__(self):
- pass
-
- def apply(self, metadata, bundle):
- """
-        :arg metadata: Dict with the available metadata listed in METADATA.
-                       Key is a metadata type (e.g. "primary", "filelists", ...),
-                       value is a Metadata object.
-                       This method is called only if at least one of the metadata
-                       listed in METADATA is found in the delta repomd.
-        :arg bundle: Dict with various metadata.
-
-        The apply method has to:
-        * Raise DeltaRepoPluginError if something goes wrong.
-        * Build a new filename for each metadata and store it
-          in the Metadata object.
-        """
- raise NotImplementedError("Not implemented")
-
- def gen(self, metadata, bundle):
- raise NotImplementedError("Not implemented")
-
-class MainDeltaRepoPlugin(DeltaRepoPlugin):
-
- NAME = "MainDeltaPlugin"
- VERSION = 1
- METADATA = ["primary", "filelists", "other"]
- APPLY_REQUIRED_BUNDLE_KEYS = ["repoid_type_str",
- "removed_obj",
- "unique_md_filenames"]
- APPLY_BUNDLE_CONTRIBUTION = ["old_repoid", "new_repoid"]
- GEN_REQUIRED_BUNDLE_KEYS = ["repoid_type_str",
- "removed_obj",
- "unique_md_filenames"]
- GEN_BUNDLE_CONTRIBUTION = ["old_repoid", "new_repoid"]
-
- def _path(self, path, record):
- """Return path to the repodata file."""
- return os.path.join(path, record.location_href)
-
- def _pkg_id_tuple(self, pkg):
- """Return tuple identifying a package in repodata.
- (pkgId, location_href, location_base)"""
- return (pkg.pkgId, pkg.location_href, pkg.location_base)
-
- def _pkg_id_str(self, pkg):
-        """Return a string identifying a package in repodata.
-        These strings are used for the RepoId calculation."""
- if not pkg.pkgId:
- self._warning("Missing pkgId in a package!")
- if not pkg.location_href:
-            self._warning("Missing location_href in package %s %s" % \
- (pkg.name, pkg.pkgId))
- return "%s%s%s" % (pkg.pkgId or '',
- pkg.location_href or '',
- pkg.location_base or '')
-
- def apply(self, metadata, bundle):
-
- # Get info from bundle
- removed_obj = bundle["removed_obj"]
- repoid_type_str = bundle["repoid_type_str"]
- unique_md_filenames = bundle["unique_md_filenames"]
-
- # Check input arguments
- if "primary" not in metadata:
- raise DeltaRepoPluginError("Primary metadata missing")
-
- pri_md = metadata.get("primary")
- fil_md = metadata.get("filelists")
- oth_md = metadata.get("other")
-
- # Build and prepare destination paths
- # (And store them in the same Metadata object)
- def prepare_paths_in_metadata(md, xmlclass, dbclass):
- if md is None:
- return
-
- suffix = cr.compression_suffix(md.compression_type) or ""
- md.new_fn = os.path.join(md.out_dir,
- "{0}.xml{1}".format(
- md.metadata_type, suffix))
- md.new_f_stat = cr.ContentStat(md.checksum_type)
- md.new_f = xmlclass(md.new_fn,
- md.compression_type,
- md.new_f_stat)
- md.db_fn = os.path.join(md.out_dir, "{0}.sqlite".format(
- md.metadata_type))
- md.db = dbclass(md.db_fn)
-
- # Primary
- prepare_paths_in_metadata(pri_md,
- cr.PrimaryXmlFile,
- cr.PrimarySqlite)
-
- # Filelists
- prepare_paths_in_metadata(fil_md,
- cr.FilelistsXmlFile,
- cr.FilelistsSqlite)
-
- # Other
- prepare_paths_in_metadata(oth_md,
- cr.OtherXmlFile,
- cr.OtherSqlite)
-
- # Apply delta
-
- removed_packages = set() # set of pkgIds (hashes)
- all_packages = {} # dict { 'pkgId': pkg }
-
- old_repoid_strings = []
- new_repoid_strings = []
-
- def old_pkgcb(pkg):
- old_repoid_strings.append(self._pkg_id_str(pkg))
- if pkg.location_href in removed_obj.packages:
- if removed_obj.packages[pkg.location_href] == pkg.location_base:
- # This package won't be in new metadata
- return
- new_repoid_strings.append(self._pkg_id_str(pkg))
- all_packages[pkg.pkgId] = pkg
-
- def delta_pkgcb(pkg):
- new_repoid_strings.append(self._pkg_id_str(pkg))
- all_packages[pkg.pkgId] = pkg
-
- do_primary_files = 1
- if fil_md and fil_md.delta_fn and fil_md.old_fn:
- # Don't read files from primary if there is filelists.xml
- do_primary_files = 0
-
- cr.xml_parse_primary(pri_md.old_fn, pkgcb=old_pkgcb,
- do_files=do_primary_files)
- cr.xml_parse_primary(pri_md.delta_fn, pkgcb=delta_pkgcb,
- do_files=do_primary_files)
-
- # Calculate RepoIds
- old_repoid = ""
- new_repoid = ""
-
- h = hashlib.new(repoid_type_str)
- old_repoid_strings.sort()
- for i in old_repoid_strings:
- h.update(i)
- old_repoid = h.hexdigest()
-
- h = hashlib.new(repoid_type_str)
- new_repoid_strings.sort()
- for i in new_repoid_strings:
- h.update(i)
- new_repoid = h.hexdigest()
-
- bundle["old_repoid"] = old_repoid
- bundle["new_repoid"] = new_repoid
-
- # Sort packages
- def cmp_pkgs(x, y):
- # Compare only by filename
- ret = cmp(os.path.basename(x.location_href),
- os.path.basename(y.location_href))
- if ret != 0:
- return ret
-
- # Compare by full location_href path
- return cmp(x.location_href, y.location_href)
-
- all_packages_sorted = sorted(all_packages.values(), cmp=cmp_pkgs)
-
- def newpkgcb(pkgId, name, arch):
- return all_packages.get(pkgId, None)
-
- # Parse filelists
- if fil_md and fil_md.delta_fn and fil_md.old_fn:
- cr.xml_parse_filelists(fil_md.old_fn, newpkgcb=newpkgcb)
- cr.xml_parse_filelists(fil_md.delta_fn, newpkgcb=newpkgcb)
-
- # Parse other
- if oth_md and oth_md.delta_fn and oth_md.old_fn:
- cr.xml_parse_other(oth_md.old_fn, newpkgcb=newpkgcb)
- cr.xml_parse_other(oth_md.delta_fn, newpkgcb=newpkgcb)
-
- num_of_packages = len(all_packages_sorted)
-
- # Write out primary
- pri_md.new_f.set_num_of_pkgs(num_of_packages)
- for pkg in all_packages_sorted:
- pri_md.new_f.add_pkg(pkg)
- if pri_md.db:
- pri_md.db.add_pkg(pkg)
-
- # Write out filelists
-        if fil_md and fil_md.new_f:
- fil_md.new_f.set_num_of_pkgs(num_of_packages)
- for pkg in all_packages_sorted:
- fil_md.new_f.add_pkg(pkg)
- if fil_md.db:
- fil_md.db.add_pkg(pkg)
-
- # Write out other
-        if oth_md and oth_md.new_f:
- oth_md.new_f.set_num_of_pkgs(num_of_packages)
- for pkg in all_packages_sorted:
- oth_md.new_f.add_pkg(pkg)
- if oth_md.db:
- oth_md.db.add_pkg(pkg)
-
- # Finish metadata
- def finish_metadata(md):
- if md is None:
- return
-
- # Close XML file
- md.new_f.close()
-
- # Prepare repomd record of xml file
- rec = cr.RepomdRecord(md.metadata_type, md.new_fn)
- rec.load_contentstat(md.new_f_stat)
- rec.fill(md.checksum_type)
- if unique_md_filenames:
- rec.rename_file()
-
- md.generated_repomd_records.append(rec)
-
- # Prepare database
- if hasattr(md, "db") and md.db:
- md.db.dbinfo_update(rec.checksum)
- md.db.close()
- db_stat = cr.ContentStat(md.checksum_type)
- db_compressed = md.db_fn+".bz2"
- cr.compress_file(md.db_fn, None, cr.BZ2, db_stat)
- os.remove(md.db_fn)
-
-            # Prepare repomd record of database file
- db_rec = cr.RepomdRecord("{0}_db".format(md.metadata_type),
- db_compressed)
- db_rec.load_contentstat(db_stat)
- db_rec.fill(md.checksum_type)
- if unique_md_filenames:
- db_rec.rename_file()
-
- md.generated_repomd_records.append(db_rec)
-
- # Add records to the bundle
-
- finish_metadata(pri_md)
- finish_metadata(fil_md)
- finish_metadata(oth_md)
-
- def gen(self, metadata, bundle):
-
- # Get info from bundle
- removed_obj = bundle["removed_obj"]
- repoid_type_str = bundle["repoid_type_str"]
- unique_md_filenames = bundle["unique_md_filenames"]
-
- # Check input arguments
- if "primary" not in metadata:
- raise DeltaRepoPluginError("Primary metadata missing")
-
- pri_md = metadata.get("primary")
- fil_md = metadata.get("filelists")
- oth_md = metadata.get("other")
-
- # Build and prepare destination paths
- # (And store them in the same Metadata object)
- def prepare_paths_in_metadata(md, xmlclass, dbclass):
- if md is None:
- return
-
- suffix = cr.compression_suffix(md.compression_type) or ""
- md.delta_fn = os.path.join(md.out_dir,
- "{0}.xml{1}".format(
- md.metadata_type, suffix))
- md.delta_f_stat = cr.ContentStat(md.checksum_type)
- md.delta_f = xmlclass(md.delta_fn,
- md.compression_type,
- md.delta_f_stat)
- # Database for delta repo is redundant
- #md.db_fn = os.path.join(md.out_dir, "{0}.sqlite".format(
- # md.metadata_type))
- #md.db = dbclass(md.db_fn)
-
- # Primary
- prepare_paths_in_metadata(pri_md,
- cr.PrimaryXmlFile,
- cr.PrimarySqlite)
-
- # Filelists
- prepare_paths_in_metadata(fil_md,
- cr.FilelistsXmlFile,
- cr.FilelistsSqlite)
-
- # Other
- prepare_paths_in_metadata(oth_md,
- cr.OtherXmlFile,
- cr.OtherSqlite)
-
- # Gen delta
-
- old_packages = set()
- added_packages = {} # dict { 'pkgId': pkg }
- added_packages_ids = [] # list of package ids
-
- old_repoid_strings = []
- new_repoid_strings = []
-
- def old_pkgcb(pkg):
- old_packages.add(self._pkg_id_tuple(pkg))
- old_repoid_strings.append(self._pkg_id_str(pkg))
-
- def new_pkgcb(pkg):
- new_repoid_strings.append(self._pkg_id_str(pkg))
- pkg_id_tuple = self._pkg_id_tuple(pkg)
- if not pkg_id_tuple in old_packages:
- # This package is only in new repodata
- added_packages[pkg.pkgId] = pkg
- added_packages_ids.append(pkg.pkgId)
- else:
- # This package is also in the old repodata
- old_packages.remove(pkg_id_tuple)
-
- do_new_primary_files = 1
-        if fil_md and fil_md.delta_f and fil_md.new_fn:
- # All files will be parsed from filelists
- do_new_primary_files = 0
-
- cr.xml_parse_primary(pri_md.old_fn, pkgcb=old_pkgcb, do_files=0)
- cr.xml_parse_primary(pri_md.new_fn, pkgcb=new_pkgcb,
- do_files=do_new_primary_files)
-
-        # Calculate RepoIds
-        old_repoid = ""
-        new_repoid = ""
-
- h = hashlib.new(repoid_type_str)
- old_repoid_strings.sort()
- for i in old_repoid_strings:
- h.update(i)
- old_repoid = h.hexdigest()
-
- h = hashlib.new(repoid_type_str)
- new_repoid_strings.sort()
- for i in new_repoid_strings:
- h.update(i)
- new_repoid = h.hexdigest()
-
- bundle["old_repoid"] = old_repoid
- bundle["new_repoid"] = new_repoid
-
- # Prepare list of removed packages
- removed_pkgs = sorted(old_packages)
- for _, location_href, location_base in removed_pkgs:
- removed_obj.add_pkg_locations(location_href, location_base)
-
- num_of_packages = len(added_packages)
-
- # Filelists and Other cb
- def newpkgcb(pkgId, name, arch):
- return added_packages.get(pkgId, None)
-
- # Write out filelists delta
-        if fil_md and fil_md.delta_f and fil_md.new_fn:
- cr.xml_parse_filelists(fil_md.new_fn, newpkgcb=newpkgcb)
- fil_md.delta_f.set_num_of_pkgs(num_of_packages)
- for pkgid in added_packages_ids:
- fil_md.delta_f.add_pkg(added_packages[pkgid])
- fil_md.delta_f.close()
-
- # Write out other delta
-        if oth_md and oth_md.delta_f and oth_md.new_fn:
- cr.xml_parse_other(oth_md.new_fn, newpkgcb=newpkgcb)
- oth_md.delta_f.set_num_of_pkgs(num_of_packages)
- for pkgid in added_packages_ids:
- oth_md.delta_f.add_pkg(added_packages[pkgid])
- oth_md.delta_f.close()
-
- # Write out primary delta
-        # Note: Writing of the primary delta has to come after the parsing of
-        # filelists, otherwise files would be missing when do_new_primary_files was 0
- pri_md.delta_f.set_num_of_pkgs(num_of_packages)
- for pkgid in added_packages_ids:
- pri_md.delta_f.add_pkg(added_packages[pkgid])
- pri_md.delta_f.close()
-
- # Finish metadata
- def finish_metadata(md):
- if md is None:
- return
-
- # Close XML file
- md.delta_f.close()
-
- # Prepare repomd record of xml file
- rec = cr.RepomdRecord(md.metadata_type, md.delta_fn)
- rec.load_contentstat(md.delta_f_stat)
- rec.fill(md.checksum_type)
- if unique_md_filenames:
- rec.rename_file()
-
- md.generated_repomd_records.append(rec)
-
- # Prepare database
- if hasattr(md, "db") and md.db:
- md.db.dbinfo_update(rec.checksum)
- md.db.close()
- db_stat = cr.ContentStat(md.checksum_type)
- db_compressed = md.db_fn+".bz2"
- cr.compress_file(md.db_fn, None, cr.BZ2, db_stat)
- os.remove(md.db_fn)
-
-            # Prepare repomd record of database file
- db_rec = cr.RepomdRecord("{0}_db".format(md.metadata_type),
- db_compressed)
- db_rec.load_contentstat(db_stat)
- db_rec.fill(md.checksum_type)
- if unique_md_filenames:
- db_rec.rename_file()
-
- md.generated_repomd_records.append(db_rec)
-
- # Add records to the bundle
-
- finish_metadata(pri_md)
- finish_metadata(fil_md)
- finish_metadata(oth_md)
-
-
-PLUGINS.append(MainDeltaRepoPlugin)
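The contract spelled out in the base-class comments (METADATA, the *_REQUIRED_BUNDLE_KEYS lists and the *_BUNDLE_CONTRIBUTION lists) is easiest to see on a trivial plugin. A minimal sketch against the DeltaRepoPlugin base class above; the plugin itself is hypothetical and intentionally does nothing useful:

    class NoopGroupPlugin(DeltaRepoPlugin):
        """Hypothetical plugin: claims the "group" metadata, requires no
        bundle keys and contributes exactly one marker key."""

        NAME = "NoopGroupPlugin"
        VERSION = 1
        METADATA = ["group"]
        APPLY_REQUIRED_BUNDLE_KEYS = []
        APPLY_BUNDLE_CONTRIBUTION = ["group_done"]
        GEN_REQUIRED_BUNDLE_KEYS = []
        GEN_BUNDLE_CONTRIBUTION = ["group_done"]

        def apply(self, metadata, bundle):
            # The driver loop verifies that a plugin adds exactly the keys
            # listed in APPLY_BUNDLE_CONTRIBUTION - nothing more, nothing less.
            bundle["group_done"] = True

        def gen(self, metadata, bundle):
            bundle["group_done"] = True

    PLUGINS.append(NoopGroupPlugin)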
+++ /dev/null
-
-__all__ = ["DeltaRepoError", "DeltaRepoPluginError"]
-
-class DeltaRepoError(Exception):
- pass
-
-class DeltaRepoPluginError(DeltaRepoError):
- pass
+++ /dev/null
-"""
-DeltaRepo package for Python.
-This is the library for generation, application and handling of
-DeltaRepositories.
-The library is built on the Createrepo_c library and is a part of it.
-
-Copyright (C) 2013 Tomas Mlcoch
-
-"""
-
-import os
-import shutil
-import createrepo_c as cr
-from deltarepo.common import LoggingInterface, Metadata, RemovedXml
-from deltarepo.delta_plugins import PLUGINS
-from deltarepo.errors import DeltaRepoError, DeltaRepoPluginError
-
-__all__ = ['DeltaRepoGenerator']
-
-class DeltaRepoGenerator(LoggingInterface):
-
- def __init__(self,
- old_repo_path,
- new_repo_path,
- out_path=None,
- logger=None,
- repoid_type="sha256",
- compression_type="xz"):
-
- # Initialization
-
- LoggingInterface.__init__(self, logger)
-
- self.out_path = out_path or "./"
-
- self.final_path = os.path.join(self.out_path, "repodata")
-
- self.new_repo_path = new_repo_path
- self.new_repodata_path = os.path.join(self.new_repo_path, "repodata/")
- self.new_repomd_path = os.path.join(self.new_repodata_path, "repomd.xml")
-
- self.old_repo_path = old_repo_path
- self.old_repodata_path = os.path.join(self.old_repo_path, "repodata/")
- self.old_repomd_path = os.path.join(self.old_repodata_path, "repomd.xml")
-
-        self.delta_repo_path = self.out_path
- self.delta_repodata_path = os.path.join(self.delta_repo_path, ".repodata/")
- self.delta_repomd_path = os.path.join(self.delta_repodata_path, "repomd.xml")
-
- # Repoid type
- self.repoid_type_str = repoid_type or "sha256"
- self.compression_type_str = compression_type or "xz"
- self.compression_type = cr.compression_type(self.compression_type_str)
-
- # Prepare Repomd objects
- self.old_repomd = cr.Repomd(self.old_repomd_path)
- self.new_repomd = cr.Repomd(self.new_repomd_path)
- self.delta_repomd = cr.Repomd()
-
- # Use revision and tags
- self.delta_repomd.set_revision(self.new_repomd.revision)
- for tag in self.new_repomd.distro_tags:
- self.delta_repomd.add_distro_tag(tag[1], tag[0])
- for tag in self.new_repomd.repo_tags:
- self.delta_repomd.add_repo_tag(tag)
- for tag in self.new_repomd.content_tags:
- self.delta_repomd.add_content_tag(tag)
-
- # Load records
- self.old_records = {}
- self.new_records = {}
- for record in self.old_repomd.records:
- self.old_records[record.type] = record
- for record in self.new_repomd.records:
- self.new_records[record.type] = record
-
- old_record_types = set(self.old_records.keys())
- new_record_types = set(self.new_records.keys())
-
- self.deleted_repomd_record_types = old_record_types - new_record_types
- self.added_repomd_record_types = new_record_types - old_record_types
-
- # Important sanity checks (repo without primary is definitely bad)
- if not "primary" in self.old_records:
- raise DeltaRepoError("Missing \"primary\" metadata in old repo")
-
- if not "primary" in self.new_records:
- raise DeltaRepoError("Missing \"primary\" metadata in new repo")
-
- # Detect type of checksum in the new repomd.xml (global)
- self.checksum_type = cr.checksum_type(self.new_records["primary"].checksum_type)
- if self.checksum_type == cr.UNKNOWN_CHECKSUM:
- raise DeltaRepoError("Unknown checksum type used in new repo: %s" % \
- self.new_records["primary"].checksum_type)
-
-        # TODO: Do we need to detect the checksum type here at all, when it
-        # is detected again for every single record later anyway?
-
-        # Detect whether unique md filenames are used
-        self.unique_md_filenames = False
-        if self.new_records["primary"].location_href.split("primary")[0] != "":
-            self.unique_md_filenames = True
-
- self.old_id = self.old_repomd.repoid
- self.new_id = self.new_repomd.repoid
-
- # Prepare removed xml object
- self.removedxmlobj = RemovedXml()
-
- # Prepare bundle
- self.bundle = {}
- self.bundle["repoid_type_str"] = self.repoid_type_str
- self.bundle["removed_obj"] = self.removedxmlobj
- self.bundle["unique_md_filenames"] = self.unique_md_filenames
-
- def _new_metadata(self, metadata_type):
- """Return Metadata Object for the metadata_type or None"""
-
- if metadata_type not in self.new_records:
- return None
-
- metadata = Metadata(metadata_type)
-
- # Build new filename
- metadata.new_fn = os.path.join(self.new_repo_path,
- self.new_records[metadata_type].location_href)
-
- # Build old filename
- if metadata_type in self.old_records:
- metadata.old_fn = os.path.join(self.old_repo_path,
- self.old_records[metadata_type].location_href)
-
- # Set output directory
- metadata.out_dir = self.delta_repodata_path
-
-        # Determine checksum type (for this specific repodata)
-        # The detected checksum type should probably be the same as the
-        # globally detected one, but that is not guaranteed
- metadata.checksum_type = cr.checksum_type(
- self.new_records[metadata_type].checksum_type)
-
- # Determine compression type
- metadata.compression_type = cr.detect_compression(metadata.new_fn)
- if (metadata.compression_type == cr.UNKNOWN_COMPRESSION):
- raise DeltaRepoError("Cannot detect compression type for {0}".format(
- metadata.new_fn))
-
- return metadata
-
- def gen(self):
-
- # Prepare output path
- os.mkdir(self.delta_repodata_path)
-
- processed_metadata = set()
- used_plugins = set()
- plugin_used = True
-
- while plugin_used:
-            # Keep iterating while at least one plugin was used in the last pass
- plugin_used = False
-
- for plugin in PLUGINS:
-
- # Use only plugins that haven't been already used
- if plugin in used_plugins:
- continue
-
-                # Check which metadata this plugin wants to process
- conflicting_metadata = set(plugin.METADATA) & processed_metadata
- if conflicting_metadata:
-                    message = "Plugin {0}: Error - Plugin wants to process " \
-                              "already processed metadata {1}".format(
-                              plugin.NAME, conflicting_metadata)
- self._error(message)
- raise DeltaRepoError(message)
-
- # Prepare metadata for the plugin
- metadata_objects = {}
- for metadata_name in plugin.METADATA:
- metadata_object = self._new_metadata(metadata_name)
- if metadata_object is not None:
- metadata_objects[metadata_name] = metadata_object
-
- # Skip plugin if no supported metadata available
- if not metadata_objects:
-                    self._debug("Plugin {0}: Skipped - None of the supported " \
-                                "metadata {1} is available".format(
-                                plugin.NAME, plugin.METADATA))
- used_plugins.add(plugin)
- continue
-
-                # Check if the bundle contains everything the plugin needs
- required_bundle_keys = set(plugin.GEN_REQUIRED_BUNDLE_KEYS)
- bundle_keys = set(self.bundle.keys())
- if not required_bundle_keys.issubset(bundle_keys):
- self._debug("Plugin {0}: Skipped - Bundle keys {1} "\
- "are not available".format(plugin.NAME,
- (required_bundle_keys - bundle_keys)))
- continue
-
- # Use the plugin
- self._debug("Plugin {0}: Active".format(plugin.NAME))
- plugin_instance = plugin()
- plugin_instance.gen(metadata_objects, self.bundle)
-
-                # Check which bundle keys were added by the plugin
- new_bundle_keys = set(self.bundle.keys())
- diff = new_bundle_keys - bundle_keys
- if diff != set(plugin.GEN_BUNDLE_CONTRIBUTION):
-                    message = "Plugin {0}: Error - Plugin should have added " \
-                              "bundle items {1} but added {2}".format(
-                              plugin.NAME, plugin.GEN_BUNDLE_CONTRIBUTION,
-                              list(diff))
- self._error(message)
- raise DeltaRepoError(message)
-
-                # Put repomd records from the processed metadata into the repomd
- for md in metadata_objects.values():
- self._debug("Plugin {0}: Processed \"{1}\" delta record "\
- "which produced:".format(
- plugin.NAME, md.metadata_type))
- for repomd_record in md.generated_repomd_records:
- self._debug(" - {0}".format(repomd_record.type))
- self.delta_repomd.set_record(repomd_record)
-
- # Organization stuff
- processed_metadata.update(set(metadata_objects.keys()))
- used_plugins.add(plugin)
- plugin_used = True
-
- # TODO:
- # Process rest of the metadata files
-
- # Write out removed.xml
- self._debug("Writing removed.xml ...")
- removedxml_xml = self.removedxmlobj.xml_dump()
- removedxml_path = os.path.join(self.delta_repodata_path, "removed.xml")
-
-        if (self.compression_type != cr.UNKNOWN_COMPRESSION):
-            removedxml_path += cr.compression_suffix(self.compression_type)
-            stat = cr.ContentStat(self.checksum_type)
-            f = cr.CrFile(removedxml_path, cr.MODE_WRITE,
-                          self.compression_type, stat)
-            f.write(removedxml_xml)
-            f.close()
-        else:
-            stat = None
-            open(removedxml_path, "w").write(removedxml_xml)
-
-        removedxml_rec = cr.RepomdRecord("removed", removedxml_path)
-        if stat:
-            removedxml_rec.load_contentstat(stat)
- removedxml_rec.fill(self.checksum_type)
- if self.unique_md_filenames:
- removedxml_rec.rename_file()
- self.delta_repomd.set_record(removedxml_rec)
-
- # Check if calculated repoids match
- self._debug("Checking expected repoids")
-
- if not "new_repoid" in self.bundle or not "old_repoid" in self.bundle:
- message = "\"new_repoid\" or \"old_repoid\" is missing in bundle"
- self._error(message)
- raise DeltaRepoError(message)
-
- if self.old_id:
- if self.old_id != self.bundle["old_repoid"]:
- message = "Repoid of the \"{0}\" repository doesn't match "\
- "the real repoid ({1} != {2}).".format(
- self.old_repo_path, self.old_id,
- self.bundle["old_repoid"])
- self._error(message)
- raise DeltaRepoError(message)
- else:
- self._debug("Repoid of the old repo matches ({0})".format(
- self.old_id))
- else:
- self._debug("Repoid of the \"{0}\" is not part of its "\
- "repomd".format(self.old_repo_path))
-
- if self.new_id:
-            if self.new_id != self.bundle["new_repoid"]:
- message = "Repoid of the \"{0}\" repository doesn't match "\
- "the real repoid ({1} != {2}).".format(
- self.new_repo_path, self.new_id,
- self.bundle["new_repoid"])
- self._error(message)
- raise DeltaRepoError(message)
- else:
- self._debug("Repoid of the new repo matches ({0})".format(
- self.new_id))
- else:
- self._debug("Repoid of the \"{0}\" is not part of its "\
- "repomd".format(self.new_repo_path))
-
- # Prepare and write out the new repomd.xml
- self._debug("Preparing repomd.xml ...")
- deltarepoid = "{0}-{1}".format(self.bundle["old_repoid"],
- self.bundle["new_repoid"])
- self.delta_repomd.set_repoid(deltarepoid, self.repoid_type_str)
- self.delta_repomd.sort_records()
- delta_repomd_xml = self.delta_repomd.xml_dump()
-
- self._debug("Writing repomd.xml ...")
- open(self.delta_repomd_path, "w").write(delta_repomd_xml)
-
- # Final move
- if os.path.exists(self.final_path):
- self._warning("Destination dir already exists! Removing %s" % \
- self.final_path)
- shutil.rmtree(self.final_path)
- self._debug("Moving %s -> %s" % (self.delta_repodata_path, self.final_path))
- os.rename(self.delta_repodata_path, self.final_path)
-
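As with the applicator, DeltaRepoGenerator was meant to be driven in a few lines. A minimal sketch using the constructor signature and the __all__ export shown above; the paths are placeholders:

    # Driver sketch for DeltaRepoGenerator (paths are placeholders).
    import logging

    from deltarepo import DeltaRepoGenerator

    logging.basicConfig(level=logging.DEBUG)

    generator = DeltaRepoGenerator("old_repo/",  # repo to make the delta from
                                   "new_repo/",  # repo to make the delta to
                                   out_path="delta_repo/",
                                   logger=logging.getLogger(),
                                   repoid_type="sha256",
                                   compression_type="xz")
    generator.gen()  # creates delta_repo/repodata/, including removed.xml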
COMMENT "Building C API documentation with Doxygen" VERBATIM)
endif(DOXYGEN_FOUND)
-INSTALL(FILES createrepo_c.8.gz mergerepo_c.8.gz
+INSTALL(FILES createrepo_c.8 mergerepo_c.8 modifyrepo_c.8 sqliterepo_c.8
DESTINATION share/man/man8
COMPONENT bin)
--- /dev/null
+.\" Man page generated from reStructuredText.
+.
+.TH CREATEREPO_C 8 "2015-10-20" "" ""
+.SH NAME
+createrepo_c \- Create rpm-md format (xml-rpm-metadata) repository
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" -*- coding: utf-8 -*-
+.
+.SH SYNOPSIS
+.sp
+createrepo_c [options] <directory>
+.SH OPTIONS
+.SS \-V \-\-version
+.sp
+Show program\(aqs version number and exit.
+.SS \-q \-\-quiet
+.sp
+Run quietly.
+.SS \-v \-\-verbose
+.sp
+Run verbosely.
+.SS \-x \-\-excludes <packages>
+.sp
+File globs to exclude, can be specified multiple times.
+.SS \-\-basedir <basedir>
+.sp
+Basedir for path to directories.
+.SS \-u \-\-baseurl <URL>
+.sp
+Optional base URL location for all files.
+.SS \-g \-\-groupfile GROUPFILE
+.sp
+Path to groupfile to include in metadata.
+.SS \-s \-\-checksum <checksum_type>
+.sp
+Choose the checksum type used in repomd.xml and for packages in the metadata. The default is now sha256.
+.SS \-p \-\-pretty
+.sp
+Make sure all generated xml is formatted (default).
+.SS \-d \-\-database
+.sp
+Generate sqlite databases for use with yum.
+.SS \-\-no\-database
+.sp
+Do not generate sqlite databases in the repository.
+.SS \-\-update
+.sp
+If metadata already exists in the outputdir and an rpm is unchanged (based on file size and mtime) since the metadata was generated, reuse the existing metadata rather than recalculating it. In the case of a large repository with only a few new or modified rpms this can significantly reduce I/O and processing time.
+.SS \-\-update\-md\-path
+.sp
+Use the existing repodata for \-\-update from this path.
+.SS \-\-skip\-stat
+.sp
+Skip the stat() call on a \-\-update, assumes if the filename is the same then the file is still the same (only use this if you\(aqre fairly trusting or gullible).
+.SS \-i \-\-pkglist <filename>
+.sp
+Specify a text file which contains the complete list of files to include in the repository from the set found in the directory. File format is one package per line, no wildcards or globs.
+.SS \-n \-\-includepkg <packages>
+.sp
+Specify pkgs to include on the command line. Takes urls as well as local paths.
+.SS \-o \-\-outputdir <URL>
+.sp
+Optional output directory.
+.SS \-S \-\-skip\-symlinks
+.sp
+Ignore symlinks of packages.
+.SS \-\-changelog\-limit <number>
+.sp
+Only import the last N changelog entries, from each rpm, into the metadata.
+.SS \-\-unique\-md\-filenames
+.sp
+Include the file\(aqs checksum in the metadata filename, helps HTTP caching (default).
+.SS \-\-simple\-md\-filenames
+.sp
+Do not include the file\(aqs checksum in the metadata filename.
+.SS \-\-retain\-old\-md <N>
+.sp
+Keep around the latest (by timestamp) N copies of the old repodata.
+.SS \-\-distro DISTRO
+.sp
+Distro tag and optional cpeid: \-\-distro\(aqcpeid,textname\(aq.
+.SS \-\-content CONTENT_TAGS
+.sp
+Tags for the content in the repository.
+.SS \-\-repo REPO_TAGS
+.sp
+Tags to describe the repository itself.
+.SS \-\-revision REVISION
+.sp
+User\-specified revision for this repository.
+.SS \-\-read\-pkgs\-list READ_PKGS_LIST
+.sp
+Output the paths to the pkgs actually read; useful with \-\-update.
+.SS \-\-workers
+.sp
+Number of workers to spawn to read rpms.
+.SS \-\-xz
+.sp
+Use xz for repodata compression.
+.SS \-\-compress\-type <compress_type>
+.sp
+Which compression type to use.
+.SS \-\-general\-compress\-type <compress_type>
+.sp
+Which compression type to use (even for primary, filelists and other xml).
+.SS \-\-keep\-all\-metadata
+.sp
+Keep groupfile and updateinfo from source repo during update.
+.SS \-\-compatibility
+.sp
+Enforce maximal compatibility with classical createrepo.
+.SS \-\-retain\-old\-md\-by\-age AGE
+.sp
+During \-\-update, remove all files in repodata/ which are older than the specified period of time (e.g. \(aq2h\(aq, \(aq30d\(aq, ...). Available units: m \- minutes, h \- hours, d \- days.
+.SS \-c \-\-cachedir CACHEDIR
+.sp
+Set path to cache dir.
+.SS \-\-deltas
+.sp
+Tells createrepo to generate deltarpms and the delta metadata.
+.SS \-\-oldpackagedirs PATH
+.sp
+Paths to look for older pkgs to delta against. Can be specified multiple times.
+.SS \-\-num\-deltas INT
+.sp
+The number of older versions to make deltas against. Defaults to 1.
+.SS \-\-max\-delta\-rpm\-size MAX_DELTA_RPM_SIZE
+.sp
+Max size of an rpm that to run deltarpm against (in bytes).
+.SS \-\-local\-sqlite
+.sp
+Gen sqlite DBs locally (into a directory for temporary files). Sometimes sqlite has trouble generating DBs on an NFS mount; use this option in such cases. This option could lead to higher memory consumption if TMPDIR is set to /tmp or not set at all, because /tmp is then used and it is often a ramdisk.
+.SS \-\-cut\-dirs NUM
+.sp
+Ignore NUM directory components of location_href during repodata generation.
+.SS \-\-location\-prefix PREFIX
+.sp
+Prepend this prefix to location_href in the output repodata.
+.SS \-\-repomd\-checksum <checksum_type>
+.sp
+Checksum type to be used in repomd.xml.
+.SS \-\-ignore\-lock
+.sp
+Expert (risky) option: Ignore an existing .repodata/. (Remove the existing .repodata/ and create an empty new one to serve as a lock for other createrepo instances. For the repodata generation, a different temporary dir with a name in the format .repodata.time.microseconds.pid/ will be used.) NOTE: Use this option at your own risk! If two createrepos run simultaneously, the state of the generated metadata is not guaranteed \- it can be inconsistent and wrong.
+.\" Generated by docutils manpage writer.
+.
--- /dev/null
+.\" Man page generated from reStructuredText.
+.
+.TH MERGEREPO_C 8 "2015-10-20" "" ""
+.SH NAME
+mergerepo_c \- Merge multiple rpm-md format repositories together
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" -*- coding: utf-8 -*-
+.
+.SH SYNOPSIS
+.sp
+mergerepo_c [options] \-\-repo repo1 \-\-repo repo2
+.SH OPTIONS
+.SS \-\-version
+.sp
+Show program\(aqs version number and exit
+.SS \-r \-\-repo REPOS
+.sp
+Repo url
+.SS \-a \-\-archlist ARCHLIST
+.sp
+Defaults to all arches \- otherwise specify arches
+.SS \-d \-\-database
+.SS \-\-no\-database
+.SS \-v \-\-verbose
+.SS \-o \-\-outputdir OUTPUTDIR
+.sp
+Location to create the repository
+.SS \-\-nogroups
+.sp
+Do not merge group (comps) metadata
+.SS \-\-noupdateinfo
+.sp
+Do not merge updateinfo metadata
+.SS \-\-compress\-type COMPRESS_TYPE
+.sp
+Which compression type to use
+.SS \-\-method MERGE_METHOD
+.sp
+Specify merge method for packages with the same name and arch (available merge methods: repo (default), ts, nvr)
+.SS \-\-all
+.sp
+Include all packages with the same name and arch if their version or release differs. If used, the \-\-method argument is ignored!
+.SS \-\-noarch\-repo URL
+.sp
+Packages with noarch architecture will be replaced by packages from this repo if they exist in it.
+.SS \-\-unique\-md\-filenames
+.sp
+Include the file\(aqs checksum in the metadata filename, helps HTTP caching (default).
+.SS \-\-simple\-md\-filenames
+.sp
+Do not include the file\(aqs checksum in the metadata filename.
+.SS \-\-omit\-baseurl
+.sp
+Don\(aqt add a baseurl to packages that don\(aqt have one before.
+.SS \-k \-\-koji
+.sp
+Enable koji mergerepos behaviour.
+.SS \-g \-\-groupfile GROUPFILE
+.sp
+Path to groupfile to include in metadata.
+.SS \-b \-\-blocked FILE
+.sp
+A file containing a list of srpm names to exclude from the merged repo. Only works in combination with \-\-koji/\-k.
+.\" Generated by docutils manpage writer.
+.
--- /dev/null
+.\" Man page generated from reStructuredText.
+.
+.TH MODIFYREPO_C 8 "2015-10-20" "" ""
+.SH NAME
+modifyrepo_c \- Modify a repomd.xml of rpm-md format repository
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" -*- coding: utf-8 -*-
+.
+.SH SYNOPSIS
+.sp
+modifyrepo_c [options] <input metadata> <output repodata>
+.sp
+modifyrepo_c \-\-remove <metadata type> <output repodata>
+.sp
+modifyrepo_c [options] \-\-batchfile <batch file> <output repodata>
+.SH OPTIONS
+.SS \-\-version
+.sp
+Show program\(aqs version number and exit.
+.SS \-\-mdtype MDTYPE
+.sp
+Specific datatype of the metadata, will be derived from the filename if not specified.
+.SS \-\-remove
+.sp
+Remove specified file from repodata.
+.SS \-\-compress
+.sp
+Compress the new repodata before adding it to the repo. (default)
+.SS \-\-no\-compress
+.sp
+Do not compress the new repodata before adding it to the repo.
+.SS \-\-compress\-type COMPRESS_TYPE
+.sp
+Compression format to use.
+.SS \-s \-\-checksum SUMTYPE
+.sp
+Specify the checksum type to use. (default: sha256)
+.SS \-\-unique\-md\-filenames
+.sp
+Include the file\(aqs checksum in the filename, helps with proxies. (default)
+.SS \-\-simple\-md\-filenames
+.sp
+Do not include the file\(aqs checksum in the filename.
+.SS \-\-verbose
+.sp
+Verbose output.
+.SS \-f \-\-batchfile BATCHFILE
+.sp
+Batch file.
+.SS \-\-new\-name NEWFILENAME
+.sp
+New filename for the file.
+.\" Generated by docutils manpage writer.
+.
ADD_CUSTOM_TARGET (doc-python
PYTHONPATH=${CMAKE_BINARY_DIR}/src/python sphinx-build -E -b html
- ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/html
COMMENT "Building Python API documentation with Sphinx")
--- /dev/null
+.\" Man page generated from reStructuredText.
+.
+.TH SQLITEREPO_C 8 "2015-10-20" "" ""
+.SH NAME
+sqliterepo_c \- Generate sqlite db files for a repository in rpm-md format
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" -*- coding: utf-8 -*-
+.
+.SH SYNOPSIS
+.sp
+sqliterepo_c [options] <repo_directory>
+.SH OPTIONS
+.SS \-V \-\-version
+.sp
+Show program\(aqs version number and exit.
+.SS \-q \-\-quiet
+.sp
+Run quietly.
+.SS \-v \-\-verbose
+.sp
+Run verbosely.
+.SS \-f \-\-force
+.sp
+Overwrite existing DBs.
+.SS \-\-keep\-old
+.sp
+Do not remove old DBs. Use only in combination with \-\-force.
+.SS \-\-xz
+.sp
+Use xz for repodata compression.
+.SS \-\-compress\-type <compress_type>
+.sp
+Which compression type to use.
+.SS \-\-checksum <checksum_type>
+.sp
+Which checksum type to use in repomd.xml for sqlite DBs.
+.SS \-\-local\-sqlite
+.sp
+Gen sqlite DBs locally (into a directory for temporary files). Sometimes sqlite has trouble generating DBs on an NFS mount; use this option in such cases. This option could lead to higher memory consumption if TMPDIR is set to /tmp or not set at all, because /tmp is then used and it is often a ramdisk.
+.\" Generated by docutils manpage writer.
+.
--- /dev/null
+#!/usr/bin/env python
+
+import sys
+import os
+import os.path
+import hashlib
+
+import createrepo_c as cr
+
+REPO_PATH = "repo/"
+
+class CalculationException(Exception):
+ pass
+
+def calculate_contenthash(path):
+ if not os.path.isdir(path) or \
+ not os.path.isdir(os.path.join(path, "repodata/")):
+ raise AttributeError("Not a repo: {0}".format(path))
+
+ repomd_path = os.path.join(path, "repodata/repomd.xml")
+ repomd = cr.Repomd(repomd_path)
+
+ primary_path = None
+ for rec in repomd.records:
+ if rec.type == "primary":
+ primary_path = rec.location_href
+ break
+
+ if not primary_path:
+ raise CalculationException("primary metadata are missing")
+
+ pkgids = []
+
+ def pkgcb(pkg):
+ pkgids.append("{0}{1}{2}".format(pkg.pkgId,
+ pkg.location_href,
+ pkg.location_base or ''))
+
+ cr.xml_parse_primary(os.path.join(path, primary_path),
+ pkgcb=pkgcb)
+
+ contenthash = hashlib.new("sha256")
+ for pkgid in sorted(pkgids):
+ contenthash.update(pkgid)
+ return contenthash.hexdigest()
+
+if __name__ == "__main__":
+ path = REPO_PATH
+ if len(sys.argv) == 2:
+ path = sys.argv[1]
+ print calculate_contenthash(path)
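The content hash computed here concatenates the same pkgId, location_href and location_base fields as the RepoId calculation in the deltarepo plugins removed earlier in this patch, so it can be used to check whether a delta applies to a given repository. The function can also be called directly; a short sketch with a placeholder path:

    # Usage sketch for calculate_contenthash() (placeholder path).
    print calculate_contenthash("/srv/repos/myrepo/")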
--- /dev/null
+#!/usr/bin/env python
+
+import os
+import os.path
+
+import createrepo_c as cr
+
+REPO_PATH = "repo/"
+
+def parse_repomd(path):
+ repomd = cr.Repomd(path)
+ print "Revision:", repomd.revision
+ if repomd.contenthash:
+ print "Contenthash:", repomd.contenthash
+ print "Contenthash type:", repomd.contenthash_type
+ print "Repo tags:", repomd.repo_tags
+ print "Content tags:", repomd.content_tags
+ print "Distro tags:", repomd.distro_tags
+ print
+ for rec in repomd.records:
+ print "Type:", rec.type
+ print "Location href:", rec.location_href
+ print "Location base:", rec.location_base
+ print "Checksum:", rec.checksum
+ print "Checksum type:", rec.checksum_type
+ print "Checksum open:", rec.checksum_open
+ print "Checksum open type:", rec.checksum_open_type
+ print "Timestamp:", rec.timestamp
+ print "Size:", rec.size
+ print "Size open:", rec.size_open
+ if rec.db_ver:
+ print "Db version:", rec.db_ver
+ print
+
+if __name__ == "__main__":
+ repomd_path = os.path.join(REPO_PATH, "repodata/repomd.xml")
+ parse_repomd(repomd_path)
if not os.path.exists(new_repodata_path):
shutil.move(repodata_path, new_repodata_path)
break
+ x += 1
os.mkdir(repodata_path)
# Prepare metadata files
pri_xml_path = os.path.join(repodata_path, "primary.xml.gz")
fil_xml_path = os.path.join(repodata_path, "filelists.xml.gz")
oth_xml_path = os.path.join(repodata_path, "other.xml.gz")
- pri_db_path = os.path.join(repodata_path, "primary.sqlite.gz")
- fil_db_path = os.path.join(repodata_path, "filelists.sqlite.gz")
- oth_db_path = os.path.join(repodata_path, "other.sqlite.gz")
+ pri_db_path = os.path.join(repodata_path, "primary.sqlite")
+ fil_db_path = os.path.join(repodata_path, "filelists.sqlite")
+ oth_db_path = os.path.join(repodata_path, "other.sqlite")
pri_xml = cr.PrimaryXmlFile(pri_xml_path)
fil_xml = cr.FilelistsXmlFile(fil_xml_path)
# Process all packages
for filename in pkg_list:
pkg = cr.package_from_rpm(filename)
+ pkg.location_href = os.path.basename(filename)
print "Processing: %s" % pkg.nevra()
pri_xml.add_pkg(pkg)
fil_xml.add_pkg(pkg)
--- /dev/null
+#!/usr/bin/env python
+"""
+An example of inserting updateinfo.xml into repodata.
+"""
+import os
+import shutil
+
+import createrepo_c as cr
+
+REPO_PATH = "repo/"
+
+def modifyrepo(filename, repodata):
+ repodata = os.path.join(repodata, 'repodata')
+ uinfo_xml = os.path.join(repodata, os.path.basename(filename))
+ shutil.copyfile(filename, uinfo_xml)
+
+ uinfo_rec = cr.RepomdRecord('updateinfo', uinfo_xml)
+ uinfo_rec.fill(cr.SHA256)
+ uinfo_rec.rename_file()
+
+ repomd_xml = os.path.join(repodata, 'repomd.xml')
+ repomd = cr.Repomd(repomd_xml)
+ repomd.set_record(uinfo_rec)
+    with open(repomd_xml, 'w') as repomd_file:
+ repomd_file.write(repomd.xml_dump())
+
+
+if __name__ == '__main__':
+ # Generate the updateinfo.xml
+ execfile('updateinfo_gen_02.py')
+
+ modifyrepo(OUT_FILE, REPO_PATH)
--- /dev/null
+#!/usr/bin/python
+
+import datetime
+import createrepo_c as cr
+
+
+def generate():
+ pkg = cr.UpdateCollectionPackage()
+ pkg.name = "Foo"
+ pkg.version = "1.2.3"
+ pkg.release = "1"
+ pkg.epoch = "0"
+ pkg.arch = "noarch"
+ pkg.src = "foo.src.rpm"
+ pkg.filename = "foo-1.2.3-1.rpm"
+ pkg.sum = "123456789"
+ pkg.sum_type = cr.MD5
+ pkg.reboot_suggested = False
+
+ col = cr.UpdateCollection()
+ col.shortname = "Bar-product"
+ col.name = "Bar Product"
+ col.append(pkg)
+
+ ref = cr.UpdateReference()
+ ref.href = "http://foo.bar/foobar"
+ ref.id = "123"
+ ref.type = "self"
+ ref.title = "Foo Update"
+
+ rec = cr.UpdateRecord()
+ rec.fromstr = "security@foo.bar"
+ rec.status = "final"
+ rec.type = "enhancement"
+ rec.version = "1"
+ rec.id = "UPDATE-1"
+ rec.title = "Bar Product Update"
+ rec.issued_date = datetime.datetime(2014, 8, 14)
+ rec.updated_date = datetime.datetime(2014, 8, 14)
+ rec.rights = "Copyright 2014 Bar Inc"
+ rec.summary = "An update for Bar"
+ rec.description = "Fixes a bug"
+ rec.append_collection(col)
+ rec.append_reference(ref)
+
+ ui = cr.UpdateInfo()
+ ui.append(rec)
+
+ print ui.xml_dump(),
+
+if __name__ == "__main__":
+ generate()
--- /dev/null
+#!/usr/bin/python
+
+import datetime
+import createrepo_c as cr
+
+OUT_FILE = "updateinfo.xml.gz"
+
+def generate():
+ pkg = cr.UpdateCollectionPackage()
+ pkg.name = "Foo"
+ pkg.version = "1.2.3"
+ pkg.release = "1"
+ pkg.epoch = "0"
+ pkg.arch = "noarch"
+ pkg.src = "foo.src.rpm"
+ pkg.filename = "foo-1.2.3-1.rpm"
+ pkg.sum = "123456789"
+ pkg.sum_type = cr.MD5
+ pkg.reboot_suggested = False
+
+ col = cr.UpdateCollection()
+ col.shortname = "Bar-product"
+ col.name = "Bar Product"
+ col.append(pkg)
+
+ ref = cr.UpdateReference()
+ ref.href = "http://foo.bar/foobar"
+ ref.id = "123"
+ ref.type = "self"
+ ref.title = "Foo Update"
+
+ rec = cr.UpdateRecord()
+ rec.fromstr = "security@foo.bar"
+ rec.status = "final"
+ rec.type = "enhancement"
+ rec.version = "1"
+ rec.id = "UPDATE-1"
+ rec.title = "Bar Product Update"
+ rec.issued_date = datetime.datetime(2014, 8, 14)
+ rec.updated_date = datetime.datetime(2014, 8, 14)
+ rec.rights = "Copyright 2014 Bar Inc"
+ rec.summary = "An update for Bar"
+ rec.description = "Fixes a bug"
+ rec.append_collection(col)
+ rec.append_reference(ref)
+
+ chunk = cr.xml_dump_updaterecord(rec)
+
+ f = cr.UpdateInfoXmlFile(OUT_FILE)
+ f.add_chunk(chunk)
+ f.close()
+
+ print "See the %s" % OUT_FILE
+
+
+if __name__ == "__main__":
+ generate()
--- /dev/null
+#!/usr/bin/env python
+
+import os
+import os.path
+import optparse
+
+import createrepo_c as cr
+
+
+def parse_updateinfo(path):
+ uinfo = cr.UpdateInfo(path)
+ for update in uinfo.updates:
+ print "From: %s" % update.fromstr
+ print "Status: %s" % update.status
+ print "Type: %s" % update.type
+ print "Version: %s" % update.version
+ print "Id: %s" % update.id
+ print "Title: %s" % update.title
+ print "Issued date: %s" % update.issued_date
+ print "Updated date: %s" % update.updated_date
+ print "Rights: %s" % update.rights
+ print "Release: %s" % update.release
+ print "Pushcount: %s" % update.pushcount
+ print "Severity: %s" % update.severity
+ print "Summary: %s" % update.summary
+ print "Description: %s" % update.description
+ print "Solution: %s" % update.solution
+ print "References:"
+ for ref in update.references:
+ print " Href: %s" % ref.href
+ print " Id: %s" % ref.id
+ print " Type: %s" % ref.type
+ print " Title: %s" % ref.title
+ print " ----------------------------"
+ print "Pkglist (collections):"
+ for col in update.collections:
+ print " Short: %s" % col.shortname
+ print " name: %s" % col.name
+ print " Packages:"
+ for pkg in col.packages:
+ print " Name: %s" % pkg.name
+ print " Version: %s" % pkg.version
+ print " Release: %s" % pkg.release
+ print " Epoch: %s" % pkg.epoch
+ print " Arch: %s" % pkg.arch
+ print " Src: %s" % pkg.src
+ print " Filename: %s" % pkg.filename
+ print " Sum: %s" % pkg.sum
+ print " Sum type: %s (%s)" % (pkg.sum_type, cr.checksum_name_str(pkg.sum_type))
+ print " Reboot suggested: %s" % pkg.reboot_suggested
+ print " ----------------------------"
+ print "=============================="
+
+
+if __name__ == "__main__":
+ parser = optparse.OptionParser(usage="%prog PATH_TO_UPDATEINFO")
+ options, args = parser.parse_args()
+ if len(args) != 1:
+ parser.error("You have to specify exactly one update info")
+ parse_updateinfo(args[0])
SET (createrepo_c_SRCS
checksum.c
compression_wrapper.c
+ createrepo_shared.c
+ deltarpms.c
+ dumper_thread.c
error.c
+ helpers.c
load_metadata.c
locate_metadata.c
misc.c
repomd.c
sqlite.c
threads.c
+ updateinfo.c
xml_dump.c
+ xml_dump_deltapackage.c
xml_dump_filelists.c
xml_dump_other.c
xml_dump_primary.c
xml_dump_repomd.c
+ xml_dump_updateinfo.c
xml_file.c
xml_parser.c
xml_parser_filelists.c
xml_parser_other.c
xml_parser_primary.c
- xml_parser_repomd.c)
+ xml_parser_repomd.c
+ xml_parser_updateinfo.c)
SET(headers
checksum.h
compression_wrapper.h
constants.h
createrepo_c.h
+ deltarpms.h
error.h
+ helpers.h
load_metadata.h
locate_metadata.h
misc.h
repomd.h
sqlite.h
threads.h
+ updateinfo.h
version.h
xml_dump.h
xml_file.h
TARGET_LINK_LIBRARIES(libcreaterepo_c ${RPMDB_LIBRARY})
TARGET_LINK_LIBRARIES(libcreaterepo_c ${SQLITE3_LIBRARIES})
TARGET_LINK_LIBRARIES(libcreaterepo_c ${ZLIB_LIBRARY})
+IF (DRPM_LIBRARY)
+ TARGET_LINK_LIBRARIES(libcreaterepo_c ${DRPM_LIBRARY})
+ENDIF (DRPM_LIBRARY)
SET_TARGET_PROPERTIES(libcreaterepo_c PROPERTIES
${GLIB2_LIBRARIES}
${GTHREAD2_LIBRARIES})
+ADD_EXECUTABLE(sqliterepo_c sqliterepo_c.c)
+TARGET_LINK_LIBRARIES(sqliterepo_c
+ libcreaterepo_c
+ ${GLIB2_LIBRARIES}
+ ${GTHREAD2_LIBRARIES})
+
CONFIGURE_FILE("createrepo_c.pc.cmake" "${CMAKE_SOURCE_DIR}/src/createrepo_c.pc" @ONLY)
CONFIGURE_FILE("version.h.in" "${CMAKE_CURRENT_SOURCE_DIR}/version.h" @ONLY)
+CONFIGURE_FILE("deltarpms.h.in" "${CMAKE_CURRENT_SOURCE_DIR}/deltarpms.h" @ONLY)
IF (CMAKE_SIZEOF_VOID_P MATCHES "8")
SET (LIB_SUFFIX "64")
INSTALL(TARGETS createrepo_c DESTINATION bin/)
INSTALL(TARGETS mergerepo_c DESTINATION bin/)
INSTALL(TARGETS modifyrepo_c DESTINATION bin/)
+INSTALL(TARGETS sqliterepo_c DESTINATION bin/)
ADD_SUBDIRECTORY(python)
#include "error.h"
#include "checksum.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define MAX_CHECKSUM_NAME_LEN 7
#define BUFFER_SIZE 2048
case CR_CHECKSUM_SHA512: ctx_type = EVP_sha512(); break;
case CR_CHECKSUM_UNKNOWN:
default:
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_UNKNOWNCHECKSUMTYPE,
+ g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCHECKSUMTYPE,
"Unknown checksum type");
return NULL;
}
f = fopen(filename, "rb");
if (!f) {
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_IO,
- "Cannot open a file: %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open a file: %s", g_strerror(errno));
return NULL;
}
ctx = EVP_MD_CTX_create();
rc = EVP_DigestInit_ex(ctx, ctx_type, NULL);
if (!rc) {
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_OPENSSL,
+ g_set_error(err, ERR_DOMAIN, CRE_OPENSSL,
"EVP_DigestInit_ex() failed");
EVP_MD_CTX_destroy(ctx);
fclose(f);
if (feof(f)) {
EVP_DigestUpdate(ctx, buf, readed);
} else {
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_IO,
- "Error while reading a file: %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Error while reading a file: %s", g_strerror(errno));
EVP_MD_CTX_destroy(ctx);
fclose(f);
return NULL;
case CR_CHECKSUM_SHA512: ctx_type = EVP_sha512(); break;
case CR_CHECKSUM_UNKNOWN:
default:
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_UNKNOWNCHECKSUMTYPE,
+ g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCHECKSUMTYPE,
"Unknown checksum type");
return NULL;
}
ctx = EVP_MD_CTX_create();
if (!ctx) {
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_OPENSSL,
+ g_set_error(err, ERR_DOMAIN, CRE_OPENSSL,
"EVP_MD_CTX_create() failed");
return NULL;
}
if (!EVP_DigestInit_ex(ctx, ctx_type, NULL)) {
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_OPENSSL,
+ g_set_error(err, ERR_DOMAIN, CRE_OPENSSL,
"EVP_DigestInit_ex() failed");
EVP_MD_CTX_destroy(ctx);
return NULL;
return CRE_OK;
if (!EVP_DigestUpdate(ctx->ctx, buf, len)) {
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_OPENSSL,
+ g_set_error(err, ERR_DOMAIN, CRE_OPENSSL,
"EVP_DigestUpdate() failed");
return CRE_OPENSSL;
}
assert(!err || *err == NULL);
if (!EVP_DigestFinal_ex(ctx->ctx, raw_checksum, &len)) {
- g_set_error(err, CR_CHECKSUM_ERROR, CRE_OPENSSL,
+ g_set_error(err, ERR_DOMAIN, CRE_OPENSSL,
"EVP_DigestFinal_ex() failed");
EVP_MD_CTX_destroy(ctx->ctx);
g_free(ctx);
#ifndef __C_CREATEREPOLIB_CHECKSUM_H__
#define __C_CREATEREPOLIB_CHECKSUM_H__
+#include <glib.h>
+
#ifdef __cplusplus
extern "C" {
#endif
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ *
+ * Copyright (C) 2012 Colin Walters <walters@verbum.org>.
+ * Copyright (C) 2014 Richard Hughes <richard@hughsie.com>
+ * Copyright (C) 2012 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+/* This file was taken from libhif (https://github.com/hughsie/libhif) */
+
+#ifndef __CR_CLEANUP_H__
+#define __CR_CLEANUP_H__
+
+#include <stdio.h>
+#include <glib.h>
+#include <unistd.h>
+
+G_BEGIN_DECLS
+
+static void
+my_close(int fildes)
+{
+ if (fildes < 0)
+ return;
+ close(fildes);
+}
+
+#define CR_DEFINE_CLEANUP_FUNCTION(Type, name, func) \
+ static inline void name (void *v) \
+ { \
+ func (*(Type*)v); \
+ }
+
+#define CR_DEFINE_CLEANUP_FUNCTION0(Type, name, func) \
+ static inline void name (void *v) \
+ { \
+ if (*(Type*)v) \
+ func (*(Type*)v); \
+ }
+
+#define CR_DEFINE_CLEANUP_FUNCTIONt(Type, name, func) \
+ static inline void name (void *v) \
+ { \
+ if (*(Type*)v) \
+ func (*(Type*)v, TRUE); \
+ }
+
+CR_DEFINE_CLEANUP_FUNCTION0(FILE*, cr_local_file_fclose, fclose)
+CR_DEFINE_CLEANUP_FUNCTION0(GArray*, cr_local_array_unref, g_array_unref)
+CR_DEFINE_CLEANUP_FUNCTION0(GChecksum*, cr_local_checksum_free, g_checksum_free)
+CR_DEFINE_CLEANUP_FUNCTION0(GDir*, cr_local_dir_close, g_dir_close)
+CR_DEFINE_CLEANUP_FUNCTION0(GError*, cr_local_free_error, g_error_free)
+CR_DEFINE_CLEANUP_FUNCTION0(GHashTable*, cr_local_hashtable_unref, g_hash_table_unref)
+CR_DEFINE_CLEANUP_FUNCTION0(GKeyFile*, cr_local_keyfile_free, g_key_file_free)
+#if GLIB_CHECK_VERSION(2, 32, 0)
+CR_DEFINE_CLEANUP_FUNCTION0(GKeyFile*, cr_local_keyfile_unref, g_key_file_unref)
+#endif
+CR_DEFINE_CLEANUP_FUNCTION0(GPtrArray*, cr_local_ptrarray_unref, g_ptr_array_unref)
+CR_DEFINE_CLEANUP_FUNCTION0(GTimer*, cr_local_destroy_timer, g_timer_destroy)
+
+CR_DEFINE_CLEANUP_FUNCTIONt(GString*, cr_local_free_string, g_string_free)
+
+CR_DEFINE_CLEANUP_FUNCTION(char**, cr_local_strfreev, g_strfreev)
+CR_DEFINE_CLEANUP_FUNCTION(GList*, cr_local_free_list, g_list_free)
+CR_DEFINE_CLEANUP_FUNCTION(void*, cr_local_free, g_free)
+CR_DEFINE_CLEANUP_FUNCTION(int, cr_local_file_close, my_close)
+
+#define _cleanup_array_unref_ __attribute__ ((cleanup(cr_local_array_unref)))
+#define _cleanup_checksum_free_ __attribute__ ((cleanup(cr_local_checksum_free)))
+#define _cleanup_dir_close_ __attribute__ ((cleanup(cr_local_dir_close)))
+#define _cleanup_error_free_ __attribute__ ((cleanup(cr_local_free_error)))
+#define _cleanup_file_close_ __attribute__ ((cleanup(cr_local_file_close)))
+#define _cleanup_file_fclose_ __attribute__ ((cleanup(cr_local_file_fclose)))
+#define _cleanup_free_ __attribute__ ((cleanup(cr_local_free)))
+#define _cleanup_hashtable_unref_ __attribute__ ((cleanup(cr_local_hashtable_unref)))
+#define _cleanup_keyfile_free_ __attribute__ ((cleanup(cr_local_keyfile_free)))
+#define _cleanup_keyfile_unref_ __attribute__ ((cleanup(cr_local_keyfile_unref)))
+#define _cleanup_list_free_ __attribute__ ((cleanup(cr_local_free_list)))
+#define _cleanup_ptrarray_unref_ __attribute__ ((cleanup(cr_local_ptrarray_unref)))
+#define _cleanup_string_free_ __attribute__ ((cleanup(cr_local_free_string)))
+#define _cleanup_strv_free_ __attribute__ ((cleanup(cr_local_strfreev)))
+#define _cleanup_timer_destroy_ __attribute__ ((cleanup(cr_local_destroy_timer)))
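+
+/* Usage sketch (illustrative only; relies on the GCC/Clang
+ * __attribute__((cleanup)) extension used above). A variable annotated
+ * with one of these attributes is released automatically when it goes
+ * out of scope, on every return path:
+ *
+ *     static gboolean
+ *     read_some_file(const char *path)  // hypothetical helper
+ *     {
+ *         _cleanup_file_fclose_ FILE *f = fopen(path, "r");
+ *         _cleanup_free_ gchar *buf = g_malloc0(128);
+ *         if (!f)
+ *             return FALSE;  // buf is g_free()d here automatically
+ *         return TRUE;       // f is fclose()d and buf g_free()d here
+ *     }
+ */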
+
+G_END_DECLS
+
+#endif
* USA.
*/
+#include <glib.h>
+#include <glib/gstdio.h>
#include <string.h>
#include <assert.h>
+#include <errno.h>
#include "cmd_parser.h"
+#include "deltarpms.h"
#include "error.h"
#include "compression_wrapper.h"
#include "misc.h"
+#include "cleanup.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define DEFAULT_CHANGELOG_LIMIT 10
#define DEFAULT_CHECKSUM "sha256"
#define DEFAULT_WORKERS 5
#define DEFAULT_UNIQUE_MD_FILENAMES TRUE
-
+#define DEFAULT_IGNORE_LOCK FALSE
+#define DEFAULT_LOCAL_SQLITE FALSE
struct CmdOptions _cmd_options = {
- .changelog_limit = DEFAULT_CHANGELOG_LIMIT,
- .checksum = NULL,
- .workers = DEFAULT_WORKERS,
- .unique_md_filenames = DEFAULT_UNIQUE_MD_FILENAMES,
- .checksum_type = CR_CHECKSUM_SHA256,
- .compression_type = CR_CW_UNKNOWN_COMPRESSION
+ .changelog_limit = DEFAULT_CHANGELOG_LIMIT,
+ .checksum = NULL,
+ .workers = DEFAULT_WORKERS,
+ .unique_md_filenames = DEFAULT_UNIQUE_MD_FILENAMES,
+ .checksum_type = CR_CHECKSUM_SHA256,
+ .retain_old = 0,
+ .compression_type = CR_CW_UNKNOWN_COMPRESSION,
+ .general_compression_type = CR_CW_UNKNOWN_COMPRESSION,
+ .ignore_lock = DEFAULT_IGNORE_LOCK,
+ .md_max_age = G_GINT64_CONSTANT(0),
+ .cachedir = NULL,
+ .local_sqlite = DEFAULT_LOCAL_SQLITE,
+ .cut_dirs = 0,
+ .location_prefix = NULL,
+ .repomd_checksum = NULL,
+
+ .deltas = FALSE,
+ .oldpackagedirs = NULL,
+ .num_deltas = 1,
+ .max_delta_rpm_size = CR_DEFAULT_MAX_DELTA_RPM_SIZE,
+
+ .checksum_cachedir = NULL,
+ .repomd_checksum_type = CR_CHECKSUM_SHA256,
};
{ "verbose", 'v', 0, G_OPTION_ARG_NONE, &(_cmd_options.verbose),
"Run verbosely.", NULL },
{ "excludes", 'x', 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.excludes),
- "File globs to exclude, can be specified multiple times.", "<packages>" },
+ "File globs to exclude, can be specified multiple times.", "PACKAGE_NAME_GLOB" },
{ "basedir", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.basedir),
- "Basedir for path to directories.", "<basedir>" },
+ "Basedir for path to directories.", "BASEDIR" },
{ "baseurl", 'u', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.location_base),
- "Optional base URL location for all files.", "<URL>" },
+ "Optional base URL location for all files.", "URL" },
{ "groupfile", 'g', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.groupfile),
"Path to groupfile to include in metadata.",
"GROUPFILE" },
{ "checksum", 's', 0, G_OPTION_ARG_STRING, &(_cmd_options.checksum),
"Choose the checksum type used in repomd.xml and for packages in the "
- "metadata. The default is now \"sha256\".", "<checksum_type>" },
+ "metadata. The default is now \"sha256\".", "CHECKSUM_TYPE" },
{ "pretty", 'p', 0, G_OPTION_ARG_NONE, &(_cmd_options.pretty),
"Make sure all xml generated is formatted (default)", NULL },
{ "database", 'd', 0, G_OPTION_ARG_NONE, &(_cmd_options.database),
{ "pkglist", 'i', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.pkglist),
"Specify a text file which contains the complete list of files to "
"include in the repository from the set found in the directory. File "
- "format is one package per line, no wildcards or globs.", "<filename>" },
+ "format is one package per line, no wildcards or globs.", "FILENAME" },
{ "includepkg", 'n', 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.includepkg),
"Specify pkgs to include on the command line. Takes urls as well as local paths.",
- "<packages>" },
+ "PACKAGE" },
{ "outputdir", 'o', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.outputdir),
- "Optional output directory.", "<URL>" },
+ "Optional output directory.", "URL" },
{ "skip-symlinks", 'S', 0, G_OPTION_ARG_NONE, &(_cmd_options.skip_symlinks),
"Ignore symlinks of packages.", NULL},
{ "changelog-limit", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.changelog_limit),
"Only import the last N changelog entries, from each rpm, into the metadata.",
- "<number>" },
+ "NUM" },
{ "unique-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.unique_md_filenames),
"Include the file's checksum in the metadata filename, helps HTTP caching (default).",
NULL },
{ "simple-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.simple_md_filenames),
"Do not include the file's checksum in the metadata filename.", NULL },
{ "retain-old-md", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.retain_old),
- "Keep around the latest (by timestamp) N copies of the old repodata.", NULL },
+ "Keep around the latest (by timestamp) N copies of the old repodata.", "NUM" },
{ "distro", 0, 0, G_OPTION_ARG_STRING_ARRAY, &(_cmd_options.distro_tags),
"Distro tag and optional cpeid: --distro'cpeid,textname'.", "DISTRO" },
{ "content", 0, 0, G_OPTION_ARG_STRING_ARRAY, &(_cmd_options.content_tags),
{ "xz", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.xz_compression),
"Use xz for repodata compression.", NULL },
{ "compress-type", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.compress_type),
- "Which compression type to use.", "<compress_type>" },
+ "Which compression type to use.", "COMPRESSION_TYPE" },
+ { "general-compress-type", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.general_compress_type),
+ "Which compression type to use (even for primary, filelists and other xml).",
+ "COMPRESSION_TYPE" },
{ "keep-all-metadata", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.keep_all_metadata),
"Keep groupfile and updateinfo from source repo during update.", NULL },
+ { "compatibility", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.compatibility),
+ "Enforce maximal compatibility with classical createrepo.", NULL },
+ { "retain-old-md-by-age", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.retain_old_md_by_age),
+ "During --update, remove all files in repodata/ which are older "
+ "then the specified period of time. (e.g. '2h', '30d', ...). "
+ "Available units (m - minutes, h - hours, d - days)", "AGE" },
+ { "cachedir", 'c', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.cachedir),
+ "Set path to cache dir", "CACHEDIR." },
+#ifdef CR_DELTA_RPM_SUPPORT
+ { "deltas", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.deltas),
+ "Tells createrepo to generate deltarpms and the delta metadata.", NULL },
+ { "oldpackagedirs", 0, 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.oldpackagedirs),
+ "Paths to look for older pkgs to delta against. Can be specified "
+ "multiple times.", "PATH" },
+ { "num-deltas", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.num_deltas),
+ "The number of older versions to make deltas against. Defaults to 1.", "INT" },
+ { "max-delta-rpm-size", 0, 0, G_OPTION_ARG_INT64, &(_cmd_options.max_delta_rpm_size),
+ "Max size of an rpm that to run deltarpm against (in bytes).", "MAX_DELTA_RPM_SIZE" },
+#endif
+ { "local-sqlite", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.local_sqlite),
+ "Gen sqlite DBs locally (into a directory for temporary files). "
+ "Sometimes, sqlite has a trouble to gen DBs on a NFS mount, "
+ "use this option in such cases. "
+ "This option could lead to a higher memory consumption "
+ "if TMPDIR is set to /tmp or not set at all, because then the /tmp is "
+ "used and /tmp dir is often a ramdisk.", NULL },
+ { "cut-dirs", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.cut_dirs),
+ "Ignore NUM of directory components in location_href during repodata "
+ "generation", "NUM" },
+ { "location-prefix", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.location_prefix),
+ "Append this prefix before location_href in output repodata", "PREFIX" },
+ { "repomd-checksum", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.repomd_checksum),
+ "Checksum type to be used in repomd.xml", "CHECKSUM_TYPE"},
{ NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL },
};
+static GOptionEntry expert_entries[] =
+{
+ { "ignore-lock", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.ignore_lock),
+ "Expert (risky) option: Ignore an existing .repodata/. "
+ "(Remove the existing .repodata/ and create an empty new one "
+ "to serve as a lock for other createrepo intances. For the repodata "
+ "generation, a different temporary dir with the name in format "
+ "\".repodata.time.microseconds.pid/\" will be used). "
+ "NOTE: Use this option on your "
+ "own risk! If two createrepos run simultaneously, then the state of the "
+ "generated metadata is not guaranted - it can be inconsistent and wrong.",
+ NULL },
+ { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL },
+};
+
struct CmdOptions *parse_arguments(int *argc, char ***argv, GError **err)
{
gboolean ret;
GOptionContext *context;
+ GOptionGroup *group_expert;
assert(!err || *err == NULL);
- context = g_option_context_new("- program that creates a repomd (xml-based"
- " rpm metadata) repository from a set of"
- " rpms.");
+ context = g_option_context_new("<directory_to_index>");
+ g_option_context_set_summary(context, "Program that creates a repomd "
+ "(xml-based rpm metadata) repository from a set of rpms.");
g_option_context_add_main_entries(context, cmd_entries, NULL);
+ group_expert = g_option_group_new("expert",
+ "Expert (risky) options",
+ "Expert (risky) options",
+ NULL,
+ NULL);
+ g_option_group_add_entries(group_expert, expert_entries);
+ g_option_context_add_group(context, group_expert);
+
ret = g_option_context_parse(context, argc, argv, err);
g_option_context_free(context);
return &(_cmd_options);
}
+/** Convert a string to a compression type; set an error on failure.
+ * @param type_str      String with compression type (e.g. "gz")
+ * @param type          Pointer to cr_CompressionType variable
+ * @param err           GError ** that will be set in case of error
+ * @return              FALSE if err is set, TRUE otherwise
+ */
+static gboolean
+check_and_set_compression_type(const char *type_str,
+ cr_CompressionType *type,
+ GError **err)
+{
+ assert(!err || *err == NULL);
+
+ _cleanup_string_free_ GString *compress_str = NULL;
+ compress_str = g_string_ascii_down(g_string_new(type_str));
+
+ if (!strcmp(compress_str->str, "gz")) {
+ *type = CR_CW_GZ_COMPRESSION;
+ } else if (!strcmp(compress_str->str, "bz2")) {
+ *type = CR_CW_BZ2_COMPRESSION;
+ } else if (!strcmp(compress_str->str, "xz")) {
+ *type = CR_CW_XZ_COMPRESSION;
+ } else {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "Unknown/Unsupported compression type \"%s\"", type_str);
+ return FALSE;
+ }
+
+ return TRUE;
+}
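+
+/* Usage sketch (illustrative):
+ *
+ *     cr_CompressionType type = CR_CW_UNKNOWN_COMPRESSION;
+ *     GError *tmp_err = NULL;
+ *     if (!check_and_set_compression_type("XZ", &type, &tmp_err))
+ *         g_printerr("%s\n", tmp_err->message);
+ *     // type is now CR_CW_XZ_COMPRESSION; matching is case-insensitive
+ *     // thanks to the g_string_ascii_down() call above.
+ */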
+
+/** Convert a time period to seconds (gint64 value).
+ * Format: "[0-9]+[mhd]?" (a plain number means seconds)
+ * Units: m - minutes, h - hours, d - days
+ * @param timeperiod    Time period
+ * @param time          Time period converted to seconds (gint64) is stored here
+ * @param err           GError ** that will be set in case of error
+ * @return              FALSE if err is set, TRUE otherwise
+ */
+static gboolean
+parse_period_of_time(const gchar *timeperiod, gint64 *time, GError **err)
+{
+ assert(!err || *err == NULL);
+
+ gchar *endptr = NULL;
+ gint64 val = g_ascii_strtoll(timeperiod, &endptr, 0);
+
+ *time = G_GINT64_CONSTANT(0);
+
+ // Check the state of the conversion
+ if (val == 0 && endptr == timeperiod) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "Bad time period \"%s\"", timeperiod);
+ return FALSE;
+ }
+
+ if (val == G_MAXINT64) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "Time period \"%s\" is too high", timeperiod);
+ return FALSE;
+ }
+
+ if (val == G_MININT64) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "Time period \"%s\" is too low", timeperiod);
+ return FALSE;
+ }
+
+ if (!endptr || endptr[0] == '\0') // Secs
+ *time = (gint64) val;
+ else if (!strcmp(endptr, "m")) // Minutes
+ *time = (gint64) val*60;
+ else if (!strcmp(endptr, "h")) // Hours
+ *time = (gint64) val*60*60;
+ else if (!strcmp(endptr, "d")) // Days
+ *time = (gint64) val*24*60*60;
+ else {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "Bad time unit \"%s\"", endptr);
+ return FALSE;
+ }
+ return TRUE;
+}
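+
+/* Worked examples (illustrative): "90" -> 90 s, "30m" -> 1800 s,
+ * "2h" -> 7200 s, "5d" -> 432000 s. */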
gboolean
check_arguments(struct CmdOptions *options,
// Check outputdir
if (options->outputdir && !g_file_test(options->outputdir, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) {
- g_set_error(err, CR_CMD_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Specified outputdir \"%s\" doesn't exists",
options->outputdir);
return FALSE;
}
// Check changelog_limit
- if ((options->changelog_limit < 0) || (options->changelog_limit > 100)) {
+ if ((options->changelog_limit < -1)) {
g_warning("Wrong changelog limit \"%d\" - Using 10", options->changelog_limit);
options->changelog_limit = DEFAULT_CHANGELOG_LIMIT;
}
cr_ChecksumType type;
type = cr_checksum_type(options->checksum);
if (type == CR_CHECKSUM_UNKNOWN) {
- g_set_error(err, CR_CMD_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Unknown/Unsupported checksum type \"%s\"",
options->checksum);
return FALSE;
options->checksum_type = type;
}
+ // Check and set checksum type for repomd
+ if (options->repomd_checksum) {
+ cr_ChecksumType type;
+ type = cr_checksum_type(options->repomd_checksum);
+ if (type == CR_CHECKSUM_UNKNOWN) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "Unknown/Unsupported checksum type \"%s\"",
+ options->repomd_checksum);
+ return FALSE;
+ }
+ options->repomd_checksum_type = type;
+ } else {
+ options->repomd_checksum_type = options->checksum_type;
+ }
+
// Check and set compression type
if (options->compress_type) {
- GString *compress_str = g_string_ascii_down(g_string_new(options->compress_type));
- if (!strcmp(compress_str->str, "gz")) {
- options->compression_type = CR_CW_GZ_COMPRESSION;
- } else if (!strcmp(compress_str->str, "bz2")) {
- options->compression_type = CR_CW_BZ2_COMPRESSION;
- } else if (!strcmp(compress_str->str, "xz")) {
- options->compression_type = CR_CW_XZ_COMPRESSION;
- } else {
- g_string_free(compress_str, TRUE);
- g_set_error(err, CR_CMD_ERROR, CRE_BADARG,
- "Unknown/Unsupported compression type \"%s\"",
- options->compress_type);
+ if (!check_and_set_compression_type(options->compress_type,
+ &(options->compression_type),
+ err)) {
+ return FALSE;
+ }
+ }
+
+ // Check and set general compression type
+ if (options->general_compress_type) {
+ if (!check_and_set_compression_type(options->general_compress_type,
+ &(options->general_compression_type),
+ err)) {
return FALSE;
}
- g_string_free(compress_str, TRUE);
}
int x;
}
if (!remote && !g_file_test(options->groupfile_fullpath, G_FILE_TEST_IS_REGULAR)) {
- g_set_error(err, CR_CMD_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"groupfile %s doesn't exists",
options->groupfile_fullpath);
return FALSE;
// Process pkglist file
if (options->pkglist) {
if (!g_file_test(options->pkglist, G_FILE_TEST_IS_REGULAR)) {
- g_warning("pkglist file \"%s\" doesn't exists", options->pkglist);
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "pkglist file \"%s\" doesn't exists", options->pkglist);
+ return FALSE;
} else {
char *content = NULL;
GError *tmp_err = NULL;
g_strfreev(items);
}
+ // Check retain-old-md-by-age
+ if (options->retain_old_md_by_age) {
+ if (options->retain_old) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "--retain-old-md-by-age cannot be combined "
+ "with --retain-old-md");
+ return FALSE;
+ }
+
+ // Parse argument
+ if (!parse_period_of_time(options->retain_old_md_by_age,
+ &options->md_max_age,
+ err))
+ return FALSE;
+ }
+
+ // Check oldpackagedirs
+ x = 0;
+ while (options->oldpackagedirs && options->oldpackagedirs[x]) {
+ char *path = options->oldpackagedirs[x];
+ options->oldpackagedirs_paths = g_slist_prepend(
+ options->oldpackagedirs_paths,
+ (gpointer) path);
+ x++;
+ }
+
+ // Check cut_dirs
+ if (options->cut_dirs < 0) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "--cur-dirs value must be possitive integer");
+ return FALSE;
+ }
+
return TRUE;
}
g_free(options->groupfile);
g_free(options->groupfile_fullpath);
g_free(options->revision);
+ g_free(options->retain_old_md_by_age);
+ g_free(options->cachedir);
+ g_free(options->checksum_cachedir);
g_strfreev(options->excludes);
g_strfreev(options->includepkg);
g_strfreev(options->distro_tags);
g_strfreev(options->content_tags);
g_strfreev(options->repo_tags);
+ g_strfreev(options->oldpackagedirs);
cr_slist_free_full(options->include_pkgs, g_free);
cr_slist_free_full(options->exclude_masks,
cr_slist_free_full(options->l_update_md_paths, g_free);
cr_slist_free_full(options->distro_cpeids, g_free);
cr_slist_free_full(options->distro_values, g_free);
+ g_slist_free(options->oldpackagedirs_paths);
}
gboolean no_database; /*!< do not create database */
char *checksum; /*!< type of checksum */
char *compress_type; /*!< which compression type to use */
+ char *general_compress_type;/*!< which compression type to use (even for
+ primary, filelists and other xml) */
gboolean skip_symlinks; /*!< ignore symlinks of packages */
- int changelog_limit; /*!< number of changelog messages in
+ gint changelog_limit; /*!< number of changelog messages in
other.(xml|sqlite) */
gboolean unique_md_filenames; /*!< include the file checksums in
the filenames */
gboolean simple_md_filenames; /*!< simple filenames (names without
checksums) */
- int retain_old; /*!< keep latest N copies of the old repodata */
+ gint retain_old; /*!< keep latest N copies of the old repodata */
char **distro_tags; /*!< distro tag and optional cpeid */
char **content_tags; /*!< tags for the content in the repository */
    char **repo_tags;           /*!< tags to describe the repository itself */
char *revision; /*!< user-specified revision */
char *read_pkgs_list; /*!< output the paths to pkgs actually read */
- int workers; /*!< number of threads to spawn */
+ gint workers; /*!< number of threads to spawn */
gboolean xz_compression; /*!< use xz for repodata compression */
gboolean keep_all_metadata; /*!< keep groupfile and updateinfo from source
repo during update */
+ gboolean ignore_lock; /*!< Ignore existing .repodata/ - remove it,
+ create the new one (empty) to serve as
+ a lock and use a .repodata.date.pid for
+ data generation. */
+    gboolean compatibility;     /*!< Enforce maximal compatibility with
+                                     createrepo, i.e. mimic some dumb
+                                     behavior, like preserving old comps
+                                     file(s) during update, etc. */
+ char *retain_old_md_by_age; /*!< Remove all files in repodata/ older
+                                     than the specified period of time,
+ during --update.
+ Value examples: "360" (360 sec),
+ "5d" (5 days), ..
+ Available units: (m - minutes, h - hours,
+ d - days) */
+ char *cachedir; /*!< Cache dir for checksums */
+
+ gboolean deltas; /*!< Is delta generation enabled? */
+    char **oldpackagedirs;      /*!< Paths to look for older pkgs
+                                     to delta against */
+    gint num_deltas;            /*!< Number of older versions to make
+                                     deltas against */
+    gint64 max_delta_rpm_size;  /*!< Max size of an rpm for which to run
+                                     deltarpm */
+ gboolean local_sqlite; /*!< Gen sqlite locally into a directory for
+ temporary files.
+                                     For situations when sqlite has trouble
+                                     generating DBs on NFS mounts. */
+    gint cut_dirs;              /*!< Ignore *num* leading directory
+                                     components in the location_href value
+                                     during repodata generation. */
+    gchar *location_prefix;     /*!< Append this prefix to location_href
+ during repodata generation. */
+ gchar *repomd_checksum; /*!< Checksum type for entries in repomd.xml */
/* Items filled by check_arguments() */
GSList *distro_cpeids; /*!< CPEIDs from --distro params */
GSList *distro_values; /*!< values from --distro params */
cr_ChecksumType checksum_type; /*!< checksum type */
+    cr_ChecksumType repomd_checksum_type;  /*!< checksum type for repomd.xml */
cr_CompressionType compression_type; /*!< compression type */
+    cr_CompressionType general_compression_type;  /*!< general compression type */
+ gint64 md_max_age; /*!< Max age of files in repodata/.
+ Older files will be removed
+ during --update.
+ Filled if --retain-old-md-by-age
+ is used */
+ char *checksum_cachedir; /*!< Path to cachedir */
+ GSList *oldpackagedirs_paths; /*!< paths to look for older pkgs to delta against */
};
#include "error.h"
#include "compression_wrapper.h"
+
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
/*
-#define Z_CR_CW_NO_COMPRESSION 0
-#define Z_BEST_SPEED 1
-#define Z_BEST_COMPRESSION 9
-#define Z_DEFAULT_COMPRESSION (-1)
+#define Z_CR_CW_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
*/
-#define CR_CW_GZ_COMPRESSION_LEVEL Z_DEFAULT_COMPRESSION
+#define CR_CW_GZ_COMPRESSION_LEVEL Z_DEFAULT_COMPRESSION
/*
#define Z_FILTERED 1
if (!g_file_test(filename, G_FILE_TEST_IS_REGULAR)) {
g_debug("%s: File %s doesn't exists or not a regular file",
__func__, filename);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_NOFILE,
+ g_set_error(err, ERR_DOMAIN, CRE_NOFILE,
"File %s doesn't exists or not a regular file", filename);
return CR_CW_UNKNOWN_COMPRESSION;
}
magic_t myt = magic_open(MAGIC_MIME);
if (myt == NULL) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_MAGIC,
+ g_set_error(err, ERR_DOMAIN, CRE_MAGIC,
"magic_open() failed: Cannot allocate the magic cookie");
return CR_CW_UNKNOWN_COMPRESSION;
}
if (magic_load(myt, NULL) == -1) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_MAGIC,
+ g_set_error(err, ERR_DOMAIN, CRE_MAGIC,
"magic_load() failed: %s", magic_error(myt));
return CR_CW_UNKNOWN_COMPRESSION;
}
} else {
g_debug("%s: Mime type not detected! (%s): %s", __func__, filename,
magic_error(myt));
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_MAGIC,
+ g_set_error(err, ERR_DOMAIN, CRE_MAGIC,
"mime_type() detection failed: %s", magic_error(myt));
magic_close(myt);
return CR_CW_UNKNOWN_COMPRESSION;
}
-static char *
+static const char *
cr_gz_strerror(gzFile f)
{
int errnum;
- char *msg = (char *) gzerror(f, &errnum);
+ const char *msg = gzerror(f, &errnum);
if (errnum == Z_ERRNO)
- msg = strerror(errno);
+ msg = g_strerror(errno);
return msg;
}
g_debug("%s: CR_CW_AUTO_DETECT_COMPRESSION cannot be used if "
"mode is CR_CW_MODE_WRITE", __func__);
assert(0);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_ASSERT,
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT,
"CR_CW_AUTO_DETECT_COMPRESSION cannot be used if "
"mode is CR_CW_MODE_WRITE");
return NULL;
g_debug("%s: CR_CW_UNKNOWN_COMPRESSION cannot be used if mode"
" is CR_CW_MODE_WRITE", __func__);
assert(0);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_ASSERT,
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT,
"CR_CW_UNKNOWN_COMPRESSION cannot be used if mode "
"is CR_CW_MODE_WRITE");
return NULL;
if (type == CR_CW_UNKNOWN_COMPRESSION) {
// Detection without error but compression type is unknown
g_debug("%s: Cannot detect compression type", __func__);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_UNKNOWNCOMPRESSION,
+ g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCOMPRESSION,
"Cannot detect compression type");
return NULL;
}
mode_str = (mode == CR_CW_MODE_WRITE) ? "w" : "r";
file->FILE = (void *) fopen(filename, mode_str);
if (!file->FILE)
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_IO,
- "fopen(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fopen(): %s", g_strerror(errno));
break;
case (CR_CW_GZ_COMPRESSION): // ---------------------------------------
file->FILE = (void *) gzopen(filename, mode_str);
if (!file->FILE) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_GZ,
- "gzopen(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_GZ,
+ "gzopen(): %s", g_strerror(errno));
break;
}
if (gzbuffer((gzFile) file->FILE, GZ_BUFFER_SIZE) == -1) {
g_debug("%s: gzbuffer() call failed", __func__);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_GZ,
+ g_set_error(err, ERR_DOMAIN, CRE_GZ,
"gzbuffer() call failed");
}
break;
int bzerror;
if (!f) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_IO,
- "fopen(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fopen(): %s", g_strerror(errno));
break;
}
err_msg = "other error";
}
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BZ2,
+ g_set_error(err, ERR_DOMAIN, CRE_BZ2,
"Bz2 error: %s", err_msg);
}
err_msg = "Unknown error";
}
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
"XZ error (%d): %s", ret, err_msg);
g_free((void *) xz_file);
break;
FILE *f = fopen(filename, mode_str);
if (!f) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
- "fopen(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
+ "fopen(): %s", g_strerror(errno));
lzma_end(&(xz_file->stream));
g_free((void *) xz_file);
break;
if (!file->FILE) {
// File is not open -> cleanup
if (err && *err == NULL)
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
"Unknown error while opening: %s", filename);
g_free(file);
return NULL;
ret = CRE_OK;
} else {
ret = CRE_IO;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_IO,
- "fclose(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fclose(): %s", g_strerror(errno));
}
break;
}
ret = CRE_GZ;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_GZ,
+ g_set_error(err, ERR_DOMAIN, CRE_GZ,
"gzclose(): %s", err_msg);
}
break;
}
ret = CRE_BZ2;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BZ2,
+ g_set_error(err, ERR_DOMAIN, CRE_BZ2,
"Bz2 error: %s", err_msg);
}
break;
}
ret = CRE_XZ;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
"XZ: lzma_code() error (%d): %s",
rc, err_msg);
break;
if (fwrite(xz_file->buffer, 1, olen, xz_file->file) != olen) {
// Error while writing
ret = CRE_XZ;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
- "XZ: fwrite() error: %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
+ "XZ: fwrite() error: %s", g_strerror(errno));
break;
}
default: // -----------------------------------------------------------
ret = CRE_BADARG;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Bad compressed file type");
break;
}
assert(!err || *err == NULL);
if (cr_file->mode != CR_CW_MODE_READ) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"File is not opened in read mode");
return CR_CW_ERR;
}
ret = fread(buffer, 1, len, (FILE *) cr_file->FILE);
if ((ret != (int) len) && !feof((FILE *) cr_file->FILE)) {
ret = CR_CW_ERR;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_IO,
- "fread(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fread(): %s", g_strerror(errno));
}
break;
ret = gzread((gzFile) cr_file->FILE, buffer, len);
if (ret == -1) {
ret = CR_CW_ERR;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_GZ,
+ g_set_error(err, ERR_DOMAIN, CRE_GZ,
"fread(): %s", cr_gz_strerror((gzFile) cr_file->FILE));
}
break;
err_msg = "other error";
}
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BZ2,
+ g_set_error(err, ERR_DOMAIN, CRE_BZ2,
"Bz2 error: %s", err_msg);
}
break;
if (stream->avail_in == 0) {
if ((lret = fread(xz_file->buffer, 1, XZ_BUFFER_SIZE, xz_file->file)) < 0) {
g_debug("%s: XZ: Error while fread", __func__);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
- "XZ: fread(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
+ "XZ: fread(): %s", g_strerror(errno));
return CR_CW_ERR; // Error while reading input file
} else if (lret == 0) {
g_debug("%s: EOF", __func__);
g_debug("%s: XZ: Error while decoding (%d): %s",
__func__, lret, err_msg);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
"XZ: Error while decoding (%d): %s",
lret, err_msg);
return CR_CW_ERR; // Error while decoding
default: // -----------------------------------------------------------
ret = CR_CW_ERR;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Bad compressed file type");
break;
}
|| (ret != CR_CW_ERR && *err == NULL));
if (cr_file->stat && ret != CR_CW_ERR) {
- GError *tmp_err = NULL;
cr_file->stat->size += ret;
if (cr_file->checksum_ctx) {
GError *tmp_err = NULL;
assert(!err || *err == NULL);
if (cr_file->mode != CR_CW_MODE_WRITE) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"File is not opened in read mode");
return ret;
}
case (CR_CW_NO_COMPRESSION): // ---------------------------------------
if ((ret = (int) fwrite(buffer, 1, len, (FILE *) cr_file->FILE)) != (int) len) {
ret = CR_CW_ERR;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_IO,
- "fwrite(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fwrite(): %s", g_strerror(errno));
}
break;
if ((ret = gzwrite((gzFile) cr_file->FILE, buffer, len)) == 0) {
ret = CR_CW_ERR;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_GZ,
+ g_set_error(err, ERR_DOMAIN, CRE_GZ,
"gzwrite(): %s", cr_gz_strerror((gzFile) cr_file->FILE));
}
break;
err_msg = "other error";
}
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BZ2,
+ g_set_error(err, ERR_DOMAIN, CRE_BZ2,
"Bz2 error: %s", err_msg);
}
break;
break;
}
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
"XZ: lzma_code() error (%d): %s",
lret, err_msg);
break; // Error while coding
size_t out_len = XZ_BUFFER_SIZE - stream->avail_out;
if ((fwrite(xz_file->buffer, 1, out_len, xz_file->file)) != out_len) {
ret = CR_CW_ERR;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_XZ,
- "XZ: fwrite(): %s", strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_XZ,
+ "XZ: fwrite(): %s", g_strerror(errno));
break; // Error while writing
}
}
}
default: // -----------------------------------------------------------
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Bad compressed file type");
break;
}
return 0;
if (cr_file->mode != CR_CW_MODE_WRITE) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"File is not opened in write mode");
return CR_CW_ERR;
}
break;
default: // -----------------------------------------------------------
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Bad compressed file type");
break;
}
return 0;
if (cr_file->mode != CR_CW_MODE_WRITE) {
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"File is not opened in write mode");
return CR_CW_ERR;
}
if (ret < 0) {
g_debug("%s: vasprintf() call failed", __func__);
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_MEMORY,
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
"vasprintf() call failed");
return CR_CW_ERR;
}
default: // -----------------------------------------------------------
ret = CR_CW_ERR;
- g_set_error(err, CR_COMPRESSION_WRAPPER_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Bad compressed file type");
break;
}
extern "C" {
#endif
+#include <glib.h>
#include "checksum.h"
/** \defgroup compression_wrapper Wrapper for compressed file.
#include <sys/stat.h>
#include <dirent.h>
#include <fcntl.h>
+#include <stdint.h>
+#include <unistd.h>
#include "cmd_parser.h"
#include "compression_wrapper.h"
+#include "createrepo_shared.h"
+#include "deltarpms.h"
+#include "dumper_thread.h"
#include "checksum.h"
+#include "cleanup.h"
#include "error.h"
+#include "helpers.h"
#include "load_metadata.h"
#include "locate_metadata.h"
#include "misc.h"
#include "xml_dump.h"
#include "xml_file.h"
+#define OUTDELTADIR "drpms/"
-#define MAX_TASK_BUFFER_LEN 20
-
-
-struct UserData {
- GThreadPool *pool; // thread pool
- cr_XmlFile *pri_f; // Opened compressed primary.xml.*
- cr_XmlFile *fil_f; // Opened compressed filelists.xml.*
- cr_XmlFile *oth_f; // Opened compressed other.xml.*
- cr_SqliteDb *pri_db; // Primary db
- cr_SqliteDb *fil_db; // Filelists db
- cr_SqliteDb *oth_db; // Other db
- int changelog_limit; // Max number of changelogs for a package
- const char *location_base; // Base location url
- int repodir_name_len; // Len of path to repo /foo/bar/repodata
- // This part |<----->|
- const char *checksum_type_str; // Name of selected checksum
- cr_ChecksumType checksum_type; // Constant representing selected checksum
- gboolean skip_symlinks; // Skip symlinks
- long package_count; // Total number of packages to process
-
- // Update stuff
- gboolean skip_stat; // Skip stat() while updating
- cr_Metadata *old_metadata; // Loaded metadata
-
- // Thread serialization
- GMutex *mutex_pri; // Mutex for primary metadata
- GMutex *mutex_fil; // Mutex for filelists metadata
- GMutex *mutex_oth; // Mutex for other metadata
- GCond *cond_pri; // Condition for primary metadata
- GCond *cond_fil; // Condition for filelists metadata
- GCond *cond_oth; // Condition for other metadata
- volatile long id_pri; // ID of task on turn (write primary metadata)
- volatile long id_fil; // ID of task on turn (write filelists metadata)
- volatile long id_oth; // ID of task on turn (write other metadata)
-
- // Buffering
- GQueue *buffer; // Buffer for done tasks
- GMutex *mutex_buffer; // Mutex for accessing the buffer
-};
-
-
-struct PoolTask {
- long id; // ID of the task
- char* full_path; // Complete path - /foo/bar/packages/foo.rpm
- char* filename; // Just filename - foo.rpm
- char* path; // Just path - /foo/bar/packages
-};
-
-
-struct BufferedTask {
- long id; // ID of the task
- struct cr_XmlStruct res; // XML for primary, filelists and other
- cr_Package *pkg; // Package structure
- char *location_href; // location_href path
- int pkg_from_md; // If true - package structure if from
- // old metadata and must not be freed!
- // If false - package is from file and
- // it must be freed!
-};
-
-
-// Global variables used by signal handler
-char *tmp_repodata_path = NULL; // Path to temporary dir - /foo/bar/.repodata
-
-
-// Signal handler
-void
-sigint_catcher(int sig)
-{
- CR_UNUSED(sig);
- g_message("SIGINT catched: Terminating...");
- if (tmp_repodata_path)
- cr_remove_dir(tmp_repodata_path, NULL);
- exit(1);
-}
-
-
-int
-allowed_file(const gchar *filename, struct CmdOptions *options)
+// TODO: Pass only exclude_masks list here
+/** Check if the filename is excluded by any exclude mask.
+ * @param filename      Filename (basename).
+ * @param exclude_masks List of exclude masks (compiled GPatternSpecs).
+ * @return              TRUE if the file should be included, FALSE otherwise
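+ *
+ * Example (illustrative): with an exclude mask compiled from the glob
+ * "*.src.rpm", allowed_file("foo.src.rpm", masks) returns FALSE, while
+ * allowed_file("bar.x86_64.rpm", masks) returns TRUE.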
+ */
+static gboolean
+allowed_file(const gchar *filename, GSList *exclude_masks)
{
// Check file against exclude glob masks
- if (options->exclude_masks) {
+ if (exclude_masks) {
int str_len = strlen(filename);
gchar *reversed_filename = g_utf8_strreverse(filename, str_len);
- GSList *element = options->exclude_masks;
+ GSList *element = exclude_masks;
for (; element; element=g_slist_next(element)) {
if (g_pattern_match((GPatternSpec *) element->data,
str_len, filename, reversed_filename))
}
-gint
-buf_task_sort_func(gconstpointer a, gconstpointer b, gpointer data)
-{
- CR_UNUSED(data);
- const struct BufferedTask *task_a = a;
- const struct BufferedTask *task_b = b;
- if (task_a->id < task_b->id) return -1;
- if (task_a->id == task_b->id) return 0;
- return 1;
-}
-
-
-void
-write_pkg(long id,
- struct cr_XmlStruct res,
- cr_Package *pkg,
- struct UserData *udata)
-{
- GError *tmp_err = NULL;
-
- // Write primary data
- g_mutex_lock(udata->mutex_pri);
- while (udata->id_pri != id)
- g_cond_wait (udata->cond_pri, udata->mutex_pri);
- ++udata->id_pri;
- cr_xmlfile_add_chunk(udata->pri_f, (const char *) res.primary, &tmp_err);
- if (tmp_err) {
- g_critical("Cannot add primary chunk:\n%s\nError: %s",
- res.primary, tmp_err->message);
- g_clear_error(&tmp_err);
- }
-
- if (udata->pri_db) {
- cr_db_add_pkg(udata->pri_db, pkg, &tmp_err);
- if (tmp_err) {
- g_critical("Cannot add record of %s (%s) to primary db: %s",
- pkg->name, pkg->pkgId, tmp_err->message);
- g_clear_error(&tmp_err);
- }
- }
-
- g_cond_broadcast(udata->cond_pri);
- g_mutex_unlock(udata->mutex_pri);
-
- // Write fielists data
- g_mutex_lock(udata->mutex_fil);
- while (udata->id_fil != id)
- g_cond_wait (udata->cond_fil, udata->mutex_fil);
- ++udata->id_fil;
- cr_xmlfile_add_chunk(udata->fil_f, (const char *) res.filelists, &tmp_err);
- if (tmp_err) {
- g_critical("Cannot add filelists chunk:\n%s\nError: %s",
- res.filelists, tmp_err->message);
- g_clear_error(&tmp_err);
- }
-
- if (udata->fil_db) {
- cr_db_add_pkg(udata->fil_db, pkg, &tmp_err);
- if (tmp_err) {
- g_critical("Cannot add record of %s (%s) to filelists db: %s",
- pkg->name, pkg->pkgId, tmp_err->message);
- g_clear_error(&tmp_err);
- }
- }
-
- g_cond_broadcast(udata->cond_fil);
- g_mutex_unlock(udata->mutex_fil);
-
- // Write other data
- g_mutex_lock(udata->mutex_oth);
- while (udata->id_oth != id)
- g_cond_wait (udata->cond_oth, udata->mutex_oth);
- ++udata->id_oth;
- cr_xmlfile_add_chunk(udata->oth_f, (const char *) res.other, &tmp_err);
- if (tmp_err) {
- g_critical("Cannot add other chunk:\n%s\nError: %s",
- res.other, tmp_err->message);
- g_clear_error(&tmp_err);
- }
-
- if (udata->oth_db) {
- cr_db_add_pkg(udata->oth_db, pkg, NULL);
- if (tmp_err) {
- g_critical("Cannot add record of %s (%s) to other db: %s",
- pkg->name, pkg->pkgId, tmp_err->message);
- g_clear_error(&tmp_err);
- }
- }
-
- g_cond_broadcast(udata->cond_oth);
- g_mutex_unlock(udata->mutex_oth);
-}
-
-
-void
-dumper_thread(gpointer data, gpointer user_data)
-{
- GError *tmp_err = NULL;
- gboolean old_used = FALSE; // To use old metadata?
- cr_Package *md = NULL; // Package from loaded MetaData
- cr_Package *pkg = NULL; // Package from file
- struct stat stat_buf; // Struct with info from stat() on file
- struct cr_XmlStruct res; // Structure for generated XML
-
- struct UserData *udata = (struct UserData *) user_data;
- struct PoolTask *task = (struct PoolTask *) data;
-
- // get location_href without leading part of path (path to repo)
- // including '/' char
- const char *location_href = task->full_path + udata->repodir_name_len;
- const char *location_base = udata->location_base;
-
- // Get stat info about file
- if (udata->old_metadata && !(udata->skip_stat)) {
- if (stat(task->full_path, &stat_buf) == -1) {
- g_critical("Stat() on %s: %s", task->full_path, strerror(errno));
- goto task_cleanup;
- }
- }
-
- // Update stuff
- if (udata->old_metadata) {
- // We have old metadata
- md = (cr_Package *) g_hash_table_lookup(
- cr_metadata_hashtable(udata->old_metadata),
- task->filename);
-
- if (md) {
- g_debug("CACHE HIT %s", task->filename);
-
- if (udata->skip_stat) {
- old_used = TRUE;
- } else if (stat_buf.st_mtime == md->time_file
- && stat_buf.st_size == md->size_package
- && !strcmp(udata->checksum_type_str, md->checksum_type))
- {
- old_used = TRUE;
- } else {
- g_debug("%s metadata are obsolete -> generating new",
- task->filename);
- }
-
- if (old_used) {
- // We have usable old data, but we have to set proper locations
- // WARNING! This two lines destructively modifies content of
- // packages in old metadata.
- md->location_href = (char *) location_href;
- md->location_base = (char *) location_base;
- }
- }
- }
-
- // Load package and gen XML metadata
- if (!old_used) {
- // Load package from file
- pkg = cr_package_from_rpm(task->full_path, udata->checksum_type,
- location_href, udata->location_base,
- udata->changelog_limit, NULL, &tmp_err);
- assert(pkg || tmp_err);
-
- if (!pkg) {
- g_warning("Cannot read package: %s: %s",
- task->full_path, tmp_err->message);
- g_clear_error(&tmp_err);
- goto task_cleanup;
- }
-
- res = cr_xml_dump(pkg, &tmp_err);
- if (tmp_err) {
- g_critical("Cannot dump XML for %s (%s): %s",
- pkg->name, pkg->pkgId, tmp_err->message);
- g_clear_error(&tmp_err);
- goto task_cleanup;
- }
- } else {
- // Just gen XML from old loaded metadata
- pkg = md;
- res = cr_xml_dump(md, &tmp_err);
- if (tmp_err) {
- g_critical("Cannot dump XML for %s (%s): %s",
- md->name, md->pkgId, tmp_err->message);
- g_clear_error(&tmp_err);
- goto task_cleanup;
- }
- }
-
- // Buffering stuff
- g_mutex_lock(udata->mutex_buffer);
-
- if (g_queue_get_length(udata->buffer) < MAX_TASK_BUFFER_LEN
- && udata->id_pri != task->id
- && udata->package_count > (task->id + 1))
- {
- // If:
- // * this isn't our turn
- // * the buffer isn't full
- // * this isn't the last task
- // Then: save the task to the buffer
-
- struct BufferedTask *buf_task = malloc(sizeof(struct BufferedTask));
- buf_task->id = task->id;
- buf_task->res = res;
- buf_task->pkg = pkg;
- buf_task->location_href = NULL;
- buf_task->pkg_from_md = (pkg == md) ? 1 : 0;
-
- if (pkg == md) {
- // We MUST store location_href for reused packages who goes to the buffer
- // We don't need to store location_base because it is allocated in
- // user_data during this function calls.
-
- buf_task->location_href = g_strdup(location_href);
- buf_task->pkg->location_href = buf_task->location_href;
- }
-
- g_queue_insert_sorted(udata->buffer, buf_task, buf_task_sort_func, NULL);
- g_mutex_unlock(udata->mutex_buffer);
-
- g_free(task->full_path);
- g_free(task->filename);
- g_free(task->path);
- g_free(task);
-
- return;
- }
-
- g_mutex_unlock(udata->mutex_buffer);
-
- // Dump XML and SQLite
- write_pkg(task->id, res, pkg, udata);
-
- // Clean up
- if (pkg != md)
- cr_package_free(pkg);
- g_free(res.primary);
- g_free(res.filelists);
- g_free(res.other);
-
-task_cleanup:
- if (udata->id_pri <= task->id) {
- // An error was encountered and we have to wait to increment counters
- g_mutex_lock(udata->mutex_pri);
- while (udata->id_pri != task->id)
- g_cond_wait (udata->cond_pri, udata->mutex_pri);
- ++udata->id_pri;
- g_cond_broadcast(udata->cond_pri);
- g_mutex_unlock(udata->mutex_pri);
-
- g_mutex_lock(udata->mutex_fil);
- while (udata->id_fil != task->id)
- g_cond_wait (udata->cond_fil, udata->mutex_fil);
- ++udata->id_fil;
- g_cond_broadcast(udata->cond_fil);
- g_mutex_unlock(udata->mutex_fil);
-
- g_mutex_lock(udata->mutex_oth);
- while (udata->id_oth != task->id)
- g_cond_wait (udata->cond_oth, udata->mutex_oth);
- ++udata->id_oth;
- g_cond_broadcast(udata->cond_oth);
- g_mutex_unlock(udata->mutex_oth);
- }
-
- g_free(task->full_path);
- g_free(task->filename);
- g_free(task->path);
- g_free(task);
-
- // Try to write all results from buffer which was waiting for us
- while (1) {
- struct BufferedTask *buf_task;
- g_mutex_lock(udata->mutex_buffer);
- buf_task = g_queue_peek_head(udata->buffer);
- if (buf_task && buf_task->id == udata->id_pri) {
- buf_task = g_queue_pop_head (udata->buffer);
- g_mutex_unlock(udata->mutex_buffer);
- // Dump XML and SQLite
- write_pkg(buf_task->id, buf_task->res, buf_task->pkg, udata);
- // Clean up
- if (!buf_task->pkg_from_md)
- cr_package_free(buf_task->pkg);
- g_free(buf_task->res.primary);
- g_free(buf_task->res.filelists);
- g_free(buf_task->res.other);
- g_free(buf_task->location_href);
- g_free(buf_task);
- } else {
- g_mutex_unlock(udata->mutex_buffer);
- break;
- }
- }
-
- return;
-}
-
-
-// Function used to sort pool tasks - this function is responsible for
-// order of packages in metadata
-int
-task_cmp(gconstpointer a_p, gconstpointer b_p, gpointer user_data)
+/** Function used to sort pool tasks.
+ * This function is responsible for the order of packages in the metadata.
+ *
+ * @param a_p Pointer to first struct PoolTask
+ * @param b_p Pointer to second struct PoolTask
+ * @param user_data Unused (user data)
+ */
+static int
+task_cmp(gconstpointer a_p, gconstpointer b_p, G_GNUC_UNUSED gpointer user_data)
{
int ret;
const struct PoolTask *a = a_p;
const struct PoolTask *b = b_p;
- CR_UNUSED(user_data);
ret = g_strcmp0(a->filename, b->filename);
if (ret) return ret;
return g_strcmp0(a->path, b->path);
}
-long
+/** Recursively walk through the input directory and push the found
+ * rpms to the thread pool (create a PoolTask and push it to the pool).
+ * If a package list is supplied, no recursive walk is done and only
+ * files from that list are pushed into the pool.
+ * This function also filters out files that shouldn't be processed
+ * (e.g. directories with a .rpm suffix, files that match one of
+ * the exclude masks, etc.).
+ *
+ * @param pool              GThreadPool pool
+ * @param in_dir            Directory to scan
+ * @param cmd_options       Options specified on command line
+ * @param current_pkglist   Pointer to a list to which basenames of files
+ *                          that will be processed are appended.
+ * @param output_pkg_list   File to which relative paths of processed
+ *                          packages are written.
+ * @return                  Number of packages that are going to be processed
+ */
+static long
fill_pool(GThreadPool *pool,
gchar *in_dir,
struct CmdOptions *cmd_options,
struct PoolTask *task;
long package_count = 0;
- if (!(cmd_options->include_pkgs)) {
+ if (cmd_options->pkglist && !cmd_options->include_pkgs) {
+ g_warning("Used pkglist doesn't contain any useful items");
+ } else if (!(cmd_options->include_pkgs)) {
// --pkglist (or --includepkg) is not supplied -> do dir walk
g_message("Directory walk started");
// This probably should be always true
repo_relative_path = full_path + in_dir_len;
- if (allowed_file(repo_relative_path, cmd_options)) {
+ if (allowed_file(repo_relative_path, cmd_options->exclude_masks)) {
// FINALLY! Add file into pool
g_debug("Adding pkg: %s", full_path);
task = g_malloc(sizeof(struct PoolTask));
else // Use only a last part of the path
filename = relative_path + x + 1;
- if (allowed_file(filename, cmd_options)) {
+ if (allowed_file(filename, cmd_options->exclude_masks)) {
// Check filename against exclude glob masks
gchar *full_path = g_strconcat(in_dir, relative_path, NULL);
// ^^^ /path/to/in_repo/packages/i386/foobar.rpm
}
+/** Prepare the cache dir for checksums.
+ * Called only if the --cachedir option is used.
+ * It tries to create the cache directory if it doesn't exist yet.
+ * It also fills the checksum_cachedir option in the cmd_options structure.
+ *
+ * @param cmd_options Commandline options
+ * @param out_dir Repo output directory
+ * @param err GError **
+ * @return FALSE if err is set, TRUE otherwise
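+ *
+ * Example (illustrative): with out_dir "/srv/repo/" and "--cachedir cache",
+ * checksum_cachedir is set to "/srv/repo/cache/".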
+ */
+static gboolean
+prepare_cache_dir(struct CmdOptions *cmd_options,
+ const gchar *out_dir,
+ GError **err)
+{
+ if (!cmd_options->cachedir)
+ return TRUE;
+
+ if (g_str_has_prefix(cmd_options->cachedir, "/")) {
+ // Absolute local path
+ cmd_options->checksum_cachedir = cr_normalize_dir_path(
+ cmd_options->cachedir);
+ } else {
+        // Relative path (relative to out_dir)
+ gchar *tmp = g_strconcat(out_dir, cmd_options->cachedir, NULL);
+ cmd_options->checksum_cachedir = cr_normalize_dir_path(tmp);
+ g_free(tmp);
+ }
+
+ // Create the cache directory
+ if (g_mkdir(cmd_options->checksum_cachedir, S_IRWXU|S_IRWXG|S_IROTH|S_IXOTH)) {
+ if (errno == EEXIST) {
+ if (!g_file_test(cmd_options->checksum_cachedir,
+ G_FILE_TEST_IS_DIR))
+ {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "The %s already exists and it is not a directory!",
+ cmd_options->checksum_cachedir);
+ return FALSE;
+ }
+ } else {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "cannot use cachedir %s: %s",
+ cmd_options->checksum_cachedir, g_strerror(errno));
+ return FALSE;
+ }
+ }
+
+ g_debug("Cachedir for checksums is %s", cmd_options->checksum_cachedir);
+ return TRUE;
+}
+
+
int
main(int argc, char **argv)
{
struct CmdOptions *cmd_options;
+ gboolean ret;
GError *tmp_err = NULL;
// Arguments parsing
-
cmd_options = parse_arguments(&argc, &argv, &tmp_err);
if (!cmd_options) {
- fprintf(stderr, "Argument parsing failed: %s\n", tmp_err->message);
+ g_printerr("Argument parsing failed: %s\n", tmp_err->message);
g_error_free(tmp_err);
exit(EXIT_FAILURE);
}
-
// Arguments pre-check
-
if (cmd_options->version) {
// Just print version
printf("Version: %d.%d.%d\n", CR_VERSION_MAJOR,
if (argc != 2) {
// No mandatory arguments
- fprintf(stderr, "Must specify exactly one directory to index.\n");
- fprintf(stderr, "Usage: %s [options] <directory_to_index>\n\n",
+ g_printerr("Must specify exactly one directory to index.\n");
+ g_printerr("Usage: %s [options] <directory_to_index>\n\n",
cr_get_filename(argv[0]));
free_options(cmd_options);
exit(EXIT_FAILURE);
}
-
// Dirs
-
gchar *in_dir = NULL; // path/to/repo/
gchar *in_repo = NULL; // path/to/repo/repodata/
gchar *out_dir = NULL; // path/to/out_repo/
gchar *out_repo = NULL; // path/to/out_repo/repodata/
- gchar *tmp_out_repo = NULL; // path/to/out_repo/.repodata/
+ gchar *tmp_out_repo = NULL; // usually path/to/out_repo/.repodata/
+ gchar *lock_dir = NULL; // path/to/out_repo/.repodata/
if (cmd_options->basedir && !g_str_has_prefix(argv[1], "/")) {
gchar *tmp = cr_normalize_dir_path(argv[1]);
}
// Check if inputdir exists
-
if (!g_file_test(in_dir, G_FILE_TEST_IS_DIR)) {
- fprintf(stderr, "Directory %s must exist\n", in_dir);
+ g_printerr("Directory %s must exist\n", in_dir);
g_free(in_dir);
free_options(cmd_options);
exit(EXIT_FAILURE);
// Check parsed arguments
-
if (!check_arguments(cmd_options, in_dir, &tmp_err)) {
- fprintf(stderr, "%s\n", tmp_err->message);
+ g_printerr("%s\n", tmp_err->message);
g_error_free(tmp_err);
g_free(in_dir);
free_options(cmd_options);
exit(EXIT_FAILURE);
}
-
// Set logging stuff
+ cr_setup_logging(cmd_options->quiet, cmd_options->verbose);
- g_log_set_default_handler (cr_log_fn, NULL);
-
- if (cmd_options->quiet) {
- // Quiet mode
- GLogLevelFlags levels = G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO |
- G_LOG_LEVEL_DEBUG | G_LOG_LEVEL_WARNING;
- g_log_set_handler(NULL, levels, cr_null_log_fn, NULL);
- g_log_set_handler("C_CREATEREPOLIB", levels, cr_null_log_fn, NULL);
- } else if (cmd_options->verbose) {
- // Verbose mode
- GLogLevelFlags levels = G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO |
- G_LOG_LEVEL_DEBUG | G_LOG_LEVEL_WARNING;
- g_log_set_handler(NULL, levels, cr_log_fn, NULL);
- g_log_set_handler("C_CREATEREPOLIB", levels, cr_log_fn, NULL);
- } else {
- // Standard mode
- GLogLevelFlags levels = G_LOG_LEVEL_DEBUG;
- g_log_set_handler(NULL, levels, cr_null_log_fn, NULL);
- g_log_set_handler("C_CREATEREPOLIB", levels, cr_null_log_fn, NULL);
- }
-
+ // Emit debug message with version
+ g_debug("Version: %d.%d.%d\n", CR_VERSION_MAJOR,
+ CR_VERSION_MINOR,
+ CR_VERSION_PATCH);
// Set paths of input and output repos
-
in_repo = g_strconcat(in_dir, "repodata/", NULL);
if (cmd_options->outputdir) {
out_dir = cr_normalize_dir_path(cmd_options->outputdir);
out_repo = g_strconcat(out_dir, "repodata/", NULL);
- tmp_out_repo = g_strconcat(out_dir, ".repodata/", NULL);
} else {
out_dir = g_strdup(in_dir);
out_repo = g_strdup(in_repo);
- tmp_out_repo = g_strconcat(out_dir, ".repodata/", NULL);
}
- // Block SIGINT
-
- sigset_t intmask;
- sigemptyset(&intmask);
- sigaddset(&intmask, SIGINT);
- sigprocmask(SIG_BLOCK, &intmask, NULL);
-
- // Check if tmp_out_repo exists & Create tmp_out_repo dir
-
- if (g_mkdir(tmp_out_repo, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) {
- if (errno == EEXIST) {
- g_critical("Temporary repodata directory: %s already exists! ("
- "Another createrepo process is running?)", tmp_out_repo);
- } else {
- g_critical("Error while creating temporary repodata directory %s: %s",
- tmp_out_repo, strerror(errno));
- }
-
+ // Prepare cachedir for checksum if --cachedir is used
+ if (!prepare_cache_dir(cmd_options, out_dir, &tmp_err)) {
+ g_printerr("%s\n", tmp_err->message);
+ g_error_free(tmp_err);
+ g_free(in_dir);
+ g_free(in_repo);
+ g_free(out_dir);
+ g_free(out_repo);
+ free_options(cmd_options);
exit(EXIT_FAILURE);
}
-
- // Set handler for sigint
-
- tmp_repodata_path = tmp_out_repo;
-
- g_debug("SIGINT handler setup");
- struct sigaction sigact;
- sigact.sa_handler = sigint_catcher;
- sigemptyset(&sigact.sa_mask);
- sigact.sa_flags = 0;
- if (sigaction(SIGINT, &sigact, NULL) == -1) {
- g_critical("sigaction(): %s", strerror(errno));
+    // Block signals that terminate the process
+ if (!cr_block_terminating_signals(&tmp_err)) {
+ g_printerr("%s\n", tmp_err->message);
exit(EXIT_FAILURE);
}
- // Unblock SIGINT
+ // Check if lock exists & Create lock dir
+ if (!cr_lock_repo(out_dir, cmd_options->ignore_lock, &lock_dir, &tmp_out_repo, &tmp_err)) {
+ g_printerr("%s\n", tmp_err->message);
+ exit(EXIT_FAILURE);
+ }
- sigemptyset(&intmask);
- sigaddset(&intmask, SIGINT);
- sigprocmask(SIG_UNBLOCK, &intmask, NULL);
+ // Setup cleanup handlers
+ if (!cr_set_cleanup_handler(lock_dir, tmp_out_repo, &tmp_err)) {
+ g_printerr("%s\n", tmp_err->message);
+ exit(EXIT_FAILURE);
+ }
+ // Unblock the blocked signals
+ if (!cr_unblock_terminating_signals(&tmp_err)) {
+ g_printerr("%s\n", tmp_err->message);
+ exit(EXIT_FAILURE);
+ }
// Open package list
-
FILE *output_pkg_list = NULL;
if (cmd_options->read_pkgs_list) {
output_pkg_list = fopen(cmd_options->read_pkgs_list, "w");
if (!output_pkg_list) {
g_critical("Cannot open \"%s\" for writing: %s",
- cmd_options->read_pkgs_list, strerror(errno));
+ cmd_options->read_pkgs_list, g_strerror(errno));
exit(EXIT_FAILURE);
}
}
// Init package parser
-
cr_package_parser_init();
cr_xml_dump_init();
-
// Thread pool - Creation
-
struct UserData user_data;
g_thread_init(NULL);
- GThreadPool *pool = g_thread_pool_new(dumper_thread,
+ GThreadPool *pool = g_thread_pool_new(cr_dumper_thread,
&user_data,
0,
TRUE,
// Thread pool - Fill with tasks
-
package_count = fill_pool(pool,
in_dir,
cmd_options,
// Load old metadata if --update
-
cr_Metadata *old_metadata = NULL;
struct cr_MetadataLocation *old_metadata_location = NULL;
if (package_count && cmd_options->update) {
int ret;
old_metadata = cr_metadata_new(CR_HT_KEY_FILENAME, 1, current_pkglist);
+ cr_metadata_set_dupaction(old_metadata, CR_HT_DUPACT_REMOVEALL);
if (cmd_options->outputdir)
- old_metadata_location = cr_locate_metadata(out_dir, 1, NULL);
+ old_metadata_location = cr_locate_metadata(out_dir, TRUE, NULL);
else
- old_metadata_location = cr_locate_metadata(in_dir, 1, NULL);
+ old_metadata_location = cr_locate_metadata(in_dir, TRUE, NULL);
if (old_metadata_location) {
ret = cr_metadata_load_xml(old_metadata,
assert(ret == CRE_OK || tmp_err);
if (ret == CRE_OK) {
- g_debug("Old metadata from: %s - loaded", out_dir);
+ g_debug("Old metadata from: %s - loaded",
+ old_metadata_location->original_url);
} else {
g_debug("Old metadata from %s - loading failed: %s",
- out_dir, tmp_err->message);
+ old_metadata_location->original_url, tmp_err->message);
g_clear_error(&tmp_err);
}
}
// Copy groupfile
-
gchar *groupfile = NULL;
if (cmd_options->groupfile_fullpath) {
g_debug("Copy groupfile %s -> %s",
cmd_options->groupfile_fullpath, groupfile);
- int ret;
ret = cr_better_copy_file(cmd_options->groupfile_fullpath,
groupfile,
&tmp_err);
- assert(ret == CRE_OK || tmp_err);
-
- if (ret != CRE_OK) {
+ if (!ret) {
g_critical("Error while copy %s -> %s: %s",
cmd_options->groupfile_fullpath,
groupfile,
g_debug("Copy groupfile %s -> %s", src_groupfile, groupfile);
- int ret = cr_better_copy_file(src_groupfile, groupfile, &tmp_err);
- assert(ret == CRE_OK || tmp_err);
-
- if (ret != CRE_OK) {
+ if (!cr_better_copy_file(src_groupfile, groupfile, &tmp_err)) {
g_critical("Error while copy %s -> %s: %s",
src_groupfile, groupfile, tmp_err->message);
g_clear_error(&tmp_err);
// Copy update info
-
char *updateinfo = NULL;
if (cmd_options->update && cmd_options->keep_all_metadata &&
old_metadata_location && old_metadata_location->updateinfo_href)
{
- int ret;
gchar *src_updateinfo = old_metadata_location->updateinfo_href;
updateinfo = g_strconcat(tmp_out_repo,
cr_get_filename(src_updateinfo),
g_debug("Copy updateinfo %s -> %s", src_updateinfo, updateinfo);
- ret = cr_better_copy_file(src_updateinfo, updateinfo, &tmp_err);
- assert(ret == CRE_OK || tmp_err);
-
- if (ret != CRE_OK) {
+ if (!cr_better_copy_file(src_updateinfo, updateinfo, &tmp_err)) {
g_critical("Error while copy %s -> %s: %s",
src_updateinfo, updateinfo, tmp_err->message);
g_clear_error(&tmp_err);
// Setup compression types
-
+ const char *xml_compression_suffix = NULL;
const char *sqlite_compression_suffix = NULL;
+ const char *prestodelta_compression_suffix = NULL;
+ cr_CompressionType xml_compression = CR_CW_GZ_COMPRESSION;
cr_CompressionType sqlite_compression = CR_CW_BZ2_COMPRESSION;
cr_CompressionType groupfile_compression = CR_CW_GZ_COMPRESSION;
+ cr_CompressionType prestodelta_compression = CR_CW_GZ_COMPRESSION;
if (cmd_options->compression_type != CR_CW_UNKNOWN_COMPRESSION) {
- sqlite_compression = cmd_options->compression_type;
- groupfile_compression = cmd_options->compression_type;
+ sqlite_compression = cmd_options->compression_type;
+ groupfile_compression = cmd_options->compression_type;
+ prestodelta_compression = cmd_options->compression_type;
}
if (cmd_options->xz_compression) {
- sqlite_compression = CR_CW_XZ_COMPRESSION;
- groupfile_compression = CR_CW_XZ_COMPRESSION;
+ sqlite_compression = CR_CW_XZ_COMPRESSION;
+ groupfile_compression = CR_CW_XZ_COMPRESSION;
+ prestodelta_compression = CR_CW_GZ_COMPRESSION;
+ }
+
+ if (cmd_options->general_compression_type != CR_CW_UNKNOWN_COMPRESSION) {
+ xml_compression = cmd_options->general_compression_type;
+ sqlite_compression = cmd_options->general_compression_type;
+ groupfile_compression = cmd_options->general_compression_type;
+ prestodelta_compression = cmd_options->general_compression_type;
}
+ xml_compression_suffix = cr_compression_suffix(xml_compression);
sqlite_compression_suffix = cr_compression_suffix(sqlite_compression);
+ prestodelta_compression_suffix = cr_compression_suffix(prestodelta_compression);
// Create and open new compressed files
-
cr_XmlFile *pri_cr_file;
cr_XmlFile *fil_cr_file;
cr_XmlFile *oth_cr_file;
g_message("Temporary output repo path: %s", tmp_out_repo);
g_debug("Creating .xml.gz files");
- pri_xml_filename = g_strconcat(tmp_out_repo, "/primary.xml.gz", NULL);
- fil_xml_filename = g_strconcat(tmp_out_repo, "/filelists.xml.gz", NULL);
- oth_xml_filename = g_strconcat(tmp_out_repo, "/other.xml.gz", NULL);
+ pri_xml_filename = g_strconcat(tmp_out_repo, "/primary.xml", xml_compression_suffix, NULL);
+ fil_xml_filename = g_strconcat(tmp_out_repo, "/filelists.xml", xml_compression_suffix, NULL);
+ oth_xml_filename = g_strconcat(tmp_out_repo, "/other.xml", xml_compression_suffix, NULL);
- pri_stat = cr_contentstat_new(cmd_options->checksum_type, NULL);
+ pri_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL);
pri_cr_file = cr_xmlfile_sopen_primary(pri_xml_filename,
- CR_CW_GZ_COMPRESSION,
+ xml_compression,
pri_stat,
&tmp_err);
assert(pri_cr_file || tmp_err);
exit(EXIT_FAILURE);
}
- fil_stat = cr_contentstat_new(cmd_options->checksum_type, NULL);
+ fil_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL);
fil_cr_file = cr_xmlfile_sopen_filelists(fil_xml_filename,
- CR_CW_GZ_COMPRESSION,
+ xml_compression,
fil_stat,
&tmp_err);
assert(fil_cr_file || tmp_err);
exit(EXIT_FAILURE);
}
- oth_stat = cr_contentstat_new(cmd_options->checksum_type, NULL);
+ oth_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL);
oth_cr_file = cr_xmlfile_sopen_other(oth_xml_filename,
- CR_CW_GZ_COMPRESSION,
+ xml_compression,
oth_stat,
&tmp_err);
assert(oth_cr_file || tmp_err);
exit(EXIT_FAILURE);
}
-
// Set number of packages
-
g_debug("Setting number of packages");
cr_xmlfile_set_num_of_pkgs(pri_cr_file, package_count, NULL);
cr_xmlfile_set_num_of_pkgs(fil_cr_file, package_count, NULL);
cr_xmlfile_set_num_of_pkgs(oth_cr_file, package_count, NULL);
-
// Open sqlite databases
-
gchar *pri_db_filename = NULL;
gchar *fil_db_filename = NULL;
gchar *oth_db_filename = NULL;
cr_SqliteDb *oth_db = NULL;
if (!cmd_options->no_database) {
+ _cleanup_file_close_ int pri_db_fd = -1;
+ _cleanup_file_close_ int fil_db_fd = -1;
+ _cleanup_file_close_ int oth_db_fd = -1;
+
g_message("Preparing sqlite DBs");
- g_debug("Creating databases");
- pri_db_filename = g_strconcat(tmp_out_repo, "/primary.sqlite", NULL);
- fil_db_filename = g_strconcat(tmp_out_repo, "/filelists.sqlite", NULL);
- oth_db_filename = g_strconcat(tmp_out_repo, "/other.sqlite", NULL);
+ if (!cmd_options->local_sqlite) {
+ g_debug("Creating databases");
+ pri_db_filename = g_strconcat(tmp_out_repo, "/primary.sqlite", NULL);
+ fil_db_filename = g_strconcat(tmp_out_repo, "/filelists.sqlite", NULL);
+ oth_db_filename = g_strconcat(tmp_out_repo, "/other.sqlite", NULL);
+ } else {
+ g_debug("Creating databases localy");
+ const gchar *tmpdir = g_get_tmp_dir();
+ pri_db_filename = g_build_filename(tmpdir, "primary.XXXXXX.sqlite", NULL);
+ fil_db_filename = g_build_filename(tmpdir, "filelists.XXXXXX.sqlite", NULL);
+ oth_db_filename = g_build_filename(tmpdir, "other.XXXXXXX.sqlite", NULL);
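+ // g_mkstemp() replaces the XXXXXX in each template with a unique
+ // string and opens the resulting file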
+ pri_db_fd = g_mkstemp(pri_db_filename);
+ g_debug("%s", pri_db_filename);
+ if (pri_db_fd == -1) {
+ g_critical("Cannot open %s: %s", pri_db_filename, g_strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ fil_db_fd = g_mkstemp(fil_db_filename);
+ g_debug("%s", fil_db_filename);
+ if (fil_db_fd == -1) {
+ g_critical("Cannot open %s: %s", fil_db_filename, g_strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ oth_db_fd = g_mkstemp(oth_db_filename);
+ g_debug("%s", oth_db_filename);
+ if (oth_db_fd == -1) {
+ g_critical("Cannot open %s: %s", oth_db_filename, g_strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
pri_db = cr_db_open_primary(pri_db_filename, &tmp_err);
assert(pri_db || tmp_err);
}
// Thread pool - User data initialization
-
user_data.pri_f = pri_cr_file;
user_data.fil_f = fil_cr_file;
user_data.oth_f = oth_cr_file;
user_data.location_base = cmd_options->location_base;
user_data.checksum_type_str = cr_checksum_name_str(cmd_options->checksum_type);
user_data.checksum_type = cmd_options->checksum_type;
+ user_data.checksum_cachedir = cmd_options->checksum_cachedir;
user_data.skip_symlinks = cmd_options->skip_symlinks;
- user_data.skip_stat = cmd_options->skip_stat;
- user_data.old_metadata = old_metadata;
user_data.repodir_name_len = strlen(in_dir);
user_data.package_count = package_count;
- user_data.buffer = g_queue_new();
- user_data.mutex_buffer = g_mutex_new();
+ user_data.skip_stat = cmd_options->skip_stat;
+ user_data.old_metadata = old_metadata;
user_data.mutex_pri = g_mutex_new();
user_data.mutex_fil = g_mutex_new();
user_data.mutex_oth = g_mutex_new();
user_data.id_pri = 0;
user_data.id_fil = 0;
user_data.id_oth = 0;
+ user_data.buffer = g_queue_new();
+ user_data.mutex_buffer = g_mutex_new();
+ user_data.deltas = cmd_options->deltas;
+ user_data.max_delta_rpm_size = cmd_options->max_delta_rpm_size;
+ user_data.mutex_deltatargetpackages = g_mutex_new();
+ user_data.deltatargetpackages = NULL;
+ user_data.cut_dirs = cmd_options->cut_dirs;
+ user_data.location_prefix = cmd_options->location_prefix;
g_debug("Thread pool user data ready");
-
// Start pool
-
g_thread_pool_set_max_threads(pool, cmd_options->workers, NULL);
g_message("Pool started (with %d workers)", cmd_options->workers);
-
// Wait until pool is finished
-
g_thread_pool_free(pool, FALSE, TRUE);
g_message("Pool finished");
g_mutex_free(user_data.mutex_pri);
g_mutex_free(user_data.mutex_fil);
g_mutex_free(user_data.mutex_oth);
-
+ g_mutex_free(user_data.mutex_deltatargetpackages);
// Create repomd records for each file
-
g_debug("Generating repomd.xml");
cr_Repomd *repomd_obj = cr_repomd_new();
cr_RepomdRecord *groupfile_rec = NULL;
cr_RepomdRecord *compressed_groupfile_rec = NULL;
cr_RepomdRecord *updateinfo_rec = NULL;
-
+ cr_RepomdRecord *prestodelta_rec = NULL;
// XML
-
cr_repomd_record_load_contentstat(pri_xml_rec, pri_stat);
cr_repomd_record_load_contentstat(fil_xml_rec, fil_stat);
cr_repomd_record_load_contentstat(oth_xml_rec, oth_stat);
cr_RepomdRecordFillTask *oth_fill_task;
pri_fill_task = cr_repomdrecordfilltask_new(pri_xml_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
NULL);
g_thread_pool_push(fill_pool, pri_fill_task, NULL);
fil_fill_task = cr_repomdrecordfilltask_new(fil_xml_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
NULL);
g_thread_pool_push(fill_pool, fil_fill_task, NULL);
oth_fill_task = cr_repomdrecordfilltask_new(oth_xml_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
NULL);
g_thread_pool_push(fill_pool, oth_fill_task, NULL);
// Groupfile
-
if (groupfile) {
groupfile_rec = cr_repomd_record_new("group", groupfile);
compressed_groupfile_rec = cr_repomd_record_new("group_gz", groupfile);
cr_repomd_record_compress_and_fill(groupfile_rec,
compressed_groupfile_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
groupfile_compression,
&tmp_err);
if (tmp_err) {
// Updateinfo
-
if (updateinfo) {
updateinfo_rec = cr_repomd_record_new("updateinfo", updateinfo);
cr_repomd_record_fill(updateinfo_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
&tmp_err);
if (tmp_err) {
g_critical("Cannot process updateinfo %s: %s",
}
}
-
// Wait till repomd record fill task of xml files ends.
-
g_thread_pool_free(fill_pool, FALSE, TRUE);
cr_repomdrecordfilltask_free(pri_fill_task, NULL);
cr_repomdrecordfilltask_free(oth_fill_task, NULL);
// Sqlite db
-
if (!cmd_options->no_database) {
- gchar *pri_db_name = g_strconcat(pri_db_filename,
+ gchar *pri_db_name = g_strconcat(tmp_out_repo, "/primary.sqlite",
sqlite_compression_suffix, NULL);
- gchar *fil_db_name = g_strconcat(fil_db_filename,
+ gchar *fil_db_name = g_strconcat(tmp_out_repo, "/filelists.sqlite",
sqlite_compression_suffix, NULL);
- gchar *oth_db_name = g_strconcat(oth_db_filename,
+ gchar *oth_db_name = g_strconcat(tmp_out_repo, "/other.sqlite",
sqlite_compression_suffix, NULL);
cr_db_dbinfo_update(pri_db, pri_xml_rec->checksum, NULL);
// Compress dbs
-
GThreadPool *compress_pool = g_thread_pool_new(cr_compressing_thread,
NULL, 3, FALSE, NULL);
pri_db_task = cr_compressiontask_new(pri_db_filename,
pri_db_name,
sqlite_compression,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
1, NULL);
g_thread_pool_push(compress_pool, pri_db_task, NULL);
fil_db_task = cr_compressiontask_new(fil_db_filename,
fil_db_name,
sqlite_compression,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
1, NULL);
g_thread_pool_push(compress_pool, fil_db_task, NULL);
oth_db_task = cr_compressiontask_new(oth_db_filename,
oth_db_name,
sqlite_compression,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
1, NULL);
g_thread_pool_push(compress_pool, oth_db_task, NULL);
g_thread_pool_free(compress_pool, FALSE, TRUE);
- // Prepare repomd records
+ if (!cmd_options->local_sqlite) {
+ cr_rm(pri_db_filename, CR_RM_FORCE, NULL, NULL);
+ cr_rm(fil_db_filename, CR_RM_FORCE, NULL, NULL);
+ cr_rm(oth_db_filename, CR_RM_FORCE, NULL, NULL);
+ }
+ // Prepare repomd records
pri_db_rec = cr_repomd_record_new("primary_db", pri_db_name);
fil_db_rec = cr_repomd_record_new("filelists_db", fil_db_name);
oth_db_rec = cr_repomd_record_new("other_db", oth_db_name);
cr_RepomdRecordFillTask *oth_db_fill_task;
pri_db_fill_task = cr_repomdrecordfilltask_new(pri_db_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
NULL);
g_thread_pool_push(fill_pool, pri_db_fill_task, NULL);
fil_db_fill_task = cr_repomdrecordfilltask_new(fil_db_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
NULL);
g_thread_pool_push(fill_pool, fil_db_fill_task, NULL);
oth_db_fill_task = cr_repomdrecordfilltask_new(oth_db_rec,
- cmd_options->checksum_type,
+ cmd_options->repomd_checksum_type,
NULL);
g_thread_pool_push(fill_pool, oth_db_fill_task, NULL);
cr_repomdrecordfilltask_free(oth_db_fill_task, NULL);
}
+#ifdef CR_DELTA_RPM_SUPPORT
+ // Delta generation
+ if (cmd_options->deltas) {
+ gchar *filename, *outdeltadir = NULL;
+ gchar *prestodelta_xml_filename = NULL;
+ GHashTable *ht_oldpackagedirs = NULL;
+ cr_XmlFile *prestodelta_cr_file = NULL;
+ cr_ContentStat *prestodelta_stat = NULL;
+
+ filename = g_strconcat("prestodelta.xml",
+ prestodelta_compression_suffix,
+ NULL);
+ outdeltadir = g_build_filename(out_dir, OUTDELTADIR, NULL);
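+ // (OUTDELTADIR names the drpms/ subdirectory of the output repository)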
+ prestodelta_xml_filename = g_build_filename(tmp_out_repo,
+ filename,
+ NULL);
+ g_free(filename);
+
+ // 0) Prepare outdeltadir
+ if (g_file_test(outdeltadir, G_FILE_TEST_EXISTS)) {
+ if (!g_file_test(outdeltadir, G_FILE_TEST_IS_DIR)) {
+ g_critical("The file %s already exists and it is not a directory",
+ outdeltadir);
+ goto deltaerror;
+ }
+ } else if (g_mkdir(outdeltadir, S_IRWXU|S_IRWXG|S_IROTH|S_IXOTH)) {
+ g_critical("Cannot create %s: %s", outdeltadir, g_strerror(errno));
+ goto deltaerror;
+ }
- // Add checksums into files names
+ // 1) Scan old package directories
+ ht_oldpackagedirs = cr_deltarpms_scan_oldpackagedirs(cmd_options->oldpackagedirs_paths,
+ cmd_options->max_delta_rpm_size,
+ &tmp_err);
+ if (!ht_oldpackagedirs) {
+ g_critical("cr_deltarpms_scan_oldpackagedirs failed: %s\n", tmp_err->message);
+ g_clear_error(&tmp_err);
+ goto deltaerror;
+ }
+
+ // 2) Generate drpms in parallel
+ ret = cr_deltarpms_parallel_deltas(user_data.deltatargetpackages,
+ ht_oldpackagedirs,
+ outdeltadir,
+ cmd_options->num_deltas,
+ cmd_options->workers,
+ cmd_options->max_delta_rpm_size,
+ cmd_options->max_delta_rpm_size,
+ &tmp_err);
+ if (!ret) {
+ g_critical("Parallel generation of drpms failed: %s", tmp_err->message);
+ g_clear_error(&tmp_err);
+ goto deltaerror;
+ }
+ // 3) Generate prestodelta.xml file
+ prestodelta_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL);
+ prestodelta_cr_file = cr_xmlfile_sopen_prestodelta(prestodelta_xml_filename,
+ prestodelta_compression,
+ prestodelta_stat,
+ &tmp_err);
+ if (!prestodelta_cr_file) {
+ g_critical("Cannot open %s: %s", prestodelta_xml_filename, tmp_err->message);
+ g_clear_error(&tmp_err);
+ goto deltaerror;
+ }
+
+ ret = cr_deltarpms_generate_prestodelta_file(
+ outdeltadir,
+ prestodelta_cr_file,
+ //cmd_options->checksum_type,
+ CR_CHECKSUM_SHA256, // Createrepo always uses SHA256
+ cmd_options->workers,
+ out_dir,
+ &tmp_err);
+ if (!ret) {
+ g_critical("Cannot generate %s: %s", prestodelta_xml_filename,
+ tmp_err->message);
+ g_clear_error(&tmp_err);
+ goto deltaerror;
+ }
+
+ cr_xmlfile_close(prestodelta_cr_file, NULL);
+ prestodelta_cr_file = NULL;
+
+ // 4) Prepare repomd record
+ prestodelta_rec = cr_repomd_record_new("prestodelta", prestodelta_xml_filename);
+ cr_repomd_record_load_contentstat(prestodelta_rec, prestodelta_stat);
+ cr_repomd_record_fill(prestodelta_rec, cmd_options->repomd_checksum_type, NULL);
+
+deltaerror:
+ // 5) Cleanup
+ g_hash_table_destroy(ht_oldpackagedirs);
+ g_free(outdeltadir);
+ g_free(prestodelta_xml_filename);
+ cr_xmlfile_close(prestodelta_cr_file, NULL);
+ cr_contentstat_free(prestodelta_stat, NULL);
+ cr_slist_free_full(user_data.deltatargetpackages,
+ (GDestroyNotify) cr_deltatargetpackage_free);
+ }
+#endif
+
+ // Add checksums into files names
if (cmd_options->unique_md_filenames) {
cr_repomd_record_rename_file(pri_xml_rec, NULL);
cr_repomd_record_rename_file(fil_xml_rec, NULL);
cr_repomd_record_rename_file(groupfile_rec, NULL);
cr_repomd_record_rename_file(compressed_groupfile_rec, NULL);
cr_repomd_record_rename_file(updateinfo_rec, NULL);
+ cr_repomd_record_rename_file(prestodelta_rec, NULL);
}
-
// Gen xml
-
cr_repomd_set_record(repomd_obj, pri_xml_rec);
cr_repomd_set_record(repomd_obj, fil_xml_rec);
cr_repomd_set_record(repomd_obj, oth_xml_rec);
cr_repomd_set_record(repomd_obj, groupfile_rec);
cr_repomd_set_record(repomd_obj, compressed_groupfile_rec);
cr_repomd_set_record(repomd_obj, updateinfo_rec);
+ cr_repomd_set_record(repomd_obj, prestodelta_rec);
int i = 0;
while (cmd_options->repo_tags && cmd_options->repo_tags[i])
}
// Write repomd.xml
-
gchar *repomd_path = g_strconcat(tmp_out_repo, "repomd.xml", NULL);
FILE *frepomd = fopen(repomd_path, "w");
if (!frepomd) {
- g_critical("Cannot open %s: %s", repomd_path, strerror(errno));
+ g_critical("Cannot open %s: %s", repomd_path, g_strerror(errno));
exit(EXIT_FAILURE);
}
fputs(repomd_xml, frepomd);
g_free(repomd_path);
- // Move files from out_repo into tmp_out_repo
+ // Final move
+ // Copy selected metadata from the old repository
+ cr_RetentionType retentiontype = CR_RETENTION_DEFAULT;
+ gint64 retentionval = (gint64) cmd_options->retain_old;
- g_debug("Moving data from %s", out_repo);
- if (g_file_test(out_repo, G_FILE_TEST_EXISTS)) {
+ if (cmd_options->retain_old_md_by_age) {
+ retentiontype = CR_RETENTION_BYAGE;
+ retentionval = cmd_options->md_max_age;
+ } else if (cmd_options->compatibility) {
+ retentiontype = CR_RETENTION_COMPATIBILITY;
+ }
- // Delete old metadata
- g_debug("Removing old metadata from %s", out_repo);
- cr_remove_metadata_classic(out_dir, cmd_options->retain_old, NULL);
+ ret = cr_old_metadata_retention(out_repo,
+ tmp_out_repo,
+ retentiontype,
+ retentionval,
+ &tmp_err);
+ if (!ret) {
+ g_critical("%s", tmp_err->message);
+ g_clear_error(&tmp_err);
+ exit(EXIT_FAILURE);
+ }
- // Move files from out_repo to tmp_out_repo
- GDir *dirp;
- dirp = g_dir_open (out_repo, 0, NULL);
- if (!dirp) {
- g_critical("Cannot open directory: %s", out_repo);
- exit(EXIT_FAILURE);
- }
+ gboolean old_repodata_renamed = FALSE;
- const gchar *filename;
- while ((filename = g_dir_read_name(dirp))) {
- gchar *full_path = g_strconcat(out_repo, filename, NULL);
- gchar *new_full_path = g_strconcat(tmp_out_repo, filename, NULL);
-
- // Do not override new file with the old one
- if (g_file_test(new_full_path, G_FILE_TEST_EXISTS)) {
- g_debug("Skip move of: %s -> %s (the destination file already exists)",
- full_path, new_full_path);
- g_debug("Removing: %s", full_path);
- g_remove(full_path);
- g_free(full_path);
- g_free(new_full_path);
- continue;
- }
+ // === This section should be maximally atomic ===
- if (g_rename(full_path, new_full_path) == -1)
- g_critical("Cannot move file %s -> %s", full_path, new_full_path);
- else
- g_debug("Moved %s -> %s", full_path, new_full_path);
+ sigset_t new_mask, old_mask;
+ sigemptyset(&old_mask);
+ sigfillset(&new_mask);
+ sigdelset(&new_mask, SIGKILL); // These two signals cannot be
+ sigdelset(&new_mask, SIGSTOP); // blocked
- g_free(full_path);
- g_free(new_full_path);
- }
+ sigprocmask(SIG_BLOCK, &new_mask, &old_mask);
- g_dir_close(dirp);
+ // Rename out_repo to "repodata.old.pid.date.microsecs"
+ gchar *tmp_dirname = cr_append_pid_and_datetime("repodata.old.", NULL);
+ gchar *old_repodata_path = g_build_filename(out_dir, tmp_dirname, NULL);
+ g_free(tmp_dirname);
- // Remove out_repo
- if (g_rmdir(out_repo) == -1) {
- g_critical("Cannot remove %s", out_repo);
- } else {
- g_debug("Old out repo %s removed", out_repo);
- }
+ if (g_rename(out_repo, old_repodata_path) == -1) {
+ g_debug("Old repodata doesn't exists: Cannot rename %s -> %s: %s",
+ out_repo, old_repodata_path, g_strerror(errno));
+ } else {
+ g_debug("Renamed %s -> %s", out_repo, old_repodata_path);
+ old_repodata_renamed = TRUE;
}
-
// Rename tmp_out_repo to out_repo
if (g_rename(tmp_out_repo, out_repo) == -1) {
- g_critical("Cannot rename %s -> %s", tmp_out_repo, out_repo);
+ g_critical("Cannot rename %s -> %s: %s", tmp_out_repo, out_repo,
+ g_strerror(errno));
+ exit(EXIT_FAILURE);
} else {
g_debug("Renamed %s -> %s", tmp_out_repo, out_repo);
}
+ // Remove lock
+ if (g_strcmp0(lock_dir, tmp_out_repo))
+ // If lock_dir is not the same as the temporary repo dir, remove it
+ cr_remove_dir(lock_dir, NULL);
- // Clean up
+ // Drop the paths stored for the exit cleanup handler
+ cr_unset_cleanup_handler(NULL);
+
+ sigprocmask(SIG_SETMASK, &old_mask, NULL);
+
+ // === End of section that has to be maximally atomic ===
+ if (old_repodata_renamed) {
+ // Remove "metadata.old" dir
+ if (cr_rm(old_repodata_path, CR_RM_RECURSIVE, NULL, &tmp_err)) {
+ g_debug("Old repo %s removed", old_repodata_path);
+ } else {
+ g_warning("Cannot remove %s: %s", old_repodata_path, tmp_err->message);
+ g_clear_error(&tmp_err);
+ }
+ }
+
+
+ // Clean up
g_debug("Memory cleanup");
if (old_metadata)
cr_metadata_free(old_metadata);
+ g_free(old_repodata_path);
g_free(in_repo);
g_free(out_repo);
- tmp_repodata_path = NULL;
g_free(tmp_out_repo);
g_free(in_dir);
g_free(out_dir);
+ g_free(lock_dir);
g_free(pri_xml_filename);
g_free(fil_xml_filename);
g_free(oth_xml_filename);
#include <glib.h>
#include "checksum.h"
#include "compression_wrapper.h"
+#include "deltarpms.h"
#include "error.h"
#include "load_metadata.h"
#include "locate_metadata.h"
#include "repomd.h"
#include "sqlite.h"
#include "threads.h"
+#include "updateinfo.h"
#include "version.h"
#include "xml_dump.h"
#include "xml_file.h"
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2015 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include "error.h"
+#include "misc.h"
+#include "cleanup.h"
+
+
+char *global_lock_dir = NULL; // Path to .repodata/ dir that is used as a lock
+char *global_tmp_out_repo = NULL; // Path to temporary repodata directory,
+ // if NULL then it's the same as
+ // the global_lock_dir
+
+/**
+ * Clean up function called on normal program termination.
+ * It removes the temporary .repodata/ directory that serves as a lock
+ * for other createrepo[_c] processes.
+ * This function acts only if the exit status != EXIT_SUCCESS.
+ *
+ * @param exit_status Status
+ * @param data User data (unused)
+ */
+static void
+failure_exit_cleanup(int exit_status, G_GNUC_UNUSED void *data)
+{
+ if (exit_status != EXIT_SUCCESS) {
+ if (global_lock_dir) {
+ g_debug("Removing %s", global_lock_dir);
+ cr_remove_dir(global_lock_dir, NULL);
+ }
+
+ if (global_tmp_out_repo) {
+ g_debug("Removing %s", global_tmp_out_repo);
+ cr_remove_dir(global_tmp_out_repo, NULL);
+ }
+ }
+}
+
+/** Signal handler
+ * @param sig Signal number
+ */
+static void
+sigint_catcher(int sig)
+{
+ g_message("%s catched: Terminating...", strsignal(sig));
+ exit(1);
+}
+
+gboolean
+cr_set_cleanup_handler(const char *lock_dir,
+ const char *tmp_out_repo,
+ G_GNUC_UNUSED GError **err)
+{
+ assert(!err || *err == NULL);
+
+ // Set global variables
+ global_lock_dir = g_strdup(lock_dir);
+ if (g_strcmp0(lock_dir, tmp_out_repo))
+ global_tmp_out_repo = g_strdup(tmp_out_repo);
+ else
+ global_tmp_out_repo = NULL;
+
+ // Register on-exit cleanup function
+ if (on_exit(failure_exit_cleanup, NULL))
+ g_warning("Cannot set exit cleanup function by on_exit()");
+
+ // Prepare signal handler configuration
+ g_debug("Signal handler setup");
+ struct sigaction sigact;
+ sigact.sa_handler = sigint_catcher;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = 0;
+
+ // Handle signals that terminate (from the POSIX.1-1990)
+ sigaction(SIGHUP, &sigact, NULL);
+ sigaction(SIGINT, &sigact, NULL);
+ sigaction(SIGPIPE, &sigact, NULL);
+ sigaction(SIGALRM, &sigact, NULL);
+ sigaction(SIGTERM, &sigact, NULL);
+ sigaction(SIGUSR1, &sigact, NULL);
+ sigaction(SIGUSR2, &sigact, NULL);
+
+ // Handle signals that terminate (from the POSIX.1-2001)
+ sigaction(SIGPOLL, &sigact, NULL);
+ sigaction(SIGPROF, &sigact, NULL);
+ sigaction(SIGVTALRM, &sigact, NULL);
+
+ return TRUE;
+}
+
+gboolean
+cr_unset_cleanup_handler(G_GNUC_UNUSED GError **err)
+{
+ g_free(global_lock_dir);
+ global_lock_dir = NULL;
+ g_free(global_tmp_out_repo);
+ global_tmp_out_repo = NULL;
+
+ return TRUE;
+}
+
+gboolean
+cr_block_terminating_signals(GError **err)
+{
+ assert(!err || *err == NULL);
+
+ sigset_t intmask;
+
+ sigemptyset(&intmask);
+ sigaddset(&intmask, SIGHUP);
+ sigaddset(&intmask, SIGINT);
+ sigaddset(&intmask, SIGPIPE);
+ sigaddset(&intmask, SIGALRM);
+ sigaddset(&intmask, SIGTERM);
+ sigaddset(&intmask, SIGUSR1);
+ sigaddset(&intmask, SIGUSR2);
+ sigaddset(&intmask, SIGPOLL);
+ sigaddset(&intmask, SIGPROF);
+ sigaddset(&intmask, SIGVTALRM);
+
+ if (sigprocmask(SIG_BLOCK, &intmask, NULL)) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_SIGPROCMASK,
+ "Cannot block terminating signals: %s", g_strerror(errno));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+gboolean
+cr_unblock_terminating_signals(GError **err)
+{
+ assert(!err || *err == NULL);
+
+ sigset_t intmask;
+
+ sigemptyset(&intmask);
+ sigaddset(&intmask, SIGHUP);
+ sigaddset(&intmask, SIGINT);
+ sigaddset(&intmask, SIGPIPE);
+ sigaddset(&intmask, SIGALRM);
+ sigaddset(&intmask, SIGTERM);
+ sigaddset(&intmask, SIGUSR1);
+ sigaddset(&intmask, SIGUSR2);
+ sigaddset(&intmask, SIGPOLL);
+ sigaddset(&intmask, SIGPROF);
+ sigaddset(&intmask, SIGVTALRM);
+
+ if (sigprocmask(SIG_UNBLOCK, &intmask, NULL)) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_SIGPROCMASK,
+ "Cannot unblock terminating signals: %s", g_strerror(errno));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+gboolean
+cr_lock_repo(const gchar *repo_dir,
+ gboolean ignore_lock,
+ gchar **lock_dir_p,
+ gchar **tmp_repodata_dir_p,
+ GError **err)
+{
+ assert(!err || *err == NULL);
+
+ _cleanup_free_ gchar *lock_dir = NULL;
+ _cleanup_free_ gchar *tmp_repodata_dir = NULL;
+ _cleanup_error_free_ GError *tmp_err = NULL;
+
+ lock_dir = g_build_filename(repo_dir, ".repodata/", NULL);
+ *lock_dir_p = g_strdup(lock_dir);
+
+ if (g_mkdir(lock_dir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) {
+ if (errno != EEXIST) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Error while creating temporary repodata "
+ "directory: %s: %s", lock_dir, g_strerror(errno));
+ return FALSE;
+ }
+
+ g_debug("Temporary repodata directory: %s already exists! "
+ "(Another createrepo process is running?)", lock_dir);
+
+ if (ignore_lock == FALSE) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Temporary repodata directory %s already exists! "
+ "(Another createrepo process is running?)", lock_dir);
+ return FALSE;
+ }
+
+ // The next section takes place only if --ignore-lock is used
+ // Ugly, but the user wants it -> it's their fault if something gets broken
+
+ // Remove existing .repodata/
+ g_debug("(--ignore-lock enabled) Let's remove the old .repodata/");
+ if (cr_rm(lock_dir, CR_RM_RECURSIVE, NULL, &tmp_err)) {
+ g_debug("(--ignore-lock enabled) Removed: %s", lock_dir);
+ } else {
+ g_critical("(--ignore-lock enabled) Cannot remove %s: %s",
+ lock_dir, tmp_err->message);
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot remove %s (--ignore-lock enabled) :%s",
+ lock_dir, tmp_err->message);
+ return FALSE;
+ }
+
+ // Try to create own - just as a lock
+ if (g_mkdir(lock_dir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) {
+ g_critical("(--ignore-lock enabled) Cannot create %s: %s",
+ lock_dir, g_strerror(errno));
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot create: %s (--ignore-lock enabled): %s",
+ lock_dir, g_strerror(errno));
+ return FALSE;
+ } else {
+ g_debug("(--ignore-lock enabled) Own and empty %s created "
+ "(serves as a lock)", lock_dir);
+ }
+
+ // For data generation use a different directory
+ _cleanup_free_ gchar *tmp = NULL;
+ tmp = g_build_filename(repo_dir, ".repodata.", NULL);
+ tmp_repodata_dir = cr_append_pid_and_datetime(tmp, "/");
+
+ if (g_mkdir(tmp_repodata_dir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) {
+ g_critical("(--ignore-lock enabled) Cannot create %s: %s",
+ tmp_repodata_dir, g_strerror(errno));
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot create: %s (--ignore-lock enabled): %s",
+ tmp_repodata_dir, g_strerror(errno));
+ return FALSE;
+ } else {
+ g_debug("(--ignore-lock enabled) For data generation is used: %s",
+ tmp_repodata_dir);
+ }
+ }
+
+ if (tmp_repodata_dir)
+ *tmp_repodata_dir_p = g_strdup(tmp_repodata_dir);
+ else
+ *tmp_repodata_dir_p = g_strdup(lock_dir);
+
+ return TRUE;
+}
+
+void
+cr_setup_logging(gboolean quiet, gboolean verbose)
+{
+ g_log_set_default_handler (cr_log_fn, NULL);
+
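+ // "C_CREATEREPOLIB" is the log domain used by the createrepo_c library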
+ if (quiet) {
+ // Quiet mode
+ GLogLevelFlags levels = G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO |
+ G_LOG_LEVEL_DEBUG | G_LOG_LEVEL_WARNING;
+ g_log_set_handler(NULL, levels, cr_null_log_fn, NULL);
+ g_log_set_handler("C_CREATEREPOLIB", levels, cr_null_log_fn, NULL);
+ } else if (verbose) {
+ // Verbose mode
+ GLogLevelFlags levels = G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO |
+ G_LOG_LEVEL_DEBUG | G_LOG_LEVEL_WARNING;
+ g_log_set_handler(NULL, levels, cr_log_fn, NULL);
+ g_log_set_handler("C_CREATEREPOLIB", levels, cr_log_fn, NULL);
+ } else {
+ // Standard mode
+ GLogLevelFlags levels = G_LOG_LEVEL_DEBUG;
+ g_log_set_handler(NULL, levels, cr_null_log_fn, NULL);
+ g_log_set_handler("C_CREATEREPOLIB", levels, cr_null_log_fn, NULL);
+ }
+}
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __C_CREATEREPOLIB_CREATEREPO_SHARED_H__
+#define __C_CREATEREPOLIB_CREATEREPO_SHARED_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <glib.h>
+#include "checksum.h"
+#include "compression_wrapper.h"
+#include "package.h"
+
+/** \defgroup createrepo_shared Createrepo API.
+ *
+ * Module with createrepo API
+ *
+ * \addtogroup createrepo_shared
+ * @{
+ */
+
+
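+/* Typical call sequence (a sketch mirroring createrepo_c's main()):
+ *
+ *   cr_setup_logging(quiet, verbose);
+ *   cr_block_terminating_signals(&err);   // enter pseudo-atomic section
+ *   cr_lock_repo(repo_dir, FALSE, &lock_dir, &tmp_out_repo, &err);
+ *   cr_set_cleanup_handler(lock_dir, tmp_out_repo, &err);
+ *   cr_unblock_terminating_signals(&err); // leave pseudo-atomic section
+ */
+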
+/**
+ * This function does:
+ * 1) Sets an on-exit cleanup function that removes temporary directories
+ * 2) Sets a signal handler for signals that lead to process termination.
+ * (List obtained from "man 7 signal")
+ * Signals that are ignored (SIGCHLD) or that only stop the process
+ * (SIGSTOP, ...) don't get this handler - they do not terminate the
+ * process! This handler ensures that the cleanup function hooked on
+ * exit gets called.
+ *
+ * @param lock_dir Dir that serves as lock (".repodata/")
+ * @param tmp_out_repo Dir that is really used for repodata generation
+ * (usually exactly the same as lock dir if not
+ * --ignore-lock is specified). Could be NULL.
+ * @return TRUE on success, FALSE if err is set.
+ */
+gboolean
+cr_set_cleanup_handler(const char *lock_dir,
+ const char *tmp_out_repo,
+ GError **err);
+
+/**
+ * Block process terminating signals.
+ * (Useful for creating pseudo-atomic sections in code)
+ */
+gboolean
+cr_block_terminating_signals(GError **err);
+
+/**
+ * Unblock process terminating signals.
+ */
+gboolean
+cr_unblock_terminating_signals(GError **err);
+
+
+/**
+ * This function does:
+ * - Tries to create repo/.repodata/ dir.
+ * - If it doesn't exist, it's created and the function returns TRUE.
+ * - If it exists and ignore_lock is FALSE, returns FALSE and err is set.
+ * - If it exists and ignore_lock is TRUE it:
+ * - Removes the existing .repodata/ dir and all its content
+ * - Creates (empty) new one (just as a lock dir - place holder)
+ * - Creates .repodata.pid.datetime.usec/ that should be used for
+ * repodata generation
+ *
+ * @param repo_dir Path to repo (a dir that contains repodata/ subdir)
+ * @param ignore_lock Ignore existing .repodata/ dir - remove it and
+ * create a new one.
+ * @param lock_dir Location to store path to a directory used as
+ * a lock. Always repodir+"/.repodata/".
+ * Even if FALSE is returned, the content of this
+ * variable IS DEFINED.
+ * @param tmp_repodata_dir Location to store a path to a directory used as
+ * a temporary directory for repodata generation.
+ * If ignore_lock is FALSE then
+ * lock_dir is the same as tmp_repodata_dir.
+ * If FALSE is returned, the content of this variable
+ * is undefined.
+ * @param err GError **
+ * @return TRUE on success, FALSE if err is set.
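+ *
+ * A minimal usage sketch (the repo path is hypothetical):
+ * \code
+ * gchar *lock_dir = NULL, *tmp_repodata_dir = NULL;
+ * GError *err = NULL;
+ * if (!cr_lock_repo("/srv/myrepo", FALSE, &lock_dir,
+ *                   &tmp_repodata_dir, &err))
+ *     g_printerr("%s\n", err->message);
+ * \endcode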
+ */
+gboolean
+cr_lock_repo(const gchar *repo_dir,
+ gboolean ignore_lock,
+ gchar **lock_dir,
+ gchar **tmp_repodata_dir,
+ GError **err);
+
+/**
+ * Unset cleanup handler.
+ * @param err GError **
+ * @return TRUE on success, FALSE if err is set.
+ */
+gboolean
+cr_unset_cleanup_handler(GError **err);
+
+/**
+ * Set up logging for the application.
+ */
+void
+cr_setup_logging(gboolean quiet, gboolean verbose);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __C_CREATEREPOLIB_CREATEREPO_SHARED_H__ */
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "deltarpms.h"
+#ifdef CR_DELTA_RPM_SUPPORT
+#include <drpm.h>
+#endif
+#include "package.h"
+#include "parsepkg.h"
+#include "misc.h"
+#include "error.h"
+
+
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+#define MAKEDELTARPM "/usr/bin/makedeltarpm"
+
+
+gboolean
+cr_drpm_support(void)
+{
+#ifdef CR_DELTA_RPM_SUPPORT
+ if (g_file_test(MAKEDELTARPM, G_FILE_TEST_IS_REGULAR
+ | G_FILE_TEST_IS_EXECUTABLE))
+ return TRUE;
+#endif
+ return FALSE;
+}
+
+#ifdef CR_DELTA_RPM_SUPPORT
+
+char *
+cr_drpm_create(cr_DeltaTargetPackage *old,
+ cr_DeltaTargetPackage *new,
+ const char *destdir,
+ GError **err)
+{
+ gchar *drpmfn, *drpmpath, *error_str = NULL;
+ GPtrArray *cmd_array;
+ int spawn_flags = G_SPAWN_SEARCH_PATH | G_SPAWN_STDOUT_TO_DEV_NULL;
+ GError *tmp_err = NULL;
+ gint status = 0;
+ gboolean ret;
+
+ drpmfn = g_strdup_printf("%s-%s-%s_%s-%s.%s.drpm",
+ old->name, old->version, old->release,
+ new->version, new->release, old->arch);
+ drpmpath = g_build_filename(destdir, drpmfn, NULL);
+ g_free(drpmfn);
+
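+ // Build argv for: makedeltarpm <old.rpm> <new.rpm> <out.drpm>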
+ cmd_array = g_ptr_array_new();
+ g_ptr_array_add(cmd_array, MAKEDELTARPM);
+ g_ptr_array_add(cmd_array, (gpointer) old->path);
+ g_ptr_array_add(cmd_array, (gpointer) new->path);
+ g_ptr_array_add(cmd_array, (gpointer) drpmpath);
+ g_ptr_array_add(cmd_array, (gpointer) NULL);
+
+ g_spawn_sync(NULL, // working directory
+ (char **) cmd_array->pdata, // argv
+ NULL, // envp
+ spawn_flags, // spawn flags
+ NULL, // child setup function
+ NULL, // user data for child setup
+ NULL, // stdout
+ &error_str, // stderr
+ &status, // status
+ &tmp_err // err
+ );
+
+ g_ptr_array_free(cmd_array, TRUE);
+
+ if (tmp_err) {
+ g_free(error_str);
+ free(drpmpath);
+ g_propagate_error(err, tmp_err);
+ return NULL;
+ }
+
+ ret = cr_spawn_check_exit_status(status, &tmp_err);
+ if (!ret) {
+ g_propagate_prefixed_error(err, tmp_err, "%s: ", error_str);
+ free(drpmpath);
+ g_free(error_str);
+ return NULL;
+ }
+
+ g_free(error_str);
+
+ return drpmpath;
+}
+
+void
+cr_deltapackage_free(cr_DeltaPackage *deltapackage)
+{
+ if (!deltapackage)
+ return;
+ cr_package_free(deltapackage->package);
+ g_string_chunk_free(deltapackage->chunk);
+ g_free(deltapackage);
+}
+
+cr_DeltaPackage *
+cr_deltapackage_from_drpm_base(const char *filename,
+ int changelog_limit,
+ cr_HeaderReadingFlags flags,
+ GError **err)
+{
+ struct drpm *delta = NULL;
+ cr_DeltaPackage *deltapackage = NULL;
+ char *str;
+ int ret;
+
+ assert(!err || *err == NULL);
+
+ deltapackage = g_new0(cr_DeltaPackage, 1);
+ deltapackage->chunk = g_string_chunk_new(0);
+
+ deltapackage->package = cr_package_from_rpm_base(filename,
+ changelog_limit,
+ flags,
+ err);
+ if (!deltapackage->package)
+ goto errexit;
+
+ ret = drpm_read(&delta, filename);
+ if (ret != DRPM_ERR_OK) {
+ g_set_error(err, ERR_DOMAIN, CRE_DELTARPM,
+ "Deltarpm cannot read %s (%d)", filename, ret);
+ goto errexit;
+ }
+
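+ // SRCNEVR (source package NEVR) and SEQUENCE identify the rpm the delta
+ // was built from and the sequence id consumed by applydeltarpm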
+ ret = drpm_get_string(delta, DRPM_TAG_SRCNEVR, &str);
+ if (ret != DRPM_ERR_OK) {
+ g_set_error(err, ERR_DOMAIN, CRE_DELTARPM,
+ "Deltarpm cannot read source NEVR from %s (%d)",
+ filename, ret);
+ goto errexit;
+ }
+
+ deltapackage->nevr = cr_safe_string_chunk_insert_null(
+ deltapackage->chunk, str);
+
+ ret = drpm_get_string(delta, DRPM_TAG_SEQUENCE, &str);
+ if (ret != DRPM_ERR_OK) {
+ g_set_error(err, ERR_DOMAIN, CRE_DELTARPM,
+ "Deltarpm cannot read delta sequence from %s (%d)",
+ filename, ret);
+ goto errexit;
+ }
+
+ deltapackage->sequence = cr_safe_string_chunk_insert_null(
+ deltapackage->chunk, str);
+
+ drpm_destroy(&delta);
+
+ return deltapackage;
+
+errexit:
+
+ if (delta)
+ drpm_destroy(&delta);
+ cr_deltapackage_free(deltapackage);
+
+ return NULL;
+}
+
+
+static void
+cr_free_gslist_of_strings(gpointer list)
+{
+ if (!list) return;
+ cr_slist_free_full((GSList *) list, (GDestroyNotify) g_free);
+}
+
+/*
+ * 1) Scanning for old candidate rpms
+ */
+
+GHashTable *
+cr_deltarpms_scan_oldpackagedirs(GSList *oldpackagedirs,
+ gint64 max_delta_rpm_size,
+ GError **err)
+{
+ GHashTable *ht = NULL;
+
+ assert(!err || *err == NULL);
+
+ ht = g_hash_table_new_full(g_str_hash,
+ g_str_equal,
+ (GDestroyNotify) g_free,
+ (GDestroyNotify) cr_free_gslist_of_strings);
+
+ for (GSList *elem = oldpackagedirs; elem; elem = g_slist_next(elem)) {
+ gchar *dirname = elem->data;
+ const gchar *filename;
+ GDir *dirp;
+ GSList *filenames = NULL;
+
+ dirp = g_dir_open(dirname, 0, NULL);
+ if (!dirp) {
+ g_warning("Cannot open directory %s", dirname);
+ continue;
+ }
+
+ while ((filename = g_dir_read_name(dirp))) {
+ gchar *full_path;
+ struct stat st;
+
+ if (!g_str_has_suffix(filename, ".rpm"))
+ continue; // Skip non rpm files
+
+ full_path = g_build_filename(dirname, filename, NULL);
+
+ if (stat(full_path, &st) == -1) {
+ g_warning("Cannot stat %s: %s", full_path, g_strerror(errno));
+ g_free(full_path);
+ continue;
+ }
+
+ if (st.st_size > max_delta_rpm_size) {
+ g_debug("%s: Skipping %s that is > max_delta_rpm_size",
+ __func__, full_path);
+ g_free(full_path);
+ continue;
+ }
+
+ g_free(full_path);
+
+ filenames = g_slist_prepend(filenames, g_strdup(filename));
+ }
+
+ if (filenames) {
+ g_hash_table_replace(ht,
+ (gpointer) g_strdup(dirname),
+ (gpointer) filenames);
+ }
+
+ g_dir_close(dirp);
+ }
+
+
+ return ht;
+}
+
+/*
+ * 2) Parallel delta generation
+ */
+
+
+typedef struct {
+ cr_DeltaTargetPackage *tpkg;
+} cr_DeltaTask;
+
+
+typedef struct {
+ const char *outdeltadir;
+ gint num_deltas;
+ GHashTable *oldpackages;
+ GMutex *mutex;
+ gint64 active_work_size;
+ gint active_tasks;
+ GCond *cond_task_finished;
+} cr_DeltaThreadUserData;
+
+
+static gint
+cmp_deltatargetpackage_evr(gconstpointer aa, gconstpointer bb)
+{
+ const cr_DeltaTargetPackage *a = aa;
+ const cr_DeltaTargetPackage *b = bb;
+
+ return cr_cmp_evr(a->epoch, a->version, a->release,
+ b->epoch, b->version, b->release);
+}
+
+
+static void
+cr_delta_thread(gpointer data, gpointer udata)
+{
+ cr_DeltaTask *task = data;
+ cr_DeltaThreadUserData *user_data = udata;
+ cr_DeltaTargetPackage *tpkg = task->tpkg; // Shortcut
+
+ GHashTableIter iter;
+ gpointer key, value;
+
+ // Iterate through specified oldpackage directories
+ g_hash_table_iter_init(&iter, user_data->oldpackages);
+ while (g_hash_table_iter_next(&iter, &key, &value)) {
+ gchar *dirname = key;
+ GSList *local_candidates = NULL;
+
+ // Select appropriate candidates from the directory
+ for (GSList *elem = value; elem; elem = g_slist_next(elem)) {
+ gchar *filename = elem->data;
+ if (g_str_has_prefix(filename, tpkg->name)) {
+ cr_DeltaTargetPackage *l_tpkg;
+ gchar *path = g_build_filename(dirname, filename, NULL);
+ l_tpkg = cr_deltatargetpackage_from_rpm(path, NULL);
+ g_free(path);
+ if (!l_tpkg)
+ continue;
+
+ // Check the candidate more carefully
+
+ if (g_strcmp0(tpkg->name, l_tpkg->name)) {
+ cr_deltatargetpackage_free(l_tpkg);
+ continue;
+ }
+
+ if (g_strcmp0(tpkg->arch, l_tpkg->arch)) {
+ cr_deltatargetpackage_free(l_tpkg);
+ continue;
+ }
+
+ if (cr_cmp_evr(tpkg->epoch, tpkg->version, tpkg->release,
+ l_tpkg->epoch, l_tpkg->version, l_tpkg->release) <= 0)
+ {
+ cr_deltatargetpackage_free(l_tpkg);
+ continue;
+ }
+
+ // This candidate looks good
+ local_candidates = g_slist_prepend(local_candidates, l_tpkg);
+ }
+ }
+
+ // Sort the candidates
+ local_candidates = g_slist_sort(local_candidates,
+ cmp_deltatargetpackage_evr);
+ local_candidates = g_slist_reverse(local_candidates);
+
+ // Generate deltas
+ int x = 0;
+ for (GSList *lelem = local_candidates; lelem; lelem = g_slist_next(lelem)){
+ GError *tmp_err = NULL;
+ cr_DeltaTargetPackage *old = lelem->data;
+
+ g_debug("Generating delta %s -> %s", old->path, tpkg->path);
+ cr_drpm_create(old, tpkg, user_data->outdeltadir, &tmp_err);
+ if (tmp_err) {
+ g_warning("Cannot generate delta %s -> %s : %s",
+ old->path, tpkg->path, tmp_err->message);
+ g_error_free(tmp_err);
+ continue;
+ }
+ if (++x == user_data->num_deltas)
+ break;
+ }
+ }
+
+ g_debug("Deltas for \"%s\" (%"G_GINT64_FORMAT") generated",
+ tpkg->name, tpkg->size_installed);
+
+ g_mutex_lock(user_data->mutex);
+ user_data->active_work_size -= tpkg->size_installed;
+ user_data->active_tasks--;
+ g_cond_signal(user_data->cond_task_finished);
+ g_mutex_unlock(user_data->mutex);
+ g_free(task);
+}
+
+
+static gint
+cmp_deltatargetpackage_sizes(gconstpointer a, gconstpointer b)
+{
+ const cr_DeltaTargetPackage *dtpk_a = a;
+ const cr_DeltaTargetPackage *dtpk_b = b;
+
+ if (dtpk_a->size_installed < dtpk_b->size_installed)
+ return -1;
+ else if (dtpk_a->size_installed == dtpk_b->size_installed)
+ return 0;
+ else
+ return 1;
+}
+
+
+gboolean
+cr_deltarpms_parallel_deltas(GSList *targetpackages,
+ GHashTable *oldpackages,
+ const char *outdeltadir,
+ gint num_deltas,
+ gint workers,
+ gint64 max_delta_rpm_size,
+ gint64 max_work_size,
+ GError **err)
+{
+ GThreadPool *pool;
+ cr_DeltaThreadUserData user_data;
+ GList *targets = NULL;
+ GError *tmp_err = NULL;
+
+ assert(!err || *err == NULL);
+
+ if (num_deltas < 1)
+ return TRUE;
+
+ if (workers < 1) {
+ g_set_error(err, ERR_DOMAIN, CRE_DELTARPM,
+ "Number of delta workers must be a positive integer number");
+ return FALSE;
+ }
+
+ // Init user_data
+ user_data.outdeltadir = outdeltadir;
+ user_data.num_deltas = num_deltas;
+ user_data.oldpackages = oldpackages;
+ user_data.mutex = g_mutex_new();
+ user_data.active_work_size = G_GINT64_CONSTANT(0);
+ user_data.active_tasks = 0;
+ user_data.cond_task_finished = g_cond_new();
+
+ // Make a sorted list of targets, skipping packages
+ // that are bigger than max_delta_rpm_size
+ for (GSList *elem = targetpackages; elem; elem = g_slist_next(elem)) {
+ cr_DeltaTargetPackage *tpkg = elem->data;
+ if (tpkg->size_installed < max_delta_rpm_size)
+ targets = g_list_insert_sorted(targets, tpkg, cmp_deltatargetpackage_sizes);
+ }
+ targets = g_list_reverse(targets);
+
+ // Setup the pool of workers
+ pool = g_thread_pool_new(cr_delta_thread,
+ &user_data,
+ workers,
+ TRUE,
+ &tmp_err);
+ if (tmp_err) {
+ g_propagate_prefixed_error(err, tmp_err, "Cannot create delta pool: ");
+ return FALSE;
+ }
+
+ // Push tasks into the pool
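+ // A task is pushed only while the summed installed size of in-flight
+ // tasks stays under max_work_size; this throttles the peak resource
+ // usage of parallel delta generation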
+ while (targets) {
+ gboolean inserted = FALSE;
+ gint64 active_work_size;
+ gint64 active_tasks;
+
+ g_mutex_lock(user_data.mutex);
+ while (user_data.active_tasks == workers)
+ // Wait if all available threads are busy
+ g_cond_wait(user_data.cond_task_finished, user_data.mutex);
+ active_work_size = user_data.active_work_size;
+ active_tasks = user_data.active_tasks;
+ g_mutex_unlock(user_data.mutex);
+
+ for (GList *elem = targets; elem; elem = g_list_next(elem)) {
+ cr_DeltaTargetPackage *tpkg = elem->data;
+ if ((active_work_size + tpkg->size_installed) <= max_work_size) {
+ cr_DeltaTask *task = g_new0(cr_DeltaTask, 1);
+ task->tpkg = tpkg;
+
+ g_mutex_lock(user_data.mutex);
+ user_data.active_work_size += tpkg->size_installed;
+ user_data.active_tasks++;
+ g_mutex_unlock(user_data.mutex);
+
+ g_thread_pool_push(pool, task, NULL);
+ targets = g_list_delete_link(targets, elem);
+ inserted = TRUE;
+ break;
+ }
+ }
+
+ if (!inserted) {
+ // In this iteration, no task was pushed to the pool
+ g_mutex_lock(user_data.mutex);
+ while (user_data.active_tasks == active_tasks)
+ // Wait until any of running tasks finishes
+ g_cond_wait(user_data.cond_task_finished, user_data.mutex);
+ g_mutex_unlock(user_data.mutex);
+ }
+ }
+
+ g_thread_pool_free(pool, FALSE, TRUE);
+ g_list_free(targets);
+ g_mutex_free(user_data.mutex);
+ g_cond_free(user_data.cond_task_finished);
+
+ return TRUE;
+}
+
+
+cr_DeltaTargetPackage *
+cr_deltatargetpackage_from_package(cr_Package *pkg,
+ const char *path,
+ GError **err)
+{
+ cr_DeltaTargetPackage *tpkg;
+
+ assert(pkg);
+ assert(!err || *err == NULL);
+
+ tpkg = g_new0(cr_DeltaTargetPackage, 1);
+ tpkg->chunk = g_string_chunk_new(0);
+ tpkg->name = cr_safe_string_chunk_insert(tpkg->chunk, pkg->name);
+ tpkg->arch = cr_safe_string_chunk_insert(tpkg->chunk, pkg->arch);
+ tpkg->epoch = cr_safe_string_chunk_insert(tpkg->chunk, pkg->epoch);
+ tpkg->version = cr_safe_string_chunk_insert(tpkg->chunk, pkg->version);
+ tpkg->release = cr_safe_string_chunk_insert(tpkg->chunk, pkg->release);
+ tpkg->location_href = cr_safe_string_chunk_insert(tpkg->chunk, pkg->location_href);
+ tpkg->size_installed = pkg->size_installed;
+ tpkg->path = cr_safe_string_chunk_insert(tpkg->chunk, path);
+
+ return tpkg;
+}
+
+
+cr_DeltaTargetPackage *
+cr_deltatargetpackage_from_rpm(const char *path, GError **err)
+{
+ cr_Package *pkg;
+ cr_DeltaTargetPackage *tpkg;
+
+ assert(!err || *err == NULL);
+
+ pkg = cr_package_from_rpm_base(path, 0, 0, err);
+ if (!pkg)
+ return NULL;
+
+ tpkg = cr_deltatargetpackage_from_package(pkg, path, err);
+ cr_package_free(pkg);
+ return tpkg;
+}
+
+
+void
+cr_deltatargetpackage_free(cr_DeltaTargetPackage *tpkg)
+{
+ if (!tpkg)
+ return;
+ g_string_chunk_free(tpkg->chunk);
+ g_free(tpkg);
+}
+
+
+GSList *
+cr_deltarpms_scan_targetdir(const char *path,
+ gint64 max_delta_rpm_size,
+ GError **err)
+{
+ GSList *targets = NULL;
+ GQueue *sub_dirs = g_queue_new();
+ GStringChunk *sub_dirs_chunk = g_string_chunk_new(1024);
+
+ assert(!err || *err == NULL);
+
+ g_queue_push_head(sub_dirs, g_strdup(path));
+
+ // Recursively walk the dir
+ gchar *dirname;
+ while ((dirname = g_queue_pop_head(sub_dirs))) {
+
+ // Open the directory
+ GDir *dirp = g_dir_open(dirname, 0, NULL);
+ if (!dirp) {
+ g_warning("Cannot open directory %s", dirname);
+ return NULL;
+ }
+
+ // Iterate over files in directory
+ const gchar *filename;
+ while ((filename = g_dir_read_name(dirp))) {
+ gchar *full_path;
+ struct stat st;
+ cr_DeltaTargetPackage *tpkg;
+
+ full_path = g_build_filename(dirname, filename, NULL);
+
+ if (!g_str_has_suffix(filename, ".rpm")) {
+ if (g_file_test(full_path, G_FILE_TEST_IS_DIR)) {
+ // Directory
+ gchar *sub_dir_in_chunk;
+ sub_dir_in_chunk = g_string_chunk_insert(sub_dirs_chunk,
+ full_path);
+ g_queue_push_head(sub_dirs, sub_dir_in_chunk);
+ g_debug("Dir to scan: %s", sub_dir_in_chunk);
+ }
+ g_free(full_path);
+ continue;
+ }
+
+ if (stat(full_path, &st) == -1) {
+ g_warning("Cannot stat %s: %s", full_path, g_strerror(errno));
+ g_free(full_path);
+ continue;
+ }
+
+ if (st.st_size > max_delta_rpm_size) {
+ g_debug("%s: Skipping %s that is > max_delta_rpm_size",
+ __func__, full_path);
+ g_free(full_path);
+ continue;
+ }
+
+ tpkg = cr_deltatargetpackage_from_rpm(full_path, NULL);
+ if (tpkg)
+ targets = g_slist_prepend(targets, tpkg);
+ g_free(full_path);
+ }
+
+ g_dir_close(dirp);
+ }
+
+ cr_queue_free_full(sub_dirs, g_free);
+ g_string_chunk_free(sub_dirs_chunk);
+
+ return targets;
+}
+
+
+/*
+ * 3) Parallel xml chunk generation
+ */
+
+typedef struct {
+ gchar *full_path;
+} cr_PrestoDeltaTask;
+
+typedef struct {
+ GMutex *mutex;
+ GHashTable *ht;
+ cr_ChecksumType checksum_type;
+ const gchar *prefix_to_strip;
+ size_t prefix_len;
+} cr_PrestoDeltaUserData;
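+// ht maps a package NEVRA string to a GSList of per-delta XML chunks;
+// it is shared by all worker threads and guarded by mutex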
+
+void
+cr_prestodeltatask_free(cr_PrestoDeltaTask *task)
+{
+ if (!task)
+ return;
+ g_free(task->full_path);
+ g_free(task);
+}
+
+static gboolean
+walk_drpmsdir(const gchar *drpmsdir, GSList **inlist, GError **err)
+{
+ gboolean ret = TRUE;
+ GSList *candidates = NULL;
+ GQueue *sub_dirs = g_queue_new();
+ GStringChunk *sub_dirs_chunk = g_string_chunk_new(1024);
+
+ assert(drpmsdir);
+ assert(inlist);
+ assert(!err || *err == NULL);
+
+ g_queue_push_head(sub_dirs, g_strdup(drpmsdir));
+
+ // Recursively walk the drpmsdir
+ gchar *dirname;
+ while ((dirname = g_queue_pop_head(sub_dirs))) {
+
+ // Open the directory
+ GDir *dirp = g_dir_open(dirname, 0, NULL);
+ if (!dirp) {
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open directory %s", dirname);
+ ret = FALSE;
+ goto exit;
+ }
+
+ // Iterate over files in directory
+ const gchar *filename;
+ while ((filename = g_dir_read_name(dirp))) {
+ gchar *full_path = g_build_filename(dirname, filename, NULL);
+
+ // Non .drpm files
+ if (!g_str_has_suffix (filename, ".drpm")) {
+ if (g_file_test(full_path, G_FILE_TEST_IS_DIR)) {
+ // Directory
+ gchar *sub_dir_in_chunk;
+ sub_dir_in_chunk = g_string_chunk_insert(sub_dirs_chunk,
+ full_path);
+ g_queue_push_head(sub_dirs, sub_dir_in_chunk);
+ g_debug("Dir to scan: %s", sub_dir_in_chunk);
+ }
+ g_free(full_path);
+ continue;
+ }
+
+ // Take the file
+ cr_PrestoDeltaTask *task = g_new0(cr_PrestoDeltaTask, 1);
+ task->full_path = full_path;
+ candidates = g_slist_prepend(candidates, task);
+ }
+ g_free(dirname);
+ g_dir_close(dirp);
+ }
+
+ *inlist = candidates;
+ candidates = NULL;
+
+exit:
+ g_slist_free_full(candidates, (GDestroyNotify) cr_prestodeltatask_free);
+ cr_queue_free_full(sub_dirs, g_free);
+ g_string_chunk_free(sub_dirs_chunk);
+
+ return ret;
+}
+
+
+static void
+cr_prestodelta_thread(gpointer data, gpointer udata)
+{
+ cr_PrestoDeltaTask *task = data;
+ cr_PrestoDeltaUserData *user_data = udata;
+
+ cr_DeltaPackage *dpkg = NULL;
+ struct stat st;
+ gchar *xml_chunk = NULL, *key = NULL, *checksum = NULL;
+ GError *tmp_err = NULL;
+
+ printf("%s\n", task->full_path);
+
+ // Load delta package
+ dpkg = cr_deltapackage_from_drpm_base(task->full_path, 0, 0, &tmp_err);
+ if (!dpkg) {
+ g_warning("Cannot read drpm %s: %s", task->full_path, tmp_err->message);
+ g_error_free(tmp_err);
+ goto exit;
+ }
+
+ // Set the filename
+ dpkg->package->location_href = cr_safe_string_chunk_insert(
+ dpkg->package->chunk,
+ task->full_path + user_data->prefix_len);
+
+ // Stat the package (to get the size)
+ if (stat(task->full_path, &st) == -1) {
+ g_warning("%s: stat(%s) error (%s)", __func__,
+ task->full_path, g_strerror(errno));
+ goto exit;
+ } else {
+ dpkg->package->size_package = st.st_size;
+ }
+
+ // Calculate the checksum
+ checksum = cr_checksum_file(task->full_path,
+ user_data->checksum_type,
+ &tmp_err);
+ if (!checksum) {
+ g_warning("Cannot calculate checksum for %s: %s",
+ task->full_path, tmp_err->message);
+ g_error_free(tmp_err);
+ goto exit;
+ }
+ dpkg->package->checksum_type = cr_safe_string_chunk_insert(
+ dpkg->package->chunk,
+ cr_checksum_name_str(
+ user_data->checksum_type));
+ dpkg->package->pkgId = cr_safe_string_chunk_insert(dpkg->package->chunk,
+ checksum);
+
+ // Generate XML
+ xml_chunk = cr_xml_dump_deltapackage(dpkg, &tmp_err);
+ if (tmp_err) {
+ g_warning("Cannot generate xml for drpm %s: %s",
+ task->full_path, tmp_err->message);
+ g_error_free(tmp_err);
+ goto exit;
+ }
+
+ // Put the XML into the shared hash table
+ gpointer pkey = NULL;
+ gpointer pval = NULL;
+ key = cr_package_nevra(dpkg->package);
+ g_mutex_lock(user_data->mutex);
+ if (g_hash_table_lookup_extended(user_data->ht, key, &pkey, &pval)) {
+ // Key exists in the table
+ // 1. Remove the key and value from the table without freeing them
+ g_hash_table_steal(user_data->ht, key);
+ // 2. Append to the list (the value from the hash table)
+ GSList *list = (GSList *) pval;
+ list = g_slist_append(list, xml_chunk);
+ // 3. Insert the modified list again
+ g_hash_table_insert(user_data->ht, pkey, list);
+ } else {
+ // Key doesn't exist yet
+ GSList *list = g_slist_prepend(NULL, xml_chunk);
+ g_hash_table_insert(user_data->ht, g_strdup(key), list);
+ }
+ g_mutex_unlock(user_data->mutex);
+
+exit:
+ g_free(checksum);
+ g_free(key);
+ cr_deltapackage_free(dpkg);
+}
+
+static gchar *
+gen_newpackage_xml_chunk(const char *strnevra,
+ GSList *delta_chunks)
+{
+ cr_NEVRA *nevra;
+ GString *chunk;
+
+ if (!delta_chunks)
+ return NULL;
+
+ nevra = cr_str_to_nevra(strnevra);
+
+ chunk = g_string_new(NULL);
+ g_string_printf(chunk, " <newpackage name=\"%s\" epoch=\"%s\" "
+ "version=\"%s\" release=\"%s\" arch=\"%s\">\n",
+ nevra->name, nevra->epoch ? nevra->epoch : "0",
+ nevra->version, nevra->release, nevra->arch);
+
+ cr_nevra_free(nevra);
+
+ for (GSList *elem = delta_chunks; elem; elem = g_slist_next(elem)) {
+ gchar *delta_chunk = elem->data;
+ g_string_append(chunk, delta_chunk);
+ }
+
+ g_string_append(chunk, " </newpackage>\n");
+
+ return g_string_free(chunk, FALSE);
+}
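+
+/* For reference, a chunk produced by gen_newpackage_xml_chunk() has roughly
+ * this shape (one <delta> entry per element of delta_chunks):
+ *
+ *   <newpackage name="foo" epoch="0" version="1.2" release="3" arch="x86_64">
+ *     <delta ...>...</delta>
+ *   </newpackage>
+ */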
+
+gboolean
+cr_deltarpms_generate_prestodelta_file(const gchar *drpmsdir,
+ cr_XmlFile *f,
+ cr_ChecksumType checksum_type,
+ gint workers,
+ const gchar *prefix_to_strip,
+ GError **err)
+{
+ gboolean ret = TRUE;
+ GSList *candidates = NULL;
+ GThreadPool *pool;
+    cr_PrestoDeltaUserData user_data = {0};
+ GHashTable *ht = NULL;
+ GHashTableIter iter;
+ gpointer key, value;
+ GError *tmp_err = NULL;
+
+ assert(drpmsdir);
+ assert(f);
+ assert(!err || *err == NULL);
+
+ // Walk the drpms directory
+
+ if (!walk_drpmsdir(drpmsdir, &candidates, &tmp_err)) {
+ g_propagate_prefixed_error(err, tmp_err, "%s: ", __func__);
+ ret = FALSE;
+ goto exit;
+ }
+
+ // Setup pool of workers
+
+ ht = g_hash_table_new_full(g_str_hash,
+ g_str_equal,
+ (GDestroyNotify) g_free,
+ (GDestroyNotify) cr_free_gslist_of_strings);
+
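+    // A single mutex serializes all workers' access to the shared hash table.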
+ user_data.mutex = g_mutex_new();
+ user_data.ht = ht;
+ user_data.checksum_type = checksum_type;
+    user_data.prefix_to_strip = prefix_to_strip;
+ user_data.prefix_len = prefix_to_strip ? strlen(prefix_to_strip) : 0;
+
+ pool = g_thread_pool_new(cr_prestodelta_thread,
+ &user_data,
+ workers,
+ TRUE,
+ &tmp_err);
+ if (tmp_err) {
+ g_propagate_prefixed_error(err, tmp_err,
+ "Cannot create pool for prestodelta file generation: ");
+ ret = FALSE;
+ goto exit;
+ }
+
+ // Push tasks to the pool
+
+ for (GSList *elem = candidates; elem; elem = g_slist_next(elem)) {
+ g_thread_pool_push(pool, elem->data, NULL);
+ }
+
+ // Wait until the pool finishes
+
+ g_thread_pool_free(pool, FALSE, TRUE);
+
+ // Write out the results
+
+ g_hash_table_iter_init(&iter, user_data.ht);
+ while (g_hash_table_iter_next(&iter, &key, &value)) {
+ gchar *chunk = NULL;
+ gchar *nevra = key;
+
+ chunk = gen_newpackage_xml_chunk(nevra, (GSList *) value);
+ cr_xmlfile_add_chunk(f, chunk, NULL);
+ g_free(chunk);
+ }
+
+exit:
+ g_slist_free_full(candidates, (GDestroyNotify) cr_prestodeltatask_free);
+    if (user_data.mutex)        // may be NULL if walk_drpmsdir() failed
+        g_mutex_free(user_data.mutex);
+    if (ht)
+        g_hash_table_destroy(ht);
+
+ return ret;
+}
+
+#endif
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __C_CREATEREPOLIB_DELTARPMS_H__
+#define __C_CREATEREPOLIB_DELTARPMS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <glib.h>
+#include <rpm/rpmlib.h>
+#include "package.h"
+#include "parsehdr.h"
+#include "xml_file.h"
+
+/** \defgroup deltarpms Support for deltarpms
+ * \addtogroup deltarpms
+ * @{
+ */
+
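+/* Substituted by CMake's configure_file(): defined only when a usable drpm
+ * library was found at build time. */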
+#cmakedefine CR_DELTA_RPM_SUPPORT
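+/* Default upper size limit (in bytes, i.e. 100 MB) for packages considered
+ * for delta generation. */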
+#define CR_DEFAULT_MAX_DELTA_RPM_SIZE 100000000
+
+typedef struct {
+ cr_Package *package;
+ char *nevr;
+ char *sequence;
+ GStringChunk *chunk;
+} cr_DeltaPackage;
+
+typedef struct {
+ char *name;
+ char *arch;
+ char *epoch;
+ char *version;
+ char *release;
+ char *location_href;
+ gint64 size_installed;
+
+ char *path;
+ GStringChunk *chunk;
+} cr_DeltaTargetPackage;
+
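+/** Returns TRUE if this build of createrepo_c has deltarpm (drpm) support. */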
+gboolean cr_drpm_support(void);
+
+#ifdef CR_DELTA_RPM_SUPPORT
+char *
+cr_drpm_create(cr_DeltaTargetPackage *old_pkg,
+               cr_DeltaTargetPackage *new_pkg,
+ const char *destdir,
+ GError **err);
+
+cr_DeltaPackage *
+cr_deltapackage_from_drpm_base(const char *filename,
+ int changelog_limit,
+ cr_HeaderReadingFlags flags,
+ GError **err);
+
+void
+cr_deltapackage_free(cr_DeltaPackage *deltapackage);
+
+GHashTable *
+cr_deltarpms_scan_oldpackagedirs(GSList *oldpackagedirs,
+ gint64 max_delta_rpm_size,
+ GError **err);
+
+cr_DeltaTargetPackage *
+cr_deltatargetpackage_from_package(cr_Package *pkg,
+ const char *path,
+ GError **err);
+
+cr_DeltaTargetPackage *
+cr_deltatargetpackage_from_rpm(const char *path, GError **err);
+
+void
+cr_deltatargetpackage_free(cr_DeltaTargetPackage *tpkg);
+
+gboolean
+cr_deltarpms_parallel_deltas(GSList *targetpackages,
+ GHashTable *oldpackages,
+ const char *outdeltadir,
+ gint num_deltas,
+ gint workers,
+ gint64 max_delta_rpm_size,
+ gint64 max_work_size,
+ GError **err);
+
+GSList *
+cr_deltarpms_scan_targetdir(const char *path,
+ gint64 max_delta_rpm_size,
+ GError **err);
+
+gboolean
+cr_deltarpms_generate_prestodelta_file(const gchar *drpmdir,
+ cr_XmlFile *f,
+ cr_ChecksumType checksum_type,
+ gint workers,
+ const gchar *prefix_to_strip,
+ GError **err);
+#endif
+
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __C_CREATEREPOLIB_DELTARPMS_H__ */
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include "checksum.h"
+#include "cleanup.h"
+#include "deltarpms.h"
+#include "dumper_thread.h"
+#include "error.h"
+#include "misc.h"
+#include "parsepkg.h"
+#include "xml_dump.h"
+
+#define MAX_TASK_BUFFER_LEN 20
+#define CACHEDCHKSUM_BUFFER_LEN 2048
+
+struct BufferedTask {
+ long id; // ID of the task
+ struct cr_XmlStruct res; // XML for primary, filelists and other
+ cr_Package *pkg; // Package structure
+ char *location_href; // location_href path
+    int pkg_from_md;            // If true - package structure is from
+                                // old metadata and must not be freed!
+ // If false - package is from file and
+ // it must be freed!
+};
+
+
+static gint
+buf_task_sort_func(gconstpointer a, gconstpointer b, G_GNUC_UNUSED gpointer data)
+{
+ const struct BufferedTask *task_a = a;
+ const struct BufferedTask *task_b = b;
+ if (task_a->id < task_b->id) return -1;
+ if (task_a->id == task_b->id) return 0;
+ return 1;
+}
+
+
+static void
+write_pkg(long id,
+ struct cr_XmlStruct res,
+ cr_Package *pkg,
+ struct UserData *udata)
+{
+ GError *tmp_err = NULL;
+
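+    // Chunks must land in the output files in task-ID order: each writer
+    // blocks on the condition variable until the shared counter (id_pri,
+    // id_fil, id_oth) reaches its task ID, writes its chunk, advances the
+    // counter and broadcasts to wake the other waiting writers.
+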
+ // Write primary data
+ g_mutex_lock(udata->mutex_pri);
+ while (udata->id_pri != id)
+ g_cond_wait (udata->cond_pri, udata->mutex_pri);
+ ++udata->id_pri;
+ cr_xmlfile_add_chunk(udata->pri_f, (const char *) res.primary, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot add primary chunk:\n%s\nError: %s",
+ res.primary, tmp_err->message);
+ g_clear_error(&tmp_err);
+ }
+
+ if (udata->pri_db) {
+ cr_db_add_pkg(udata->pri_db, pkg, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot add record of %s (%s) to primary db: %s",
+ pkg->name, pkg->pkgId, tmp_err->message);
+ g_clear_error(&tmp_err);
+ }
+ }
+
+ g_cond_broadcast(udata->cond_pri);
+ g_mutex_unlock(udata->mutex_pri);
+
+    // Write filelists data
+ g_mutex_lock(udata->mutex_fil);
+ while (udata->id_fil != id)
+ g_cond_wait (udata->cond_fil, udata->mutex_fil);
+ ++udata->id_fil;
+ cr_xmlfile_add_chunk(udata->fil_f, (const char *) res.filelists, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot add filelists chunk:\n%s\nError: %s",
+ res.filelists, tmp_err->message);
+ g_clear_error(&tmp_err);
+ }
+
+ if (udata->fil_db) {
+ cr_db_add_pkg(udata->fil_db, pkg, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot add record of %s (%s) to filelists db: %s",
+ pkg->name, pkg->pkgId, tmp_err->message);
+ g_clear_error(&tmp_err);
+ }
+ }
+
+ g_cond_broadcast(udata->cond_fil);
+ g_mutex_unlock(udata->mutex_fil);
+
+ // Write other data
+ g_mutex_lock(udata->mutex_oth);
+ while (udata->id_oth != id)
+ g_cond_wait (udata->cond_oth, udata->mutex_oth);
+ ++udata->id_oth;
+ cr_xmlfile_add_chunk(udata->oth_f, (const char *) res.other, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot add other chunk:\n%s\nError: %s",
+ res.other, tmp_err->message);
+ g_clear_error(&tmp_err);
+ }
+
+ if (udata->oth_db) {
+        cr_db_add_pkg(udata->oth_db, pkg, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot add record of %s (%s) to other db: %s",
+ pkg->name, pkg->pkgId, tmp_err->message);
+ g_clear_error(&tmp_err);
+ }
+ }
+
+ g_cond_broadcast(udata->cond_oth);
+ g_mutex_unlock(udata->mutex_oth);
+}
+
+static char *
+get_checksum(const char *filename,
+ cr_ChecksumType type,
+ cr_Package *pkg,
+ const char *cachedir,
+ GError **err)
+{
+ GError *tmp_err = NULL;
+ char *checksum = NULL;
+ char *cachefn = NULL;
+
+ if (cachedir) {
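+        // The cache filename combines a digest of the package's signatures
+        // and hdrid with its installed size and file mtime, so a re-signed
+        // or rebuilt package never hits a stale cache entry.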
+ // Prepare cache fn
+ char *key;
+ cr_ChecksumCtx *ctx = cr_checksum_new(type, err);
+ if (!ctx) return NULL;
+
+ if (pkg->siggpg)
+ cr_checksum_update(ctx, pkg->siggpg->data, pkg->siggpg->size, NULL);
+ if (pkg->sigpgp)
+ cr_checksum_update(ctx, pkg->sigpgp->data, pkg->sigpgp->size, NULL);
+ if (pkg->hdrid)
+ cr_checksum_update(ctx, pkg->hdrid, strlen(pkg->hdrid), NULL);
+
+ key = cr_checksum_final(ctx, err);
+ if (!key) return NULL;
+
+ cachefn = g_strdup_printf("%s%s-%s-%"G_GINT64_FORMAT"-%"G_GINT64_FORMAT,
+ cachedir,
+ cr_get_filename(pkg->location_href),
+ key, pkg->size_installed, pkg->time_file);
+ free(key);
+
+ // Try to load checksum
+ FILE *f = fopen(cachefn, "r");
+ if (f) {
+ char buf[CACHEDCHKSUM_BUFFER_LEN];
+            size_t bytes_read = fread(buf, 1, CACHEDCHKSUM_BUFFER_LEN, f);
+            if (!ferror(f) && bytes_read > 0) {
+                checksum = g_strndup(buf, bytes_read);
+ }
+ fclose(f);
+ }
+
+ if (checksum) {
+ g_debug("Cached checksum used: %s: \"%s\"", cachefn, checksum);
+ goto exit;
+ }
+ }
+
+ // Calculate checksum
+ checksum = cr_checksum_file(filename, type, &tmp_err);
+ if (!checksum) {
+ g_propagate_prefixed_error(err, tmp_err,
+ "Error while checksum calculation: ");
+ goto exit;
+ }
+
+ // Cache the checksum value
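+    // (written to a unique temp file first and then renamed into place, so
+    // concurrent workers never observe a partially written entry)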
+ if (cachefn && !g_file_test(cachefn, G_FILE_TEST_EXISTS)) {
+ gchar *template = g_strconcat(cachefn, "-XXXXXX", NULL);
+ gint fd = g_mkstemp(template);
+ if (fd < 0) {
+ g_free(template);
+ goto exit;
+ }
+        if (write(fd, checksum, strlen(checksum)) == -1)
+            g_warning("%s: Cannot write cached checksum to %s: %s",
+                      __func__, template, g_strerror(errno));
+        close(fd);
+ if (g_rename(template, cachefn) == -1)
+ g_remove(template);
+ g_free(template);
+ }
+
+exit:
+ g_free(cachefn);
+
+ return checksum;
+}
+
+static cr_Package *
+load_rpm(const char *fullpath,
+ cr_ChecksumType checksum_type,
+ const char *checksum_cachedir,
+ const char *location_href,
+ const char *location_base,
+ int changelog_limit,
+ struct stat *stat_buf,
+ cr_HeaderReadingFlags hdrrflags,
+ GError **err)
+{
+ cr_Package *pkg = NULL;
+ GError *tmp_err = NULL;
+
+ assert(fullpath);
+ assert(!err || *err == NULL);
+
+ // Get a package object
+ pkg = cr_package_from_rpm_base(fullpath, changelog_limit, hdrrflags, err);
+ if (!pkg)
+ goto errexit;
+
+ pkg->location_href = cr_safe_string_chunk_insert(pkg->chunk, location_href);
+ pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, location_base);
+
+ // Get checksum type string
+ pkg->checksum_type = cr_safe_string_chunk_insert(pkg->chunk,
+ cr_checksum_name_str(checksum_type));
+
+ // Get file stat
+ if (!stat_buf) {
+ struct stat stat_buf_own;
+ if (stat(fullpath, &stat_buf_own) == -1) {
+ g_warning("%s: stat(%s) error (%s)", __func__,
+ fullpath, g_strerror(errno));
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "stat(%s) failed: %s",
+ fullpath, g_strerror(errno));
+ goto errexit;
+ }
+ pkg->time_file = stat_buf_own.st_mtime;
+ pkg->size_package = stat_buf_own.st_size;
+ } else {
+ pkg->time_file = stat_buf->st_mtime;
+ pkg->size_package = stat_buf->st_size;
+ }
+
+ // Compute checksum
+ char *checksum = get_checksum(fullpath, checksum_type, pkg,
+ checksum_cachedir, &tmp_err);
+ if (!checksum)
+ goto errexit;
+ pkg->pkgId = cr_safe_string_chunk_insert(pkg->chunk, checksum);
+ free(checksum);
+
+ // Get header range
+ struct cr_HeaderRangeStruct hdr_r = cr_get_header_byte_range(fullpath,
+ &tmp_err);
+ if (tmp_err) {
+ g_propagate_prefixed_error(err, tmp_err,
+ "Error while determinig header range: ");
+ goto errexit;
+ }
+
+ pkg->rpm_header_start = hdr_r.start;
+ pkg->rpm_header_end = hdr_r.end;
+
+ return pkg;
+
+errexit:
+ cr_package_free(pkg);
+ return NULL;
+}
+
+void
+cr_dumper_thread(gpointer data, gpointer user_data)
+{
+ GError *tmp_err = NULL;
+ gboolean old_used = FALSE; // To use old metadata?
+ cr_Package *md = NULL; // Package from loaded MetaData
+ cr_Package *pkg = NULL; // Package from file
+ struct stat stat_buf; // Struct with info from stat() on file
+ struct cr_XmlStruct res; // Structure for generated XML
+ cr_HeaderReadingFlags hdrrflags = CR_HDRR_NONE;
+
+ struct UserData *udata = (struct UserData *) user_data;
+ struct PoolTask *task = (struct PoolTask *) data;
+
+ // get location_href without leading part of path (path to repo)
+ // including '/' char
+ const gchar *location_base = udata->location_base;
+ _cleanup_free_ gchar *location_href = NULL;
+ location_href = g_strdup(task->full_path + udata->repodir_name_len);
+
+ // User requested modification of the location href
+ if (udata->cut_dirs) {
+ gchar *tmp = location_href;
+ location_href = g_strdup(cr_cut_dirs(location_href, udata->cut_dirs));
+ g_free(tmp);
+ }
+
+ if (udata->location_prefix) {
+ gchar *tmp = location_href;
+ location_href = g_build_filename(udata->location_prefix, tmp, NULL);
+ g_free(tmp);
+ }
+
+ // If --cachedir is used, load signatures and hdrid from packages too
+ if (udata->checksum_cachedir)
+ hdrrflags = CR_HDRR_LOADHDRID | CR_HDRR_LOADSIGNATURES;
+
+ // Get stat info about file
+ if (udata->old_metadata && !(udata->skip_stat)) {
+ if (stat(task->full_path, &stat_buf) == -1) {
+ g_critical("Stat() on %s: %s", task->full_path, g_strerror(errno));
+ goto task_cleanup;
+ }
+ }
+
+ // Update stuff
+ if (udata->old_metadata) {
+ // We have old metadata
+ md = (cr_Package *) g_hash_table_lookup(
+ cr_metadata_hashtable(udata->old_metadata),
+ task->filename);
+
+ if (md) {
+ g_debug("CACHE HIT %s", task->filename);
+
+ if (udata->skip_stat) {
+ old_used = TRUE;
+ } else if (stat_buf.st_mtime == md->time_file
+ && stat_buf.st_size == md->size_package
+ && !strcmp(udata->checksum_type_str, md->checksum_type))
+ {
+ old_used = TRUE;
+ } else {
+ g_debug("%s metadata are obsolete -> generating new",
+ task->filename);
+ }
+
+ if (old_used) {
+                // We have usable old data, but we have to set proper locations.
+                // WARNING! These two lines destructively modify the content of
+                // packages in the old metadata.
+                md->location_href = location_href;
+                md->location_base = (gchar *) location_base;
+                // ^^^ The location_base is not properly saved into the pkg
+                // chunk. This is intentional: once the metadata are written
+                // (dumped), nobody should use them again.
+ }
+ }
+ }
+
+ // Load package and gen XML metadata
+ if (!old_used) {
+ // Load package from file
+ pkg = load_rpm(task->full_path, udata->checksum_type,
+ udata->checksum_cachedir, location_href,
+ udata->location_base, udata->changelog_limit,
+ NULL, hdrrflags, &tmp_err);
+ assert(pkg || tmp_err);
+
+ if (!pkg) {
+ g_warning("Cannot read package: %s: %s",
+ task->full_path, tmp_err->message);
+ g_clear_error(&tmp_err);
+ goto task_cleanup;
+ }
+
+ res = cr_xml_dump(pkg, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot dump XML for %s (%s): %s",
+ pkg->name, pkg->pkgId, tmp_err->message);
+ g_clear_error(&tmp_err);
+ goto task_cleanup;
+ }
+ } else {
+ // Just gen XML from old loaded metadata
+ pkg = md;
+ res = cr_xml_dump(md, &tmp_err);
+ if (tmp_err) {
+ g_critical("Cannot dump XML for %s (%s): %s",
+ md->name, md->pkgId, tmp_err->message);
+ g_clear_error(&tmp_err);
+ goto task_cleanup;
+ }
+ }
+
+#ifdef CR_DELTA_RPM_SUPPORT
+ // Delta candidate
+ if (udata->deltas
+ && !old_used
+ && pkg->size_installed < udata->max_delta_rpm_size)
+ {
+ cr_DeltaTargetPackage *tpkg;
+ tpkg = cr_deltatargetpackage_from_package(pkg,
+ task->full_path,
+ NULL);
+ if (tpkg) {
+ g_mutex_lock(udata->mutex_deltatargetpackages);
+ udata->deltatargetpackages = g_slist_prepend(
+ udata->deltatargetpackages,
+ tpkg);
+ g_mutex_unlock(udata->mutex_deltatargetpackages);
+ } else {
+ g_warning("Cannot create deltatargetpackage for: %s-%s-%s",
+ pkg->name, pkg->version, pkg->release);
+ }
+ }
+#endif
+
+ // Buffering stuff
+ g_mutex_lock(udata->mutex_buffer);
+
+ if (g_queue_get_length(udata->buffer) < MAX_TASK_BUFFER_LEN
+ && udata->id_pri != task->id
+ && udata->package_count > (task->id + 1))
+ {
+ // If:
+ // * this isn't our turn
+ // * the buffer isn't full
+ // * this isn't the last task
+ // Then: save the task to the buffer
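+        // (The last task is deliberately never buffered: buffered tasks are
+        // only flushed by threads finishing after them, and no thread runs
+        // after the last task.)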
+
+ struct BufferedTask *buf_task = malloc(sizeof(struct BufferedTask));
+ buf_task->id = task->id;
+ buf_task->res = res;
+ buf_task->pkg = pkg;
+ buf_task->location_href = NULL;
+ buf_task->pkg_from_md = (pkg == md) ? 1 : 0;
+
+ if (pkg == md) {
+            // We MUST store location_href for reused packages that go to the
+            // buffer. We don't need to store location_base because it is
+            // allocated in user_data and outlives this function call.
+
+ buf_task->location_href = g_strdup(location_href);
+ buf_task->pkg->location_href = buf_task->location_href;
+ }
+
+ g_queue_insert_sorted(udata->buffer, buf_task, buf_task_sort_func, NULL);
+ g_mutex_unlock(udata->mutex_buffer);
+
+ g_free(task->full_path);
+ g_free(task->filename);
+ g_free(task->path);
+ g_free(task);
+
+ return;
+ }
+
+ g_mutex_unlock(udata->mutex_buffer);
+
+ // Dump XML and SQLite
+ write_pkg(task->id, res, pkg, udata);
+
+ // Clean up
+ if (pkg != md)
+ cr_package_free(pkg);
+ g_free(res.primary);
+ g_free(res.filelists);
+ g_free(res.other);
+
+task_cleanup:
+ if (udata->id_pri <= task->id) {
+        // An error was encountered before this task's chunks were written.
+        // We still have to take our turn and advance all three counters,
+        // otherwise the writers of subsequent tasks would wait forever.
+ g_mutex_lock(udata->mutex_pri);
+ while (udata->id_pri != task->id)
+ g_cond_wait (udata->cond_pri, udata->mutex_pri);
+ ++udata->id_pri;
+ g_cond_broadcast(udata->cond_pri);
+ g_mutex_unlock(udata->mutex_pri);
+
+ g_mutex_lock(udata->mutex_fil);
+ while (udata->id_fil != task->id)
+ g_cond_wait (udata->cond_fil, udata->mutex_fil);
+ ++udata->id_fil;
+ g_cond_broadcast(udata->cond_fil);
+ g_mutex_unlock(udata->mutex_fil);
+
+ g_mutex_lock(udata->mutex_oth);
+ while (udata->id_oth != task->id)
+ g_cond_wait (udata->cond_oth, udata->mutex_oth);
+ ++udata->id_oth;
+ g_cond_broadcast(udata->cond_oth);
+ g_mutex_unlock(udata->mutex_oth);
+ }
+
+ g_free(task->full_path);
+ g_free(task->filename);
+ g_free(task->path);
+ g_free(task);
+
+    // Try to write out any buffered results that were waiting for their turn
+ while (1) {
+ struct BufferedTask *buf_task;
+ g_mutex_lock(udata->mutex_buffer);
+ buf_task = g_queue_peek_head(udata->buffer);
+ if (buf_task && buf_task->id == udata->id_pri) {
+ buf_task = g_queue_pop_head (udata->buffer);
+ g_mutex_unlock(udata->mutex_buffer);
+ // Dump XML and SQLite
+ write_pkg(buf_task->id, buf_task->res, buf_task->pkg, udata);
+ // Clean up
+ if (!buf_task->pkg_from_md)
+ cr_package_free(buf_task->pkg);
+ g_free(buf_task->res.primary);
+ g_free(buf_task->res.filelists);
+ g_free(buf_task->res.other);
+ g_free(buf_task->location_href);
+ g_free(buf_task);
+ } else {
+ g_mutex_unlock(udata->mutex_buffer);
+ break;
+ }
+ }
+
+ return;
+}
+
+
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __C_CREATEREPOLIB_DUMPER_THREAD_H__
+#define __C_CREATEREPOLIB_DUMPER_THREAD_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <glib.h>
+#include <rpm/rpmlib.h>
+#include "load_metadata.h"
+#include "locate_metadata.h"
+#include "misc.h"
+#include "package.h"
+#include "sqlite.h"
+#include "xml_file.h"
+
+/** \defgroup dumperthread Implementation of concurrent dumping used in createrepo_c
+ * \addtogroup dumperthread
+ * @{
+ */
+
+struct PoolTask {
+ long id; // ID of the task
+ char* full_path; // Complete path - /foo/bar/packages/foo.rpm
+ char* filename; // Just filename - foo.rpm
+ char* path; // Just path - /foo/bar/packages
+};
+
+struct UserData {
+ cr_XmlFile *pri_f; // Opened compressed primary.xml.*
+ cr_XmlFile *fil_f; // Opened compressed filelists.xml.*
+ cr_XmlFile *oth_f; // Opened compressed other.xml.*
+ cr_SqliteDb *pri_db; // Primary db
+ cr_SqliteDb *fil_db; // Filelists db
+ cr_SqliteDb *oth_db; // Other db
+ int changelog_limit; // Max number of changelogs for a package
+ const char *location_base; // Base location url
+ int repodir_name_len; // Len of path to repo /foo/bar/repodata
+ // This part |<----->|
+ const char *checksum_type_str; // Name of selected checksum
+ cr_ChecksumType checksum_type; // Constant representing selected checksum
+ const char *checksum_cachedir; // Dir with cached checksums
+ gboolean skip_symlinks; // Skip symlinks
+ long package_count; // Total number of packages to process
+
+ // Update stuff
+ gboolean skip_stat; // Skip stat() while updating
+ cr_Metadata *old_metadata; // Loaded metadata
+
+ // Thread serialization
+ GMutex *mutex_pri; // Mutex for primary metadata
+ GMutex *mutex_fil; // Mutex for filelists metadata
+ GMutex *mutex_oth; // Mutex for other metadata
+ GCond *cond_pri; // Condition for primary metadata
+ GCond *cond_fil; // Condition for filelists metadata
+ GCond *cond_oth; // Condition for other metadata
+ volatile long id_pri; // ID of task on turn (write primary metadata)
+ volatile long id_fil; // ID of task on turn (write filelists metadata)
+ volatile long id_oth; // ID of task on turn (write other metadata)
+
+ // Buffering
+ GQueue *buffer; // Buffer for done tasks
+ GMutex *mutex_buffer; // Mutex for accessing the buffer
+
+ // Delta generation
+ gboolean deltas; // Are deltas enabled?
+    gint64 max_delta_rpm_size;  // Max size of an rpm to run
+                                // deltarpm against
+    GMutex *mutex_deltatargetpackages; // Mutex guarding the list below
+ GSList *deltatargetpackages; // List of cr_DeltaTargetPackages
+
+ // Location href modifiers
+ gint cut_dirs; // Ignore *num* of directory components
+ // in location href path
+ gchar *location_prefix; // Append this prefix into location_href
+ // during repodata generation
+};
+
+
+void
+cr_dumper_thread(gpointer data, gpointer user_data);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __C_CREATEREPOLIB_DUMPER_THREAD_H__ */
return "Assert error";
case CRE_BADCMDARG:
return "Bad command line argument(s)";
+ case CRE_SPAWNERRCODE:
+ return "Child process exited with error code != 0";
+ case CRE_SPAWNKILLED:
+ return "Child process killed by signal";
+ case CRE_SPAWNSTOPED:
+ return "Child process stopped by signal";
+ case CRE_SPAWNABNORMAL:
+ return "Child process exited abnormally";
+ case CRE_DELTARPM:
+ return "Deltarpm error";
default:
return "Unknown error";
}
}
GQuark
-cr_cmd_error_quark(void)
+createrepo_c_error_quark(void)
{
- return g_quark_from_static_string("cr_cmd_error");
-}
-
-GQuark
-cr_checksum_error_quark(void)
-{
- return g_quark_from_static_string("cr_checksum_error");
-}
-
-GQuark
-cr_compression_wrapper_error_quark(void)
-{
- return g_quark_from_static_string("cr_compression_wrapper_error");
-}
-
-GQuark
-cr_db_error_quark(void)
-{
- return g_quark_from_static_string("cr_db_error");
-}
-
-GQuark
-cr_load_metadata_error_quark(void)
-{
- return g_quark_from_static_string("cr_load_metadata_error");
-}
-
-GQuark
-cr_locate_metadata_error_quark(void)
-{
- return g_quark_from_static_string("cr_locate_metadata_error");
-}
-
-GQuark
-cr_parsepkg_error_quark(void)
-{
- return g_quark_from_static_string("cr_parsepkg_error");
-}
-
-GQuark
-cr_misc_error_quark(void)
-{
- return g_quark_from_static_string("cr_misc_error");
-}
-
-GQuark cr_modifyrepo_error_quark(void)
-{
- return g_quark_from_static_string("cr_modifyrepo_error");
-}
-
-GQuark
-cr_repomd_error_quark(void)
-{
- return g_quark_from_static_string("cr_repomd_error");
-}
-
-GQuark
-cr_repomd_record_error_quark(void)
-{
- return g_quark_from_static_string("cr_repomd_record_error");
-}
-
-GQuark
-cr_threads_error_quark(void)
-{
- return g_quark_from_static_string("cr_threads_error");
-}
-
-GQuark
-cr_xml_dump_filelists_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_dump_filelists_error");
-}
-
-GQuark
-cr_xml_dump_other_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_dump_other_error");
-}
-
-GQuark
-cr_xml_dump_primary_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_dump_primary_error");
-}
-
-GQuark
-cr_xml_dump_repomd_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_dump_repomd_error");
-}
-
-GQuark
-cr_xml_file_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_file_error");
-}
-
-GQuark
-cr_xml_parser_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_parser_error");
-}
-
-GQuark
-cr_xml_parser_fil_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_parser_filelists_error");
-}
-
-GQuark
-cr_xml_parser_oth_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_parser_other_error");
-}
-
-GQuark
-cr_xml_parser_pri_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_parser_primary_error");
-}
-
-GQuark
-cr_xml_parser_repomd_error_quark(void)
-{
- return g_quark_from_static_string("cr_xml_parser_repomd_error");
+ static GQuark quark = 0;
+ if (!quark)
+ quark = g_quark_from_static_string ("createrepo_c_error");
+ return quark;
}
it happened, the probable reason is that some values of the createrepo_c
object were changed (by you - the programmer) in a bad way */
CRE_BADCMDARG, /*!<
- Bad command line argument(s) */
+ (26) Bad command line argument(s) */
+ CRE_SPAWNERRCODE, /*!<
+ (27) Child process exited with error code != 0 */
+ CRE_SPAWNKILLED, /*!<
+ (28) Child process killed by signal */
+ CRE_SPAWNSTOPED, /*!<
+ (29) Child process stopped by signal */
+ CRE_SPAWNABNORMAL, /*!<
+ (30) Child process exited abnormally */
+ CRE_DELTARPM, /*!<
+ (31) Deltarpm related error */
+ CRE_BADXMLUPDATEINFO, /*!<
+ (32) Bad updateinfo.xml file */
+ CRE_SIGPROCMASK, /*!<
+ (33) Cannot change blocked signals */
+ CRE_SENTINEL, /*!<
+ (XX) Sentinel */
} cr_Error;
/** Converts cr_Error return code to error string.
const char *cr_strerror(cr_Error rc);
/* Error domains */
-#define CR_CHECKSUM_ERROR cr_checksum_error_quark()
-#define CR_CMD_ERROR cr_cmd_error_quark()
-#define CR_COMPRESSION_WRAPPER_ERROR cr_compression_wrapper_error_quark()
-#define CR_DB_ERROR cr_db_error_quark()
-#define CR_LOAD_METADATA_ERROR cr_load_metadata_error_quark()
-#define CR_LOCATE_METADATA_ERROR cr_locate_metadata_error_quark()
-#define CR_MISC_ERROR cr_misc_error_quark()
-#define CR_MODIFYREPO_ERROR cr_modifyrepo_error_quark()
-#define CR_PARSEPKG_ERROR cr_parsepkg_error_quark()
-#define CR_REPOMD_ERROR cr_repomd_error_quark()
-#define CR_REPOMD_RECORD_ERROR cr_repomd_record_error_quark()
-#define CR_THREADS_ERROR cr_threads_error_quark()
-#define CR_XML_DUMP_FILELISTS_ERROR cr_xml_dump_filelists_error_quark()
-#define CR_XML_DUMP_OTHER_ERROR cr_xml_dump_other_error_quark()
-#define CR_XML_DUMP_PRIMARY_ERROR cr_xml_dump_primary_error_quark()
-#define CR_XML_DUMP_REPOMD_ERROR cr_xml_dump_repomd_error_quark()
-#define CR_XML_FILE_ERROR cr_xml_file_error_quark()
-#define CR_XML_PARSER_ERROR cr_xml_parser_error_quark()
-#define CR_XML_PARSER_FIL_ERROR cr_xml_parser_fil_error_quark()
-#define CR_XML_PARSER_OTH_ERROR cr_xml_parser_oth_error_quark()
-#define CR_XML_PARSER_PRI_ERROR cr_xml_parser_pri_error_quark()
-#define CR_XML_PARSER_REPOMD_ERROR cr_xml_parser_repomd_error_quark()
+#define CREATEREPO_C_ERROR createrepo_c_error_quark()
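+/* All former per-module error quarks are unified into this single domain. */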
-GQuark cr_checksum_error_quark(void);
-GQuark cr_cmd_error_quark(void);
-GQuark cr_compression_wrapper_error_quark(void);
-GQuark cr_db_error_quark(void);
-GQuark cr_load_metadata_error_quark(void);
-GQuark cr_locate_metadata_error_quark(void);
-GQuark cr_misc_error_quark(void);
-GQuark cr_modifyrepo_error_quark(void);
-GQuark cr_parsepkg_error_quark(void);
-GQuark cr_repomd_error_quark(void);
-GQuark cr_repomd_record_error_quark(void);
-GQuark cr_threads_error_quark(void);
-GQuark cr_xml_dump_filelists_error_quark(void);
-GQuark cr_xml_dump_other_error_quark(void);
-GQuark cr_xml_dump_primary_error_quark(void);
-GQuark cr_xml_dump_repomd_error_quark(void);
-GQuark cr_xml_file_error_quark(void);
-GQuark cr_xml_parser_error_quark(void);
-GQuark cr_xml_parser_fil_error_quark(void);
-GQuark cr_xml_parser_oth_error_quark(void);
-GQuark cr_xml_parser_pri_error_quark(void);
-GQuark cr_xml_parser_repomd_error_quark(void);
+GQuark createrepo_c_error_quark(void);
#endif
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <errno.h>
+#include <string.h>
+#include <time.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "helpers.h"
+#include "error.h"
+#include "misc.h"
+#include "checksum.h"
+#include "modifyrepo_shared.h"
+#include "compression_wrapper.h"
+#include "threads.h"
+#include "xml_dump.h"
+#include "locate_metadata.h"
+
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
+typedef struct _old_file {
+ time_t mtime;
+ gchar *path;
+} OldFile;
+
+
+static void
+cr_free_old_file(gpointer data)
+{
+ OldFile *old_file = (OldFile *) data;
+ g_free(old_file->path);
+ g_free(old_file);
+}
+
+
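+/* Sort callback: more recently modified files (greater mtime) come first. */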
+static gint
+cr_cmp_old_repodata_files(gconstpointer a, gconstpointer b)
+{
+ if (((OldFile *) a)->mtime < ((OldFile *) b)->mtime)
+ return 1;
+ if (((OldFile *) a)->mtime > ((OldFile *) b)->mtime)
+ return -1;
+ return 0;
+}
+
+static void
+cr_stat_and_insert(const gchar *dirname, const gchar *filename, GSList **list)
+{
+ struct stat buf;
+ OldFile *old_file;
+ gchar *path = g_strconcat(dirname, filename, NULL);
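+    // On stat() failure fall back to mtime 1 (~ the epoch), so the file
+    // sorts as the oldest and is among the first to be blacklisted.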
+ if (stat(path, &buf) == -1)
+ buf.st_mtime = 1;
+ old_file = g_malloc0(sizeof(OldFile));
+ old_file->mtime = buf.st_mtime;
+ old_file->path = path;
+ *list = g_slist_insert_sorted(*list, old_file, cr_cmp_old_repodata_files);
+}
+
+/* List files that should be removed from the repo or not copied
+ * to the new repo. (except the repomd.xml)
+ */
+static gboolean
+cr_repodata_blacklist_classic(const char *repodata_path,
+ int retain,
+ GSList **blacklist,
+ GError **err)
+{
+    /* This piece of code implements the retain_old functionality in
+     * the same way as the original createrepo does.
+     * The way is pretty stupid, because:
+     * - Old metadata are kept in the repodata/ but not referenced by
+     *   repomd.xml
+     * - Thus, old repodata are searched for by their filenames
+     * - It manipulates only primary, filelists, other and the
+     *   related databases.
+     */
+
+    /* By default, createrepo_c keeps (copies from the old repo
+     * to the new repo) all files that are in the repodata/ directory
+     * but are not referenced by the repomd.xml.
+     *
+     * This hack, however, appends to the old_basenames list the metadata
+     * files that should be ignored (i.e. that should not be copied to the
+     * new repository).
+     */
+
+ GSList *pri_lst = NULL, *pri_db_lst = NULL;
+ GSList *fil_lst = NULL, *fil_db_lst = NULL;
+ GSList *oth_lst = NULL, *oth_db_lst = NULL;
+ GSList **lists[] = { &pri_lst, &pri_db_lst,
+ &fil_lst, &fil_db_lst,
+ &oth_lst, &oth_db_lst };
+ const int num_of_lists = CR_ARRAYLEN(lists);
+
+ GDir *dirp = NULL;
+ const gchar *filename;
+ GError *tmp_err = NULL;
+
+ assert(blacklist);
+ assert(!err || *err == NULL);
+
+ *blacklist = NULL;
+
+ if (retain == -1) {
+ // -1 means retain all - nothing to be blacklisted
+ return TRUE;
+ } else if (retain < 0) {
+ // other negative values are error
+        g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+                    "Number of retained old metadata files "
+                    "must be an integer >= -1");
+ return FALSE;
+ }
+
+ // Open the repodata/ directory
+ dirp = g_dir_open (repodata_path, 0, &tmp_err);
+ if (!dirp) {
+ g_warning("Cannot open directory: %s: %s", repodata_path, tmp_err->message);
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open directory: %s: %s",
+ repodata_path, tmp_err->message);
+ g_error_free(tmp_err);
+ return FALSE;
+ }
+
+ // Create sorted (by mtime) lists of old metadata files
+ // More recent files are first
+ while ((filename = g_dir_read_name (dirp))) {
+ // Get filename without suffix
+ gchar *name_without_suffix;
+ gchar *lastdot = strrchr(filename, '.');
+ if (!lastdot) continue; // Filename doesn't contain '.'
+ name_without_suffix = g_strndup(filename, (lastdot - filename));
+
+ // XXX: This detection is pretty shitty, but it mimics
+ // behaviour of original createrepo
+ if (g_str_has_suffix(name_without_suffix, "primary.xml")) {
+ cr_stat_and_insert(repodata_path, filename, &pri_lst);
+ } else if (g_str_has_suffix(name_without_suffix, "primary.sqlite")) {
+ cr_stat_and_insert(repodata_path, filename, &pri_db_lst);
+ } else if (g_str_has_suffix(name_without_suffix, "filelists.xml")) {
+ cr_stat_and_insert(repodata_path, filename, &fil_lst);
+ } else if (g_str_has_suffix(name_without_suffix, "filelists.sqlite")) {
+ cr_stat_and_insert(repodata_path, filename, &fil_db_lst);
+ } else if (g_str_has_suffix(name_without_suffix, "other.xml")) {
+ cr_stat_and_insert(repodata_path, filename, &oth_lst);
+ } else if (g_str_has_suffix(name_without_suffix, "other.sqlite")) {
+ cr_stat_and_insert(repodata_path, filename, &oth_db_lst);
+ }
+ g_free(name_without_suffix);
+ }
+
+ g_dir_close(dirp);
+ dirp = NULL;
+
+ // Append files to the blacklist
+ for (int x = 0; x < num_of_lists; x++) {
+ for (GSList *el = g_slist_nth(*(lists[x]), retain); el; el = g_slist_next(el)) {
+ OldFile *of = (OldFile *) el->data;
+ *blacklist = g_slist_prepend(*blacklist,
+ g_path_get_basename(of->path));
+ }
+ // Free the list
+ cr_slist_free_full(*(lists[x]), cr_free_old_file);
+ }
+
+ return TRUE;
+}
+
+/* List files that should be removed from the repo or not copied
+ * to the new repo. (except the repomd.xml)
+ * This function blacklists all metadata files listed in repomd.xml
+ * if retain == 0; otherwise it doesn't blacklist any files.
+ */
+static gboolean
+cr_repodata_blacklist(const char *repodata_path,
+ int retain,
+ GSList **blacklist,
+ GError **err)
+{
+ gchar *old_repomd_path = NULL;
+ cr_Repomd *repomd = NULL;
+ GError *tmp_err = NULL;
+
+ assert(blacklist);
+ assert(!err || *err == NULL);
+
+ *blacklist = NULL;
+
+ if (retain == -1 || retain > 0) {
+        // -1 and all positive values mean retain all - nothing to blacklist
+ return TRUE;
+ } else if (retain < 0) {
+ // other negative values are error
+        g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+                    "Number of retained old metadata files "
+                    "must be an integer >= -1");
+ return FALSE;
+ }
+
+ // Parse old repomd.xml
+ old_repomd_path = g_build_filename(repodata_path, "repomd.xml", NULL);
+ repomd = cr_repomd_new();
+ cr_xml_parse_repomd(old_repomd_path, repomd, NULL, NULL, &tmp_err);
+ if (tmp_err) {
+ g_warning("Cannot parse repomd: %s", old_repomd_path);
+ g_clear_error(&tmp_err);
+ cr_repomd_free(repomd);
+ repomd = cr_repomd_new();
+ }
+ g_free(old_repomd_path);
+
+    // Walk the records of the parsed repomd.xml and append
+    // their basenames to the blacklist
+ for (GSList *elem = repomd->records; elem; elem = g_slist_next(elem)) {
+ cr_RepomdRecord *rec = elem->data;
+
+ if (!rec->location_href) {
+ // Ignore bad records (records without location_href)
+ g_warning("Record without location href in old repo");
+ continue;
+ }
+
+ if (rec->location_base) {
+ // Ignore files with base location
+ g_debug("Old repomd record with base location is ignored: "
+ "%s - %s", rec->location_base, rec->location_href);
+ continue;
+ }
+
+ *blacklist = g_slist_prepend(*blacklist,
+ g_path_get_basename(rec->location_href));
+ }
+
+ cr_repomd_free(repomd);
+ return TRUE;
+}
+
+static gboolean
+cr_repodata_blacklist_by_age(const char *repodata_path,
+ gint64 md_max_age,
+ GSList **blacklist,
+ GError **err)
+{
+ GDir *dirp = NULL;
+ const gchar *filename;
+ time_t current_time;
+ GError *tmp_err = NULL;
+
+ assert(blacklist);
+ assert(!err || *err == NULL);
+
+ *blacklist = NULL;
+
+ if (md_max_age < 0) {
+ // A negative value means retain all - nothing to be blacklisted
+ return TRUE;
+ }
+
+ // Open the repodata/ directory
+ dirp = g_dir_open (repodata_path, 0, &tmp_err);
+ if (!dirp) {
+ g_warning("Cannot open directory: %s: %s", repodata_path, tmp_err->message);
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open directory: %s: %s",
+ repodata_path, tmp_err->message);
+ g_error_free(tmp_err);
+ return FALSE;
+ }
+
+ current_time = time(NULL);
+
+    // Check the mtime of every file in the repodata/ directory and
+    // blacklist the files older than md_max_age
+ while ((filename = g_dir_read_name (dirp))) {
+ struct stat buf;
+ gchar *fullpath = g_strconcat(repodata_path, filename, NULL);
+ if (stat(fullpath, &buf) == -1) {
+ g_warning("Cannot stat %s", fullpath);
+ g_free(fullpath);
+ continue;
+ }
+ g_free(fullpath);
+
+ // Check file age (current_time - mtime)
+ gint64 age = difftime(current_time, buf.st_mtime);
+ if (age <= md_max_age)
+ continue; // The file is young
+
+ // Debug
+ g_debug("File is too old (%"G_GINT64_FORMAT" > %"G_GINT64_FORMAT") %s",
+ age, md_max_age, filename);
+
+ // Add the file to the blacklist
+ *blacklist = g_slist_prepend(*blacklist, g_strdup(filename));
+ }
+
+ g_dir_close(dirp);
+ return TRUE;
+}
+
+int
+cr_remove_metadata_classic(const char *repopath, int retain, GError **err)
+{
+ int rc = CRE_OK;
+ gboolean ret = TRUE;
+ gchar *full_repopath = NULL;
+ GSList *blacklist = NULL;
+ GDir *dirp = NULL;
+ const gchar *filename;
+ GError *tmp_err = NULL;
+
+ assert(repopath);
+ assert(!err || *err == NULL);
+
+ full_repopath = g_strconcat(repopath, "/repodata/", NULL);
+
+ // Get list of files that should be deleted
+ ret = cr_repodata_blacklist_classic(full_repopath, retain, &blacklist, err);
+    if (!ret) {
+        // Don't return FALSE here - FALSE == CRE_OK
+        rc = CRE_ERROR;
+        goto cleanup;
+    }
+
+ // Always remove repomd.xml
+ blacklist = g_slist_prepend(blacklist, g_strdup("repomd.xml"));
+
+ // Open the repodata/ directory
+ dirp = g_dir_open(full_repopath, 0, &tmp_err);
+ if (tmp_err) {
+        g_debug("%s: Path %s doesn't exist", __func__, full_repopath);
+ g_propagate_prefixed_error(err, tmp_err, "Cannot open a dir: ");
+ rc = CRE_IO;
+ goto cleanup;
+ }
+
+ // Iterate over the files in the repository and remove all files
+ // that are listed on blacklist
+ while ((filename = g_dir_read_name(dirp))) {
+ gchar *full_path;
+
+ if (!g_slist_find_custom(blacklist, filename, (GCompareFunc) g_strcmp0))
+ // The filename is not blacklisted, skip it
+ continue;
+
+ full_path = g_strconcat(full_repopath, filename, NULL);
+
+ // REMOVE
+ // TODO: Use more sophisticated function
+ if (g_remove(full_path) != -1)
+ g_debug("Removed %s", full_path);
+ else
+ g_warning("Cannot remove %s: %s", full_path, g_strerror(errno));
+
+ g_free(full_path);
+ }
+
+cleanup:
+
+ cr_slist_free_full(blacklist, g_free);
+ g_free(full_repopath);
+ if (dirp)
+ g_dir_close(dirp);
+
+ return rc;
+}
+
+gboolean
+cr_old_metadata_retention(const char *old_repo,
+ const char *new_repo,
+ cr_RetentionType type,
+ gint64 val,
+ GError **err)
+{
+ gboolean ret = TRUE;
+ GSList *blacklist = NULL;
+ GDir *dirp = NULL;
+ const gchar *filename;
+ GError *tmp_err = NULL;
+
+ assert(!err || *err == NULL);
+
+ if (!g_file_test(old_repo, G_FILE_TEST_EXISTS))
+ return TRUE;
+
+ g_debug("Copying files from old repository to the new one");
+
+    // Get the list of files that should be skipped during copying
+ g_debug("Retention type: %d (%"G_GINT64_FORMAT")", type, val);
+ if (type == CR_RETENTION_BYAGE)
+ ret = cr_repodata_blacklist_by_age(old_repo, val, &blacklist, err);
+ else if (type == CR_RETENTION_COMPATIBILITY)
+ ret = cr_repodata_blacklist_classic(old_repo, (int) val, &blacklist, err);
+ else // CR_RETENTION_DEFAULT
+ ret = cr_repodata_blacklist(old_repo, (int) val, &blacklist, err);
+
+ if (!ret)
+ return FALSE;
+
+ // Never copy old repomd.xml to the new repository
+ blacklist = g_slist_prepend(blacklist, g_strdup("repomd.xml"));
+
+ // Open directory with old repo
+ dirp = g_dir_open (old_repo, 0, &tmp_err);
+ if (!dirp) {
+ g_warning("Cannot open directory: %s: %s", old_repo, tmp_err->message);
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open directory: %s: %s",
+ old_repo, tmp_err->message);
+ g_error_free(tmp_err);
+ ret = FALSE;
+ goto exit;
+ }
+
+ // Iterate over the files in the old repository and copy all
+ // that are not listed on blacklist
+ while ((filename = g_dir_read_name(dirp))) {
+ if (g_slist_find_custom(blacklist, filename, (GCompareFunc) g_strcmp0)) {
+ g_debug("Blacklisted: %s", filename);
+ continue;
+ }
+
+ gchar *full_path = g_strconcat(old_repo, filename, NULL);
+ gchar *new_full_path = g_strconcat(new_repo, filename, NULL);
+
+ // Do not override new file with the old one
+ if (g_file_test(new_full_path, G_FILE_TEST_EXISTS)) {
+ g_debug("Skipped copy: %s -> %s (file already exists)",
+ full_path, new_full_path);
+ g_free(full_path);
+ g_free(new_full_path);
+ continue;
+ }
+
+ // COPY!
+ cr_cp(full_path,
+ new_full_path,
+ CR_CP_RECURSIVE|CR_CP_PRESERVE_ALL,
+ NULL,
+ &tmp_err);
+
+ if (tmp_err) {
+ g_warning("Cannot copy %s -> %s: %s",
+ full_path, new_full_path, tmp_err->message);
+ g_clear_error(&tmp_err);
+ } else {
+ g_debug("Copied %s -> %s", full_path, new_full_path);
+ }
+
+ g_free(full_path);
+ g_free(new_full_path);
+ }
+
+exit:
+
+ // Cleanup
+ cr_slist_free_full(blacklist, g_free);
+ if (dirp)
+ g_dir_close(dirp);
+
+ return ret;
+}
+
+
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __C_CREATEREPOLIB_HELPERS_H__
+#define __C_CREATEREPOLIB_HELPERS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <glib.h>
+#include "checksum.h"
+#include "compression_wrapper.h"
+#include "package.h"
+
+/** \defgroup helpers Helpers for createrepo_c, modifyrepo_c, mergerepo_c
+ *
+ * Module with helpers for createrepo_c, modifyrepo_c, mergerepo_c
+ *
+ * \addtogroup helpers
+ * @{
+ */
+
+typedef enum {
+ CR_RETENTION_DEFAULT,
+ CR_RETENTION_COMPATIBILITY,
+ CR_RETENTION_BYAGE,
+} cr_RetentionType;
+
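+/** Copy metadata files from the old repository into the new one, honoring
+ * the selected retention policy. Files excluded by the policy and the old
+ * repomd.xml are skipped; files that already exist in the new repository
+ * are never overwritten.
+ * @param old_repo path to the old repodata/ directory
+ * @param new_repo path to the new repodata/ directory
+ * @param type retention policy (see cr_RetentionType)
+ * @param val policy parameter - a number of old files to keep, or
+ * a maximum age in seconds for CR_RETENTION_BYAGE
+ * @param err GError **
+ * @return TRUE on success, FALSE on error
+ */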
+gboolean
+cr_old_metadata_retention(const char *old_repo,
+ const char *new_repo,
+ cr_RetentionType type,
+ gint64 val,
+ GError **err);
+
+/** Remove files related to repodata from the specified path.
+ * Files not listed in repomd.xml and with nonstandard names (standard names
+ * are names with suffixes like primary.xml.*, primary.sqlite.*, other.xml.*,
+ * etc.) are kept untouched (the repodata/ subdirectory IS NOT removed!).
+ * @param repopath path to directory with repodata/ subdirectory
+ * @param err GError **
+ * @return number of removed files
+ */
+int cr_remove_metadata(const char *repopath, GError **err);
+
+/** Remove repodata in same manner as classic createrepo.
+ * This function removes only (primary|filelists|other)[.sqlite].* files
+ * from repodata.
+ * @param repopath path to directory with a repodata/ subdirectory
+ * @param retain keep around the latest N old, uniquely named primary,
+ * filelists and otherdata xml and sqlite files.
+ * If <1 no old files will be kept.
+ * @param err GError **
+ * @return cr_Error code
+ */
+int cr_remove_metadata_classic(const char *repopath, int retain, GError **err);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __C_CREATEREPOLIB_HELPERS_H__ */
#include "locate_metadata.h"
#include "xml_parser.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define STRINGCHUNK_SIZE 16384
/** Structure for loaded metadata
GHashTable *ht; /*!< hashtable with packages */
GStringChunk *chunk; /*!< NULL or string chunk with strings from htn */
GHashTable *pkglist_ht; /*!< list of allowed package basenames to load */
+ cr_HashTableKeyDupAction dupaction; /*!<
+ How to behave in case of duplicated items */
};
cr_HashTableKey
g_hash_table_insert(md->pkglist_ht, g_strdup(elem->data), NULL);
}
+ md->dupaction = CR_HT_DUPACT_KEEPFIRST;
+
return md;
}
g_free(md);
}
+gboolean
+cr_metadata_set_dupaction(cr_Metadata *md, cr_HashTableKeyDupAction dupaction)
+{
+ if (!md || dupaction >= CR_HT_DUPACT_SENTINEL)
+ return FALSE;
+ md->dupaction = dupaction;
+ return TRUE;
+}
+
// Callbacks for XML parsers
+typedef enum {
+ PARSING_PRI,
+ PARSING_FIL,
+ PARSING_OTH,
+} cr_ParsingState;
+
typedef struct {
GHashTable *ht;
GStringChunk *chunk;
GHashTable *pkglist_ht;
+    GHashTable *ignored_pkgIds; /*!< If there are multiple packages
+        which have the same checksum (pkgId) but are in fact different
+        (they have different basenames, mtimes or sizes),
+        then we want to ignore these packages during
+        loading. This is because the pkgId is used to pair metadata from
+        primary.xml with metadata from filelists.xml and other.xml, and
+        we want the pkgId to be unique.
+        Key is pkgId, value is NULL. */
+    cr_ParsingState state;
+    gint64 pkgKey; /*!< Load-order ordinal of the package */
} cr_CbData;
static int
primary_newpkgcb(cr_Package **pkg,
- const char *pkgId,
- const char *name,
- const char *arch,
- void *cbdata,
- GError **err)
+ G_GNUC_UNUSED const char *pkgId,
+ G_GNUC_UNUSED const char *name,
+ G_GNUC_UNUSED const char *arch,
+ void *cbdata,
+ G_GNUC_UNUSED GError **err)
{
cr_CbData *cb_data = cbdata;
- CR_UNUSED(name);
- CR_UNUSED(arch);
- CR_UNUSED(err);
-
assert(*pkg == NULL);
if (cb_data->chunk) {
*pkg = cr_package_new_without_chunk();
(*pkg)->chunk = cb_data->chunk;
+ (*pkg)->loadingflags |= CR_PACKAGE_SINGLE_CHUNK;
} else {
*pkg = cr_package_new();
}
}
static int
-primary_pkgcb(cr_Package *pkg, void *cbdata, GError **err)
+primary_pkgcb(cr_Package *pkg, void *cbdata, G_GNUC_UNUSED GError **err)
{
gboolean store_pkg = TRUE;
cr_CbData *cb_data = cbdata;
-
- CR_UNUSED(err);
+ cr_Package *epkg;
+ char *basename = cr_get_filename(pkg->location_href);
assert(pkg);
assert(pkg->pkgId);
- if (cb_data->pkglist_ht) {
- char *basename = cr_get_filename(pkg->location_href);
- if (basename)
- store_pkg = g_hash_table_lookup_extended(cb_data->pkglist_ht,
- basename,
- NULL, NULL);
- }
-
if (cb_data->chunk) {
+ // Set pkg internal chunk to NULL,
+ // if global chunk for all packages is used
assert(pkg->chunk == cb_data->chunk);
pkg->chunk = NULL;
}
+ if (cb_data->pkglist_ht && basename) {
+ // If a pkglist was specified,
+ // check if the package should be loaded or not
+ store_pkg = g_hash_table_lookup_extended(cb_data->pkglist_ht,
+ basename, NULL, NULL);
+ }
+
+ if (store_pkg) {
+ // Check if pkgId is not on the list of blocked Ids
+ if (g_hash_table_lookup_extended(cb_data->ignored_pkgIds, pkg->pkgId,
+ NULL, NULL))
+ // We should ignore this pkgId (package's hash)
+ store_pkg = FALSE;
+ }
+
if (!store_pkg) {
+ // Drop the currently loaded package
cr_package_free(pkg);
return CR_CB_RET_OK;
}
- // Store package into the hashtable
- g_hash_table_replace(cb_data->ht, pkg->pkgId, pkg);
+ epkg = g_hash_table_lookup(cb_data->ht, pkg->pkgId);
+
+ if (!epkg) {
+ // Store package into the hashtable
+ pkg->loadingflags |= CR_PACKAGE_FROM_XML;
+ pkg->loadingflags |= CR_PACKAGE_LOADED_PRI;
+ g_hash_table_replace(cb_data->ht, pkg->pkgId, pkg);
+ } else {
+ // Package with the same pkgId (hash) already exists
+ if (epkg->time_file == pkg->time_file
+ && epkg->size_package == pkg->size_package
+ && !g_strcmp0(cr_get_filename(pkg->location_href), basename))
+ {
+ // The existing package is the same as the current one.
+ // This is ok
+ g_debug("Multiple packages with the same checksum: %s. "
+ "Loading the info only once.", pkg->pkgId);
+ } else {
+ // The existing package is different. We have two different
+ // packages with the same checksum -> drop both of them
+ // and append this checksum to the ignored_pkgIds
+            // XXX: Note that this constraint works only for a single repo!
+            // If the same cr_Metadata are loaded from several different
+            // repos, then inter-repo packages with matching checksums
+            // are not checked.
+ g_debug("Multiple different packages (basename, mtime or size "
+ "doesn't match) with the same checksum: %s. "
+ "Ignoring all packages with the checksum.", pkg->pkgId);
+ g_hash_table_remove(cb_data->ht, pkg->pkgId);
+ g_hash_table_replace(cb_data->ignored_pkgIds, g_strdup(pkg->pkgId), NULL);
+ }
+
+ // Drop the currently loaded package
+ cr_package_free(pkg);
+ return CR_CB_RET_OK;
+ }
+
+ ++cb_data->pkgKey;
+ pkg->pkgKey = cb_data->pkgKey;
return CR_CB_RET_OK;
}
static int
newpkgcb(cr_Package **pkg,
const char *pkgId,
- const char *name,
- const char *arch,
+ G_GNUC_UNUSED const char *name,
+ G_GNUC_UNUSED const char *arch,
void *cbdata,
- GError **err)
+ G_GNUC_UNUSED GError **err)
{
cr_CbData *cb_data = cbdata;
- CR_UNUSED(name);
- CR_UNUSED(arch);
- CR_UNUSED(err);
-
assert(*pkg == NULL);
assert(pkgId);
*pkg = g_hash_table_lookup(cb_data->ht, pkgId);
- if (*pkg && cb_data->chunk) {
- assert(!(*pkg)->chunk);
- (*pkg)->chunk = cb_data->chunk;
+ if (*pkg) {
+ // If package with the pkgId was parsed from primary.xml, then...
+
+ if (cb_data->state == PARSING_FIL) {
+ if ((*pkg)->loadingflags & CR_PACKAGE_LOADED_FIL) {
+ // For package with this checksum, the filelist was
+ // already loaded.
+ *pkg = NULL;
+ } else {
+ // Make a note that filelist is parsed
+ (*pkg)->loadingflags |= CR_PACKAGE_LOADED_FIL;
+ }
+ }
+
+ if (cb_data->state == PARSING_OTH) {
+ if ((*pkg)->loadingflags & CR_PACKAGE_LOADED_OTH) {
+ // For package with this checksum, the other (changelogs) were
+ // already loaded.
+ *pkg = NULL;
+ } else {
+ // Make a note that other is parsed
+ (*pkg)->loadingflags |= CR_PACKAGE_LOADED_OTH;
+ }
+ }
+
+ if (*pkg && cb_data->chunk) {
+ assert(!(*pkg)->chunk);
+ (*pkg)->chunk = cb_data->chunk;
+ }
}
return CR_CB_RET_OK;
}
static int
-pkgcb(cr_Package *pkg, void *cbdata, GError **err)
+pkgcb(cr_Package *pkg, void *cbdata, G_GNUC_UNUSED GError **err)
{
cr_CbData *cb_data = cbdata;
- CR_UNUSED(err);
-
if (cb_data->chunk) {
assert(pkg->chunk == cb_data->chunk);
pkg->chunk = NULL;
assert(hashtable);
// Prepare cb data
- cb_data.ht = hashtable;
- cb_data.chunk = chunk;
- cb_data.pkglist_ht = pkglist_ht;
+ cb_data.state = PARSING_PRI;
+ cb_data.ht = hashtable;
+ cb_data.chunk = chunk;
+ cb_data.pkglist_ht = pkglist_ht;
+ cb_data.ignored_pkgIds = g_hash_table_new_full(g_str_hash, g_str_equal,
+ g_free, NULL);
+ cb_data.pkgKey = G_GINT64_CONSTANT(0);
cr_xml_parse_primary(primary_xml_path,
primary_newpkgcb,
"Primary XML parser",
(filelists_xml_path) ? 0 : 1,
&tmp_err);
+
+ g_hash_table_destroy(cb_data.ignored_pkgIds);
+ cb_data.ignored_pkgIds = NULL;
+
if (tmp_err) {
int code = tmp_err->code;
g_debug("primary.xml parsing error: %s", tmp_err->message);
return code;
}
+ cb_data.state = PARSING_FIL;
+
if (filelists_xml_path) {
cr_xml_parse_filelists(filelists_xml_path,
newpkgcb,
}
}
+ cb_data.state = PARSING_OTH;
+
if (other_xml_path) {
cr_xml_parse_other(other_xml_path,
newpkgcb,
int result;
GError *tmp_err = NULL;
GHashTable *intern_hashtable; // key is checksum (pkgId)
+ cr_HashTableKeyDupAction dupaction = md->dupaction;
assert(md);
assert(ml);
assert(!err || *err == NULL);
if (!ml->pri_xml_href) {
- g_set_error(err, CR_LOAD_METADATA_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"primary.xml file is missing");
return CRE_BADARG;
}
GHashTableIter iter;
gpointer p_key, p_value;
+ GHashTable *ignored_keys = g_hash_table_new_full(g_str_hash, g_str_equal,
+ g_free, NULL);
g_hash_table_iter_init (&iter, intern_hashtable);
while (g_hash_table_iter_next (&iter, &p_key, &p_value)) {
cr_Package *pkg = (cr_Package *) p_value;
+ cr_Package *epkg;
gpointer new_key;
switch (md->key) {
// and it SHOULD set only valid key values)
g_critical("%s: Unknown hash table key selected", __func__);
assert(0);
- g_set_error(err, CR_LOAD_METADATA_ERROR, CRE_ASSERT,
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT,
"Bad db type");
return CRE_ASSERT;
}
- if (g_hash_table_lookup(md->ht, new_key)) {
- g_debug("%s: Key \"%s\" already exists in hashtable",
- __func__, (char *) new_key);
+ epkg = g_hash_table_lookup(md->ht, new_key);
+ if (epkg) {
+ // Such key already exists
+ if (dupaction == CR_HT_DUPACT_KEEPFIRST) {
+ g_debug("%s: Key \"%s\" already exists in hashtable - Keeping the first occurrence",
+ __func__, (char *) new_key);
+ } else {
+ if (pkg->time_file != epkg->time_file
+ || pkg->size_package != epkg->size_package
+ || g_strcmp0(pkg->pkgId, epkg->pkgId)
+ || g_strcmp0(cr_get_filename(pkg->location_href),
+ cr_get_filename(epkg->location_href))
+ )
+ {
+                    // We got a key (checksum, filename, pkg name, ...)
+                    // which has multiple occurrences that differ.
+                    // Ignore such a key.
+ g_debug("%s: Key \"%s\" is present multiple times and with "
+ "different values. Ignoring all occurrences. "
+ "[size_package: %"G_GINT64_FORMAT"|%"G_GINT64_FORMAT
+ "; time_file: %"G_GINT64_FORMAT"|%"G_GINT64_FORMAT
+ "; pkgId: %s|%s; basename: %s|%s]",
+ __func__, (gchar *) new_key,
+ pkg->size_package, epkg->size_package,
+ pkg->time_file, epkg->time_file,
+ pkg->pkgId, epkg->pkgId,
+ cr_get_filename(pkg->location_href),
+ cr_get_filename(epkg->location_href));
+ g_hash_table_insert(ignored_keys, g_strdup((gchar *) new_key), NULL);
+ }
+ }
+ // Remove the package from the iterator anyway
g_hash_table_iter_remove(&iter);
} else {
g_hash_table_insert(md->ht, new_key, p_value);
}
}
+ // Remove ignored_keys from resulting hashtable
+ g_hash_table_iter_init(&iter, ignored_keys);
+ while (g_hash_table_iter_next(&iter, &p_key, &p_value)) {
+ char *key = (gchar *) p_key;
+ g_hash_table_remove(md->ht, key);
+ }
+
+    // How many items are actually usable
+ g_debug("%s: Really usable items: %d", __func__,
+ g_hash_table_size(md->ht));
// Cleanup
+ g_hash_table_destroy(ignored_keys);
cr_destroy_metadata_hashtable(intern_hashtable);
return CRE_OK;
assert(md);
assert(repopath);
- ml = cr_locate_metadata(repopath, 1, &tmp_err);
- if (tmp_err) {
+ ml = cr_locate_metadata(repopath, TRUE, &tmp_err);
+ if (!ml) {
int code = tmp_err->code;
g_propagate_error(err, tmp_err);
return code;
CR_HT_KEY_SENTINEL, /*!< last element, terminator, .. */
} cr_HashTableKey;
+/** Internally, and by default, the loaded metadata are
+ * indexed by pkgId (the package's hash). When a different
+ * attribute is selected for indexing (see cr_HashTableKey),
+ * multiple packages may end up sharing the same key attribute.
+ * This enum lists the provided methods for resolving such conflicts.
+ */
+typedef enum {
+ CR_HT_DUPACT_KEEPFIRST = 0, /*!<
+ First encountered item wins, the others will be removed - Default */
+ CR_HT_DUPACT_REMOVEALL, /*!<
+ Remove all conflicting items. */
+ CR_HT_DUPACT_SENTINEL, /*!<
+ Last element, terminator, ... */
+} cr_HashTableKeyDupAction;
+
/** Metadata object
*/
typedef struct _cr_Metadata cr_Metadata;
int use_single_chunk,
GSList *pkglist);
+/** Set the action that determines how duplicated items are handled.
+ */
+gboolean
+cr_metadata_set_dupaction(cr_Metadata *md, cr_HashTableKeyDupAction dupaction);
+
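+/* Usage sketch (illustrative, not part of this patch; assumes the
+ * cr_metadata_new()/cr_metadata_load_xml() API declared in this header
+ * and the CR_HT_KEY_FILENAME value from cr_HashTableKey):
+ *
+ *   cr_Metadata *md = cr_metadata_new(CR_HT_KEY_FILENAME, 0, NULL);
+ *   cr_metadata_set_dupaction(md, CR_HT_DUPACT_REMOVEALL);
+ *   cr_metadata_load_xml(md, ml, NULL);  // conflicting keys get dropped
+ *   cr_metadata_free(md);
+ */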
/** Destroy metadata.
* @param md cr_Metadata object
*/
#include "locate_metadata.h"
#include "repomd.h"
#include "xml_parser.h"
+#include "cleanup.h"
-#define TMPDIR_PATTERN "/tmp/createrepo_c_tmp_repo_XXXXXX"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
+#define TMPDIR_PATTERN "createrepo_c_tmp_repo_XXXXXX"
#define FORMAT_XML 1
#define FORMAT_LEVEL 0
void
cr_metadatalocation_free(struct cr_MetadataLocation *ml)
{
- if (!ml) {
+ if (!ml)
return;
- }
if (ml->tmp && ml->local_path) {
g_debug("%s: Removing %s", __func__, ml->local_path);
cr_xml_parse_repomd(repomd_path, repomd, cr_warning_cb,
"Repomd xml parser", &tmp_err);
if (tmp_err) {
- g_error("%s: %s", __func__, tmp_err->message);
+ g_critical("%s: %s", __func__, tmp_err->message);
g_error_free(tmp_err);
+ cr_repomd_free(repomd);
return NULL;
}
for (GSList *elem = repomd->records; elem; elem = g_slist_next(elem)) {
cr_RepomdRecord *record = elem->data;
- gchar *full_location_href = g_strconcat(repopath,
- (char *) record->location_href,
- NULL);
+ gchar *full_location_href = g_build_filename(
+ repopath,
+ (char *) record->location_href,
+ NULL);
if (!g_strcmp0(record->type, "primary"))
mdloc->pri_xml_href = full_location_href;
}
static struct cr_MetadataLocation *
-cr_get_local_metadata(const char *in_repopath, int ignore_sqlite)
+cr_get_local_metadata(const char *repopath, gboolean ignore_sqlite)
{
+ _cleanup_free_ gchar *repomd = NULL;
struct cr_MetadataLocation *ret = NULL;
- if (!in_repopath) {
+ if (!repopath)
return ret;
- }
- if (!g_file_test(in_repopath, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) {
- g_warning("%s: %s is not a directory", __func__, in_repopath);
+ if (!g_file_test(repopath, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) {
+ g_warning("%s: %s is not a directory", __func__, repopath);
return ret;
}
-
// Create path to repomd.xml and check if it exists
-
- gchar *repomd;
- gchar *repopath;
- if (!g_str_has_suffix(in_repopath, "/"))
- repopath = g_strconcat(in_repopath, "/", NULL);
- else
- repopath = g_strdup(in_repopath);
- repomd = g_strconcat(repopath, "repodata/repomd.xml", NULL);
-
+ repomd = g_build_filename(repopath, "repodata", "repomd.xml", NULL);
if (!g_file_test(repomd, G_FILE_TEST_EXISTS)) {
g_debug("%s: %s doesn't exist", __func__, repomd);
- g_free(repomd);
- g_free(repopath);
return ret;
}
ret = cr_parse_repomd(repomd, repopath, ignore_sqlite);
- g_free(repomd);
- g_free(repopath);
-
return ret;
}
static struct cr_MetadataLocation *
-cr_get_remote_metadata(const char *repopath, int ignore_sqlite)
+cr_get_remote_metadata(const char *repopath, gboolean ignore_sqlite)
{
- gchar *url = NULL;
- gchar *tmp_repomd = NULL;
- gchar *tmp_repodata = NULL;
- FILE *f_repomd = NULL;
CURL *handle = NULL;
- CURLcode rcode;
- char errorbuf[CURL_ERROR_SIZE];
- gchar tmp_dir[] = TMPDIR_PATTERN;
+ _cleanup_free_ gchar *tmp_dir = NULL;
+ _cleanup_free_ gchar *tmp_repodata = NULL;
+ _cleanup_free_ gchar *tmp_repomd = NULL;
+ _cleanup_free_ gchar *url = NULL;
struct cr_MetadataLocation *r_location = NULL;
struct cr_MetadataLocation *ret = NULL;
+ _cleanup_error_free_ GError *tmp_err = NULL;
- if (!repopath) {
+ if (!repopath)
return ret;
- }
-
// Create a temporary repo in the system tmp directory
-
+ tmp_dir = g_build_filename(g_get_tmp_dir(), TMPDIR_PATTERN, NULL);
if(!mkdtemp(tmp_dir)) {
g_critical("%s: Cannot create a temporary directory: %s",
- __func__, strerror(errno));
+ __func__, g_strerror(errno));
return ret;
}
- tmp_repodata = g_strconcat(tmp_dir, "/repodata/", NULL);
+ g_debug("%s: Using tmp dir: %s", __func__, tmp_dir);
+ // Create repodata subdir in tmp dir
+ tmp_repodata = g_build_filename(tmp_dir, "repodata", NULL);
if (g_mkdir (tmp_repodata, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) {
g_critical("%s: Cannot create a temporary directory", __func__);
return ret;
}
+ // Prepare temporary repomd.xml filename
+ tmp_repomd = g_build_filename(tmp_repodata, "repomd.xml", NULL);
- // Prepare temporary repomd.xml
-
- tmp_repomd = g_strconcat(tmp_repodata, "repomd.xml", NULL);
- f_repomd = fopen(tmp_repomd, "w");
- if (!f_repomd) {
- g_critical("%s: Cannot open %s", __func__, tmp_repomd);
- goto get_remote_metadata_cleanup;
- }
-
- g_debug("%s: Using tmp dir: %s", __func__, tmp_dir);
-
-
- // Download repo files
+ // Prepare repomd.xml URL
+ if (g_str_has_suffix(repopath, "/"))
+ url = g_strconcat(repopath, "repodata/repomd.xml", NULL);
+ else
+ url = g_strconcat(repopath, "/repodata/repomd.xml", NULL);
- url = g_strconcat(repopath, "repodata/repomd.xml", NULL);
+ // Create and setup CURL handle
handle = curl_easy_init();
- // Set error buffer
- if (curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errorbuf) != CURLE_OK) {
- g_critical("%s: curl_easy_setopt(CURLOPT_ERRORBUFFER) error", __func__);
- goto get_remote_metadata_cleanup;
- }
-
- // Set URL
- if (curl_easy_setopt(handle, CURLOPT_URL, url) != CURLE_OK) {
- g_critical("%s: curl_easy_setopt(CURLOPT_URL) error", __func__);
+ // Fail on HTTP error (return code >= 400)
+ if (curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1) != CURLE_OK) {
+ g_critical("%s: curl_easy_setopt(CURLOPT_FAILONERROR) error", __func__);
goto get_remote_metadata_cleanup;
}
- // Set output file descriptor
- if (curl_easy_setopt(handle, CURLOPT_WRITEDATA, f_repomd) != CURLE_OK) {
- g_critical("%s: curl_easy_setopt(CURLOPT_WRITEDATA) error", __func__);
+ // Follow redirects
+ if (curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK) {
+ g_critical("%s: curl_easy_setopt(CURLOPT_FOLLOWLOCATION) error", __func__);
goto get_remote_metadata_cleanup;
}
- // Fail on HTTP error (return code >= 400)
- if (curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1) != CURLE_OK) {
- g_critical("%s: curl_easy_setopt(CURLOPT_FAILONERROR) error", __func__);
+ // Maximum number of redirects
+ if (curl_easy_setopt(handle, CURLOPT_MAXREDIRS, 6) != CURLE_OK) {
+ g_critical("%s: curl_easy_setopt(CURLOPT_MAXREDIRS) error", __func__);
goto get_remote_metadata_cleanup;
}
-
- // Perform a file transfer of repomd.xml
-
- rcode = curl_easy_perform(handle);
- if (rcode != 0) {
- g_critical("%s: curl_easy_perform() error: %s", __func__, errorbuf);
+ // Download repomd.xml
+ cr_download(handle, url, tmp_repomd, &tmp_err);
+ if (tmp_err) {
+ g_critical("%s: %s", __func__, tmp_err->message);
goto get_remote_metadata_cleanup;
}
- fclose(f_repomd);
- f_repomd = NULL;
-
-
// Parse downloaded repomd.xml
-
r_location = cr_parse_repomd(tmp_repomd, repopath, ignore_sqlite);
if (!r_location) {
g_critical("%s: repomd.xml parser failed on %s", __func__, tmp_repomd);
goto get_remote_metadata_cleanup;
}
-
// Download all other repofiles
- GError *tmp_err = NULL;
-
if (r_location->pri_xml_href)
cr_download(handle, r_location->pri_xml_href, tmp_repodata, &tmp_err);
if (!tmp_err && r_location->fil_xml_href)
if (!tmp_err && r_location->updateinfo_href)
cr_download(handle, r_location->updateinfo_href, tmp_repodata, &tmp_err);
+ cr_metadatalocation_free(r_location);
+
if (tmp_err) {
g_critical("%s: Error while downloading files: %s",
__func__, tmp_err->message);
- g_error_free(tmp_err);
goto get_remote_metadata_cleanup;
}
g_debug("%s: Remote metadata was successfully downloaded", __func__);
-
// Parse downloaded data
-
ret = cr_get_local_metadata(tmp_dir, ignore_sqlite);
- if (ret) ret->tmp = 1;
-
+ if (ret)
+ ret->tmp = 1;
get_remote_metadata_cleanup:
- if (f_repomd) fclose(f_repomd);
- g_free(tmp_repomd);
- g_free(tmp_repodata);
- g_free(url);
- curl_easy_cleanup(handle);
+ if (handle)
+ curl_easy_cleanup(handle);
if (!ret) cr_remove_dir(tmp_dir, NULL);
- if (r_location) cr_metadatalocation_free(r_location);
return ret;
}
+// XXX: err is not really used in this function yet
struct cr_MetadataLocation *
-cr_locate_metadata(const char *in_repopath, int ignore_sqlite, GError **err)
+cr_locate_metadata(const char *repopath, gboolean ignore_sqlite, GError **err)
{
- gchar *repopath;
struct cr_MetadataLocation *ret = NULL;
- assert(in_repopath);
+ assert(repopath);
assert(!err || *err == NULL);
- // XXX: err is not really used in this function yet
-
-
- // repopath must ends with slash
-
- if (g_str_has_suffix(in_repopath, "/"))
- repopath = g_strdup(in_repopath);
- else
- repopath = g_strconcat(in_repopath, "/", NULL);
-
if (g_str_has_prefix(repopath, "ftp://") ||
g_str_has_prefix(repopath, "http://") ||
g_str_has_prefix(repopath, "https://"))
{
- // Download data via curl
+ // Remote metadata - Download them via curl
ret = cr_get_remote_metadata(repopath, ignore_sqlite);
} else {
- const char *path = repopath;
- if (g_str_has_prefix(repopath, "file://")) {
- path = repopath+7;
- }
- ret = cr_get_local_metadata(path, ignore_sqlite);
+ // Local metadata
+ if (g_str_has_prefix(repopath, "file://"))
+ repopath += 7;
+ ret = cr_get_local_metadata(repopath, ignore_sqlite);
}
if (ret)
- ret->original_url = g_strdup(in_repopath);
-
- g_free(repopath);
+ ret->original_url = g_strdup(repopath);
// XXX
if (!ret) {
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_NODIR,
+ g_set_error(err, ERR_DOMAIN, CRE_NODIR,
"Cannot locate metadata");
+ return NULL;
}
return ret;
}
-
-
-// Return list of non-null pointers on strings in the passed structure
-static GSList *
-cr_get_list_of_md_locations(struct cr_MetadataLocation *ml)
-{
- GSList *list = NULL;
-
- if (!ml) {
- return list;
- }
-
- if (ml->pri_xml_href) list = g_slist_prepend(list, (gpointer) ml->pri_xml_href);
- if (ml->fil_xml_href) list = g_slist_prepend(list, (gpointer) ml->fil_xml_href);
- if (ml->oth_xml_href) list = g_slist_prepend(list, (gpointer) ml->oth_xml_href);
- if (ml->pri_sqlite_href) list = g_slist_prepend(list, (gpointer) ml->pri_sqlite_href);
- if (ml->fil_sqlite_href) list = g_slist_prepend(list, (gpointer) ml->fil_sqlite_href);
- if (ml->oth_sqlite_href) list = g_slist_prepend(list, (gpointer) ml->oth_sqlite_href);
- if (ml->groupfile_href) list = g_slist_prepend(list, (gpointer) ml->groupfile_href);
- if (ml->cgroupfile_href) list = g_slist_prepend(list, (gpointer) ml->cgroupfile_href);
- if (ml->updateinfo_href) list = g_slist_prepend(list, (gpointer) ml->updateinfo_href);
- if (ml->repomd) list = g_slist_prepend(list, (gpointer) ml->repomd);
-
- return list;
-}
-
-
-static void
-cr_free_list_of_md_locations(GSList *list)
-{
- if (list) {
- g_slist_free(list);
- }
-}
-
-
-int
-cr_remove_metadata(const char *repopath, GError **err)
-{
- int removed_files = 0;
- gchar *full_repopath;
- const gchar *file;
- GDir *repodir;
- struct cr_MetadataLocation *ml;
- GError *tmp_err = NULL;
-
- assert(repopath);
- assert(!err || *err == NULL);
-
- if (!g_file_test(repopath, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) {
- g_debug("%s: remove_old_metadata: Cannot remove %s", __func__, repopath);
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_NODIR,
- "Directory %s doesn't exists", repopath);
- return -1;
- }
-
- full_repopath = g_strconcat(repopath, "/repodata/", NULL);
-
- repodir = g_dir_open(full_repopath, 0, &tmp_err);
- if (tmp_err) {
- g_debug("%s: Path %s doesn't exists", __func__, repopath);
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_IO,
- "Cannot open directory %s: %s", repopath, strerror(errno));
- g_free(full_repopath);
- return -1;
- }
-
-
- // Remove all metadata listed in repomd.xml
-
- ml = cr_locate_metadata(repopath, 0, &tmp_err);
- if (tmp_err) {
- g_propagate_error(err, tmp_err);
- goto cleanup;
- }
-
- if (ml) {
- GSList *list = cr_get_list_of_md_locations(ml);
- GSList *element;
-
- for (element=list; element; element=element->next) {
- gchar *path = (char *) element->data;
-
- g_debug("%s: Removing: %s (path obtained from repomd.xml)", __func__, path);
- if (g_remove(path) != -1) {
- removed_files++;
- } else {
- g_warning("%s: Cannot remove %s", __func__, path);
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_IO,
- "Cannot remove %s: %s", path, strerror(errno));
- cr_free_list_of_md_locations(list);
- cr_metadatalocation_free(ml);
- goto cleanup;
- }
- }
-
- cr_free_list_of_md_locations(list);
- cr_metadatalocation_free(ml);
- }
-
-
- // (Just for sure) List dir and remove all files which could be related to an old metadata
-
- while ((file = g_dir_read_name (repodir))) {
- if (g_str_has_suffix(file, "primary.xml.gz") ||
- g_str_has_suffix(file, "filelists.xml.gz") ||
- g_str_has_suffix(file, "other.xml.gz") ||
- g_str_has_suffix(file, "primary.xml.bz2") ||
- g_str_has_suffix(file, "filelists.xml.bz2") ||
- g_str_has_suffix(file, "other.xml.bz2") ||
- g_str_has_suffix(file, "primary.xml.xz") ||
- g_str_has_suffix(file, "filelists.xml.xz") ||
- g_str_has_suffix(file, "other.xml.xz") ||
- g_str_has_suffix(file, "primary.xml") ||
- g_str_has_suffix(file, "filelists.xml") ||
- g_str_has_suffix(file, "other.xml") ||
- g_str_has_suffix(file, "updateinfo.xml") ||
- !g_strcmp0(file, "repomd.xml"))
- {
- gchar *path = g_strconcat(full_repopath, file, NULL);
- g_debug("%s: Removing: %s", __func__, path);
- if (g_remove(path) != -1) {
- removed_files++;
- } else {
- g_warning("%s: Cannot remove %s", __func__, path);
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_IO,
- "Cannot remove %s: %s", path, strerror(errno));
- g_free(path);
- goto cleanup;
- }
- }
- }
-
-cleanup:
- g_dir_close(repodir);
- g_free(full_repopath);
-
- return removed_files;
-}
-
-
-typedef struct _old_file {
- time_t mtime;
- gchar *path;
-} OldFile;
-
-
-static void
-cr_free_old_file(gpointer data)
-{
- OldFile *old_file = (OldFile *) data;
- g_free(old_file->path);
- g_free(old_file);
-}
-
-
-static gint
-cr_cmp_old_repodata_files(gconstpointer a, gconstpointer b)
-{
- if (((OldFile *) a)->mtime < ((OldFile *) b)->mtime)
- return 1;
- if (((OldFile *) a)->mtime > ((OldFile *) b)->mtime)
- return -1;
- return 0;
-}
-
-
-static void
-cr_stat_and_insert(const gchar *dirname, const gchar *filename, GSList **list)
-{
- struct stat buf;
- OldFile *old_file;
- gchar *path = g_strconcat(dirname, filename, NULL);
- if (stat(path, &buf) == -1)
- buf.st_mtime = 1;
- old_file = g_malloc0(sizeof(OldFile));
- old_file->mtime = buf.st_mtime;
- old_file->path = path;
- *list = g_slist_insert_sorted(*list, old_file, cr_cmp_old_repodata_files);
-}
-
-
-static int
-cr_remove_listed_files(GSList *list, int retain, GError **err)
-{
- int removed = 0;
-
- assert(!err || *err == NULL);
-
- if (retain < 0) retain = 0;
-
- for (GSList *el = g_slist_nth(list, retain); el; el = g_slist_next(el)) {
- OldFile *of = (OldFile *) el->data;
- g_debug("%s: Removing: %s", __func__, of->path);
- if (g_remove(of->path) != -1) {
- ++removed;
- } else {
- g_warning("%s: Cannot remove %s", __func__, of->path);
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_IO,
- "Cannot remove %s: %s", of->path, strerror(errno));
- break;
- }
- }
-
- return removed;
-}
-
-
-int
-cr_remove_metadata_classic(const char *repopath, int retain, GError **err)
-{
- gchar *full_repopath, *repomd_path;
- GDir *repodir;
- const gchar *file;
- GSList *pri_lst = NULL, *pri_db_lst = NULL;
- GSList *fil_lst = NULL, *fil_db_lst = NULL;
- GSList *oth_lst = NULL, *oth_db_lst = NULL;
- GError *tmp_err = NULL;
-
- assert(repopath);
- assert(!err || *err == NULL);
-
- if (!g_file_test(repopath, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) {
- g_debug("%s: Cannot remove %s", __func__, repopath);
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_NODIR,
- "Directory %s doesn't exist", repopath);
- return CRE_NODIR;
- }
-
- full_repopath = g_strconcat(repopath, "/repodata/", NULL);
- repodir = g_dir_open(full_repopath, 0, &tmp_err);
- if (tmp_err) {
- g_debug("%s: Path %s doesn't exist", __func__, repopath);
- g_propagate_prefixed_error(err, tmp_err, "Cannot open a dir: ");
- g_free(full_repopath);
- return CRE_IO;
- }
-
-
- // Create sorted (by mtime) lists of old metadata files
- // More recent files are first
-
- while ((file = g_dir_read_name (repodir))) {
- // Get filename without suffix
- gchar *name_without_suffix, *dot = NULL, *i = (gchar *) file;
- for (; *i != '\0'; i++) if (*i == '.') dot = i;
- if (!dot) continue; // Filename doesn't contain '.'
- name_without_suffix = g_strndup(file, (dot - file));
-
- if (g_str_has_suffix(name_without_suffix, "primary.xml")) {
- cr_stat_and_insert(full_repopath, file, &pri_lst);
- } else if (g_str_has_suffix(name_without_suffix, "primary.sqlite")) {
- cr_stat_and_insert(full_repopath, file, &pri_db_lst);
- } else if (g_str_has_suffix(name_without_suffix, "filelists.xml")) {
- cr_stat_and_insert(full_repopath, file, &fil_lst);
- } else if (g_str_has_suffix(name_without_suffix, "filelists.sqlite")) {
- cr_stat_and_insert(full_repopath, file, &fil_db_lst);
- } else if (g_str_has_suffix(name_without_suffix, "other.xml")) {
- cr_stat_and_insert(full_repopath, file, &oth_lst);
- } else if (g_str_has_suffix(name_without_suffix, "other.sqlite")) {
- cr_stat_and_insert(full_repopath, file, &oth_db_lst);
- }
- g_free(name_without_suffix);
- }
-
- g_dir_close(repodir);
-
- // Remove old metadata
-
- int ret = CRE_OK;
- GSList *lists[] = { pri_lst, pri_db_lst,
- fil_lst, fil_db_lst,
- oth_lst, oth_db_lst };
-
-
- // Remove repomd.xml
-
- repomd_path = g_strconcat(full_repopath, "repomd.xml", NULL);
-
- g_debug("%s: Removing: %s", __func__, repomd_path);
- if (g_remove(repomd_path) == -1) {
- g_set_error(err, CR_LOCATE_METADATA_ERROR, CRE_IO,
- "Cannot remove %s: %s", repomd_path, strerror(errno));
- ret = CRE_IO;
- goto cleanup;
- }
-
-
- // Remove listed files
-
- for (int x = 0; x < 6; x++) {
- cr_remove_listed_files(lists[x], retain, &tmp_err);
- if (tmp_err) {
- ret = tmp_err->code;
- g_propagate_error(err, tmp_err);
- break;
- }
- }
-
-cleanup:
- g_free(repomd_path);
- g_free(full_repopath);
-
- for (int x = 0; x < 6; x++)
- cr_slist_free_full(lists[x], cr_free_old_file);
-
- return ret;
-}
#ifndef __C_CREATEREPOLIB_LOCATE_METADATA_H__
#define __C_CREATEREPOLIB_LOCATE_METADATA_H__
+#include <glib.h>
+
#ifdef __cplusplus
extern "C" {
#endif
* @return filled cr_MetadataLocation structure or NULL
*/
struct cr_MetadataLocation *cr_locate_metadata(const char *repopath,
- int ignore_sqlite,
+ gboolean ignore_sqlite,
GError **err);
/** Free cr_MetadataLocation. If repodata were downloaded remove
*/
void cr_metadatalocation_free(struct cr_MetadataLocation *ml);
-
-/** Remove files related to repodata from the specified path.
- * Files not listed in repomd.xml and with nonstandard names (standard names
- * are names with suffixes like primary.xml.*, primary.sqlite.*, other.xml.*,
- * etc.) are keep untouched (repodata/ subdirectory IS NOT removed!).
- * @param repopath path to directory with repodata/ subdirectory
- * @param err GError **
- * @return number of removed files
- */
-int cr_remove_metadata(const char *repopath, GError **err);
-
-/** Remove repodata in same manner as classic createrepo.
- * This function removes only (primary|filelists|other)[.sqlite].* files
- * from repodata.
- * @param repopath path to directory with repodata/subdirectory
- * @param retain keep around the latest N old, uniquely named primary,
- * filelists and otherdata xml and sqlite files.
- * If <1 no old files will be kept.
- * @param err GError **
- * @return cr_Error code
- */
-int cr_remove_metadata_classic(const char *repopath, int retain, GError **err);
-
/** @} */
#ifdef __cplusplus
#include <errno.h>
#include <string.h>
#include "error.h"
+#include "createrepo_shared.h"
#include "version.h"
+#include "helpers.h"
#include "compression_wrapper.h"
#include "misc.h"
#include "locate_metadata.h"
#include "sqlite.h"
#include "threads.h"
#include "xml_file.h"
+#include "cleanup.h"
// TODO:
char *noarch_repo_url;
gboolean unique_md_filenames;
gboolean simple_md_filenames;
+ gboolean omit_baseurl;
// Koji mergerepos specific options
gboolean koji;
NULL },
{ "simple-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.simple_md_filenames),
"Do not include the file's checksum in the metadata filename.", NULL },
+ { "omit-baseurl", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.omit_baseurl),
+ "Don't add a baseurl to packages that don't already have one.", NULL },
// -- Options related to Koji-mergerepos behaviour
{ "koji", 'k', 0, G_OPTION_ARG_NONE, &(_cmd_options.koji),
"Path to groupfile to include in metadata.", "GROUPFILE" },
{ "blocked", 'b', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.blocked),
"A file containing a list of srpm names to exclude from the merged repo. "
- "Only works with combination with --koji/-k.",
- "FILE" },
+ "Only works in combination with --koji/-k.", "FILE" },
// -- Options related to Koji-mergerepos behaviour - end
{ NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL }
};
GSList *
-append_arch(GSList *list, gchar *arch, gboolean koji)
+append_arch(GSList *list, gchar *arch, gboolean expand)
{
- GSList *elem;
-
// Try to find arch in the list
- for (elem = list; elem; elem = g_slist_next(elem)) {
- if (!strcmp(elem->data, arch))
+ for (GSList *elem = list; elem; elem = g_slist_next(elem)) {
+ if (!g_strcmp0(elem->data, arch))
return list; // Arch already exists
}
list = g_slist_prepend(list, g_strdup(arch));
- if (koji) {
+ if (expand) {
// expand arch
- if (!strcmp(arch, "i386")) {
+ if (!g_strcmp0(arch, "i386")) {
list = append_arch(list, "i486", FALSE);
list = append_arch(list, "i586", FALSE);
list = append_arch(list, "geode", FALSE);
list = append_arch(list, "i686", FALSE);
list = append_arch(list, "athlon", FALSE);
- } else if (!strcmp(arch, "x86_64")) {
+ } else if (!g_strcmp0(arch, "x86_64")) {
list = append_arch(list, "ia32e", FALSE);
list = append_arch(list, "amd64", FALSE);
- } else if (!strcmp(arch, "ppc64")) {
+ } else if (!g_strcmp0(arch, "ppc64")) {
list = append_arch(list, "ppc64pseries", FALSE);
list = append_arch(list, "ppc64iseries", FALSE);
- } else if (!strcmp(arch, "sparc64")) {
+ } else if (!g_strcmp0(arch, "sparc64")) {
list = append_arch(list, "sparc64v", FALSE);
list = append_arch(list, "sparc64v2", FALSE);
- } else if (!strcmp(arch, "sparc")) {
+ } else if (!g_strcmp0(arch, "sparc")) {
list = append_arch(list, "sparcv8", FALSE);
list = append_arch(list, "sparcv9", FALSE);
list = append_arch(list, "sparcv9v", FALSE);
list = append_arch(list, "sparcv9v2", FALSE);
- } else if (!strcmp(arch, "alpha")) {
+ } else if (!g_strcmp0(arch, "alpha")) {
list = append_arch(list, "alphaev4", FALSE);
list = append_arch(list, "alphaev45", FALSE);
list = append_arch(list, "alphaev5", FALSE);
list = append_arch(list, "alphaev67", FALSE);
list = append_arch(list, "alphaev68", FALSE);
list = append_arch(list, "alphaev7", FALSE);
- } else if (!strcmp(arch, "armhfp")) {
+ } else if (!g_strcmp0(arch, "armhfp")) {
list = append_arch(list, "armv7hl", FALSE);
list = append_arch(list, "armv7hnl", FALSE);
- } else if (!strcmp(arch, "arm")) {
+ } else if (!g_strcmp0(arch, "arm")) {
list = append_arch(list, "rmv5tel", FALSE);
list = append_arch(list, "armv5tejl", FALSE);
list = append_arch(list, "armv6l", FALSE);
list = append_arch(list, "armv7l", FALSE);
- } else if (!strcmp(arch, "sh4")) {
+ } else if (!g_strcmp0(arch, "sh4")) {
list = append_arch(list, "sh4a", FALSE);
}
}
}
+GSList *
+append_multilib_arch(GSList *list, gchar *arch)
+{
+ if (!g_strcmp0(arch, "x86_64"))
+ list = append_arch(list, "i386", TRUE);
+ else if (!g_strcmp0(arch, "ppc64"))
+ list = append_arch(list, "ppc", TRUE);
+ else if (!g_strcmp0(arch, "s390x"))
+ list = append_arch(list, "s390", TRUE);
+
+ return list;
+}
+
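+// Illustrative note (not part of this patch): with --koji and an archlist
+// entry of "x86_64", append_arch() + append_multilib_arch() produce the
+// multilib set {x86_64, ia32e, amd64, i386, i486, i586, i686, athlon, geode}.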
gboolean
check_arguments(struct CmdOptions *options)
{
while (arch_set && arch_set[x] != NULL) {
gchar *arch = arch_set[x];
if (arch[0] != '\0') {
+ // Append (and expand) the arch
options->arch_list = append_arch(options->arch_list,
arch,
options->koji);
+ // Support multilib repos
+ if (options->koji)
+ options->arch_list = append_multilib_arch(options->arch_list,
+ arch);
}
x++;
}
g_strfreev(arch_set);
+ } else if (options->koji) {
+ // Work only with noarch packages if --koji is used and no archlist is specified
+ options->arch_list = append_arch(options->arch_list, "noarch", TRUE);
}
// Compress type
GError *error = NULL;
GOptionContext *context;
- context = g_option_context_new(": take 2 or more repositories and merge "
- "their metadata into a new repo");
+ context = g_option_context_new("--repo=url --repo=url");
+ g_option_context_set_summary(context, "Take 2 or more repositories and "
+ "merge their metadata into a new repo");
g_option_context_add_main_entries(context, cmd_entries, NULL);
gboolean ret = g_option_context_parse(context, argc, argv, &error);
}
+/** Prepend protocol if necessary
+ */
+static gchar *
+prepend_protocol(const gchar *url)
+{
+ if (url && *url == '/')
+ return g_strconcat("file://", url, NULL);
+ return g_strdup(url);
+}
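+// Example behaviour (sketch): prepend_protocol("/mnt/repo") returns
+// "file:///mnt/repo", while prepend_protocol("http://example.com/repo")
+// returns an unchanged copy. The caller owns (and frees) the result.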
+
+
int
koji_stuff_prepare(struct KojiMergedReposStuff **koji_stuff_ptr,
struct CmdOptions *cmd_options,
g_hash_table_iter_init(&iter, cr_metadata_hashtable(metadata));
while (g_hash_table_iter_next(&iter, &key, &void_pkg)) {
cr_Package *pkg = (cr_Package *) void_pkg;
- struct cr_NVREA *nvrea;
+ cr_NEVRA *nevra;
gpointer data;
gboolean blocked = FALSE;
struct srpm_val *srpm_value_new;
- nvrea = cr_split_rpm_filename(pkg->rpm_sourcerpm);
+ if (!pkg->rpm_sourcerpm) {
+ g_warning("Package '%s' from '%s' doesn't have a sourcerpm specified",
+ pkg->location_href, ml->original_url);
+ continue;
+ }
+
+ nevra = cr_split_rpm_filename(pkg->rpm_sourcerpm);
if (blocked_srpms) {
// Check if srpm is blocked
blocked = g_hash_table_lookup_extended(blocked_srpms,
- nvrea->name,
+ nevra->name,
NULL,
NULL);
}
if (blocked) {
g_debug("Srpm is blocked: %s", pkg->rpm_sourcerpm);
- cr_nvrea_free(nvrea);
+ cr_nevra_free(nevra);
continue;
}
- data = g_hash_table_lookup(include_srpms, nvrea->name);
+ data = g_hash_table_lookup(include_srpms, nevra->name);
if (data) {
// We have already seen build with the same name
int cmp;
- struct cr_NVREA *nvrea_existing;
+ cr_NEVRA *nevra_existing;
struct srpm_val *srpm_value_existing = data;
if (srpm_value_existing->repo_id != repoid) {
// We found a rpm built from an srpm with the same name in
// a previous repo. The previous repo takes precedence,
// so ignore the srpm found here.
- cr_nvrea_free(nvrea);
+ cr_nevra_free(nevra);
g_debug("Srpm already loaded from previous repo %s",
pkg->rpm_sourcerpm);
continue;
}
// We're in the same repo, so compare srpm NVRs
- nvrea_existing = cr_split_rpm_filename(srpm_value_existing->sourcerpm);
- cmp = cr_cmp_nvrea(nvrea, nvrea_existing);
- cr_nvrea_free(nvrea_existing);
+ nevra_existing = cr_split_rpm_filename(srpm_value_existing->sourcerpm);
+ cmp = cr_cmp_nevra(nevra, nevra_existing);
+ cr_nevra_free(nevra_existing);
if (cmp < 1) {
// Existing package is from the newer srpm
- cr_nvrea_free(nvrea);
+ cr_nevra_free(nevra);
g_debug("Srpm already exists in newer version %s",
pkg->rpm_sourcerpm);
continue;
srpm_value_new->repo_id = repoid;
srpm_value_new->sourcerpm = g_strdup(pkg->rpm_sourcerpm);
g_hash_table_replace(include_srpms,
- g_strdup(nvrea->name),
+ g_strdup(nevra->name),
srpm_value_new);
- cr_nvrea_free(nvrea);
+ cr_nevra_free(nevra);
}
cr_metadata_free(metadata);
// 0 = Package was not added
// 1 = Package was added
// 2 = Package replaced old package
-int
+static int
add_package(cr_Package *pkg,
gchar *repopath,
GHashTable *merged,
GSList *arch_list,
MergeMethod merge_method,
gboolean include_all,
- struct KojiMergedReposStuff *koji_stuff)
+ struct KojiMergedReposStuff *koji_stuff,
+ gboolean omit_baseurl,
+ int repoid)
{
GSList *list, *element;
+ if (omit_baseurl)
+ repopath = NULL;
+
// Check if the package meet the command line architecture constraints
if (arch_list) {
// Koji-mergerepos specific behaviour -----------------------
if (koji_stuff) {
- struct cr_NVREA *nvrea;
- struct srpm_val *value;
gchar *nvra;
gboolean seen;
- nvrea = cr_split_rpm_filename(pkg->rpm_sourcerpm);
- value = g_hash_table_lookup(koji_stuff->include_srpms, nvrea->name);
- cr_nvrea_free(nvrea);
- if (!value || g_strcmp0(pkg->rpm_sourcerpm, value->sourcerpm)) {
- // Srpm of the package is not allowed
- g_debug("Package %s has forbidden srpm %s", pkg->name,
- pkg->rpm_sourcerpm);
- return 0;
+ if (pkg->rpm_sourcerpm) {
+ // Sometimes, metadata don't contain sourcerpm items
+ // for their packages.
+ // It is unclear whether it is better to include or exclude
+ // such packages; the original mergerepos script doesn't expect
+ // this situation. So for now, include them, but this can be
+ // changed anytime in the future.
+ struct srpm_val *value;
+ cr_NEVRA *nevra = cr_split_rpm_filename(pkg->rpm_sourcerpm);
+ value = g_hash_table_lookup(koji_stuff->include_srpms, nevra->name);
+ cr_nevra_free(nevra);
+ if (!value || g_strcmp0(pkg->rpm_sourcerpm, value->sourcerpm)) {
+ // Srpm of the package is not allowed
+ g_debug("Package %s has forbidden srpm %s", pkg->name,
+ pkg->rpm_sourcerpm);
+ return 0;
+ }
}
- nvra = cr_package_nvra(pkg);;
+ // Check if we have already seen this package
+ nvra = cr_package_nvra(pkg);
seen = g_hash_table_lookup_extended(koji_stuff->seen_rpms,
nvra,
NULL,
return 0;
}
+ // For the first repo (with --koji) ignore the baseURL (RhBug: 1220082)
+ if (repoid == 0)
+ repopath = NULL;
+
+ // Make a note that we have seen this package
g_hash_table_replace(koji_stuff->seen_rpms, nvra, NULL);
}
// Koji-mergerepos specific behaviour end --------------------
if (!list) {
list = g_slist_prepend(list, pkg);
- if (!pkg->location_base) {
- pkg->location_base = g_string_chunk_insert(pkg->chunk, repopath);
+ if ((!pkg->location_base || *pkg->location_base == '\0') && repopath) {
+ _cleanup_free_ gchar *repopath_with_protocol = NULL;
+ repopath_with_protocol = prepend_protocol(repopath);
+ pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, repopath_with_protocol);
}
g_hash_table_insert (merged, (gpointer) pkg->name, (gpointer) list);
return 1;
cr_package_free(c_pkg);
// Replace package in element
if (!pkg->location_base)
- pkg->location_base = g_string_chunk_insert(pkg->chunk,
+ pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk,
repopath);
element->data = pkg;
return 2;
// NVR merge method
} else if (merge_method == MM_NVR) {
- int cmp_res = cr_cmp_version_str(pkg->version, c_pkg->version);
- long pkg_release = (pkg->release) ? strtol(pkg->release, NULL, 10) : 0;
- long c_pkg_release = (c_pkg->release) ? strtol(c_pkg->release, NULL, 10) : 0;
- if (cmp_res == 1 || (cmp_res == 0 && pkg_release > c_pkg_release)) {
+ gboolean pkg_is_newer = FALSE;
+
+ int epoch_cmp = cr_cmp_version_str(pkg->epoch, c_pkg->epoch);
+ int version_cmp = cr_cmp_version_str(pkg->version, c_pkg->version);
+ int release_cmp = cr_cmp_version_str(pkg->release, c_pkg->release);
+
+
+ if (epoch_cmp == 1)
+ pkg_is_newer = TRUE;
+ else if (epoch_cmp == 0 && version_cmp == 1)
+ pkg_is_newer = TRUE;
+ else if (epoch_cmp == 0 && version_cmp == 0 && release_cmp == 1)
+ pkg_is_newer = TRUE;
+
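+ // Worked example: for 1:1.0-1 vs 0:2.0-5, epoch_cmp == 1, so the
+ // higher epoch wins regardless of version and release.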
+ if (pkg_is_newer) {
// Remove older package
cr_package_free(c_pkg);
// Replace package in element
if (!pkg->location_base)
- pkg->location_base = g_string_chunk_insert(pkg->chunk, repopath);
+ pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, repopath);
element->data = pkg;
return 2;
} else {
- g_debug("Newer version of package %s (%s) already exists",
- pkg->name, pkg->arch);
+ g_debug("Newer version of package %s.%s "
+ "(epoch: %s) (ver: %s) (rel: %s) already exists",
+ pkg->name, pkg->arch,
+ pkg->epoch ? pkg->epoch : "0",
+ pkg->version ? pkg->version : "N/A",
+ pkg->release ? pkg->release : "N/A");
return 0;
}
}
-
} else {
-
// Two packages have the same name and arch, but the --all param is used
- int cmp_res = cr_cmp_version_str(pkg->version, c_pkg->version);
- long pkg_release = (pkg->release) ? strtol(pkg->release, NULL, 10) : 0;
- long c_pkg_release = (c_pkg->release) ? strtol(c_pkg->release, NULL, 10) : 0;
-
- if (cmp_res == 0 && pkg_release == c_pkg_release) {
- // Package with same name, arch, version and release
- // is already listed
- g_debug("Same version of package %s (%s) already exists",
- pkg->name, pkg->arch);
+ // We want to check if the two packages are the same.
+ // We already know that name and arch match.
+ // We still need to check epoch, version and release.
+ if ((cr_cmp_version_str(pkg->epoch, c_pkg->epoch) == 0)
+ && (cr_cmp_version_str(pkg->version, c_pkg->version) == 0)
+ && (cr_cmp_version_str(pkg->release, c_pkg->release) == 0))
+ {
+ // Both packages are the same (at least by NEVRA values)
+ g_debug("Same version of package %s.%s "
+ "(epoch: %s) (ver: %s) (rel: %s) already exists",
+ pkg->name, pkg->arch,
+ pkg->epoch ? pkg->epoch : "0",
+ pkg->version ? pkg->version : "N/A",
+ pkg->release ? pkg->release : "N/A");
return 0;
}
}
// Add package
-
if (!pkg->location_base) {
- pkg->location_base = g_string_chunk_insert(pkg->chunk, repopath);
+ pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, repopath);
}
// XXX: The first list element (pointed to from the hashtable) must stay first!
// g_slist_append() would work but is inefficient; inserting the new element
// right after the first element is optimal (at least for now)
- assert(g_slist_insert(list, pkg, 1) == list);
+ if (g_slist_insert(list, pkg, 1) != list) {
+ assert(0);
+ }
return 1;
}
-
long
merge_repos(GHashTable *merged,
GSList *repo_list,
MergeMethod merge_method,
gboolean include_all,
GHashTable *noarch_hashtable,
- struct KojiMergedReposStuff *koji_stuff)
+ struct KojiMergedReposStuff *koji_stuff,
+ gboolean omit_baseurl)
{
long loaded_packages = 0;
GSList *used_noarch_keys = NULL;
// Load all repos
+ int repoid = 0;
GSList *element = NULL;
- for (element = repo_list; element; element = g_slist_next(element)) {
+ for (element = repo_list; element; element = g_slist_next(element), repoid++) {
gchar *repopath; // base url of current repodata
cr_Metadata *metadata; // current repodata
struct cr_MetadataLocation *ml; // location of current repodata
}
}
+ g_debug("Reading metadata for %s (%s-%s.%s)",
+ pkg->name, pkg->version, pkg->release, pkg->arch);
+
// Add package
ret = add_package(pkg,
repopath,
arch_list,
merge_method,
include_all,
- koji_stuff);
+ koji_stuff,
+ omit_baseurl,
+ repoid);
if (ret > 0) {
if (!noarch_pkg_used) {
repo_loaded_packages++;
// Koji-mergerepos specific behaviour -----------
if (koji_stuff && koji_stuff->pkgorigins) {
- gchar *nvra = cr_package_nvra(pkg);
+ _cleanup_free_ gchar *nvra = cr_package_nvra(pkg);
+ _cleanup_free_ gchar *url = prepend_protocol(ml->original_url);
+
cr_printf(NULL,
koji_stuff->pkgorigins,
"%s\t%s\n",
- nvra, ml->original_url);
- g_free(nvra);
+ nvra, url);
}
// Koji-mergerepos specific behaviour - end -----
}
pkg = (cr_Package *) element->data;
res = cr_xml_dump(pkg, NULL);
+ g_debug("Writing metadata for %s (%s-%s.%s)",
+ pkg->name, pkg->version, pkg->release, pkg->arch);
+
cr_xmlfile_add_chunk(pri_f, (const char *) res.primary, NULL);
cr_xmlfile_add_chunk(fil_f, (const char *) res.filelists, NULL);
cr_xmlfile_add_chunk(oth_f, (const char *) res.other, NULL);
CR_CW_MODE_WRITE,
cmd_options->groupfile_compression_type,
&tmp_err);
- if (!tmp_err) {
+ if (update_info) {
cr_puts(update_info,
"<?xml version=\"1.0\"?>\n<updates></updates>\n",
NULL);
int
main(int argc, char **argv)
{
+ _cleanup_error_free_ GError *tmp_err = NULL;
+
// Parse arguments
struct CmdOptions *cmd_options;
// Set logging
- g_log_set_default_handler (cr_log_fn, NULL);
-
- if (cmd_options->verbose) {
- // Verbose mode
- GLogLevelFlags levels = G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO | G_LOG_LEVEL_DEBUG | G_LOG_LEVEL_WARNING;
- g_log_set_handler(NULL, levels, cr_log_fn, NULL);
- g_log_set_handler("C_CREATEREPOLIB", levels, cr_log_fn, NULL);
- } else {
- // Standard mode
- GLogLevelFlags levels = G_LOG_LEVEL_DEBUG;
- g_log_set_handler(NULL, levels, cr_null_log_fn, NULL);
- g_log_set_handler("C_CREATEREPOLIB", levels, cr_null_log_fn, NULL);
- }
+ cr_setup_logging(FALSE, cmd_options->verbose);
// Check arguments
if (g_slist_length(cmd_options->repo_list) < 2) {
free_options(cmd_options);
- fprintf(stderr, "Usage: %s [options]\n\n"
- "%s: take 2 or more repositories and merge their "
- "metadata into a new repo\n\n",
- cr_get_filename(argv[0]), cr_get_filename(argv[0]));
+ g_printerr("Usage: %s [OPTION...] --repo=url --repo=url\n\n"
+ "%s: take 2 or more repositories and merge their "
+ "metadata into a new repo\n\n",
+ cr_get_filename(argv[0]), cr_get_filename(argv[0]));
return 1;
}
+ g_debug("Version: %d.%d.%d", CR_VERSION_MAJOR,
+ CR_VERSION_MINOR,
+ CR_VERSION_PATCH);
+
+ g_thread_init(NULL); // Initialize threading
// Prepare out_repo
if (g_mkdir_with_parents (cmd_options->tmp_out_repo, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) {
g_critical("Error while creating temporary repodata directory %s: %s",
- cmd_options->tmp_out_repo, strerror(errno));
+ cmd_options->tmp_out_repo, g_strerror(errno));
free_options(cmd_options);
return 1;
gboolean cr_download_failed = FALSE;
for (element = cmd_options->repo_list; element; element = g_slist_next(element)) {
- struct cr_MetadataLocation *loc = cr_locate_metadata((gchar *) element->data, 1, NULL);
+ struct cr_MetadataLocation *loc = cr_locate_metadata((gchar *) element->data, TRUE, NULL);
if (!loc) {
g_warning("Downloading of repodata failed: %s", (gchar *) element->data);
cr_download_failed = TRUE;
struct cr_MetadataLocation *loc;
loc = (struct cr_MetadataLocation *) element->data;
if (!groupfile && loc->groupfile_href) {
- if (cr_copy_file(loc->groupfile_href, cmd_options->tmp_out_repo, NULL) == CRE_OK) {
+ if (cr_copy_file(loc->groupfile_href, cmd_options->tmp_out_repo, &tmp_err)) {
groupfile = g_strconcat(cmd_options->tmp_out_repo,
cr_get_filename(loc->groupfile_href),
NULL);
g_debug("Using groupfile: %s", groupfile);
break;
+ } else {
+ g_warning("Groupfile %s from repo %s cannot be used: %s",
+ loc->groupfile_href, loc->original_url, tmp_err->message);
+ g_clear_error(&tmp_err);
}
}
}
} else {
// Use groupfile specified by user
- if (cr_copy_file(cmd_options->groupfile, cmd_options->tmp_out_repo, NULL) == CRE_OK) {
+ if (cr_copy_file(cmd_options->groupfile, cmd_options->tmp_out_repo, &tmp_err)) {
groupfile = g_strconcat(cmd_options->tmp_out_repo,
cr_get_filename(cmd_options->groupfile),
NULL);
g_debug("Using user specified groupfile: %s", groupfile);
- } else
- g_warning("Cannot copy groupfile %s", cmd_options->groupfile);
+ } else {
+ g_critical("Cannot copy groupfile %s: %s",
+ cmd_options->groupfile, tmp_err->message);
+ return 1;
+ }
}
// Load noarch repo
if (cmd_options->noarch_repo_url) {
struct cr_MetadataLocation *noarch_ml;
- noarch_ml = cr_locate_metadata(cmd_options->noarch_repo_url, 1, NULL);
+ noarch_ml = cr_locate_metadata(cmd_options->noarch_repo_url, TRUE, NULL);
if (!noarch_ml) {
- g_error("Cannot locate noarch repo: %s", cmd_options->noarch_repo_url);
+ g_critical("Cannot locate noarch repo: %s", cmd_options->noarch_repo_url);
return 1;
}
g_debug("Loading noarch_repo: %s", noarch_repopath);
if (cr_metadata_load_xml(noarch_metadata, noarch_ml, NULL) != CRE_OK) {
- g_error("Cannot load noarch repo: \"%s\"", noarch_ml->repomd);
+ g_critical("Cannot load noarch repo: \"%s\"", noarch_ml->repomd);
cr_metadata_free(noarch_metadata);
// TODO cleanup
cr_metadatalocation_free(noarch_ml);
noarch_metadata ?
cr_metadata_hashtable(noarch_metadata)
: NULL,
- koji_stuff);
+ koji_stuff,
+ cmd_options->omit_baseurl);
// Destroy koji stuff - we have to close pkgorigins file before dump
#define _XOPEN_SOURCE 500
+#include <glib/gstdio.h>
#include <glib.h>
+#include <arpa/inet.h>
#include <assert.h>
+#include <curl/curl.h>
#include <errno.h>
+#include <ftw.h>
+#include <rpm/rpmlib.h>
#include <stdio.h>
-#include <string.h>
#include <stdlib.h>
-#include <arpa/inet.h>
-#include <unistd.h>
-#include <ftw.h>
+#include <string.h>
+#include <sys/time.h>
#include <time.h>
-#include <curl/curl.h>
-#include <rpm/rpmlib.h>
+#include <unistd.h>
+#include "cleanup.h"
#include "error.h"
#include "misc.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define BUFFER_SIZE 4096
* Returned structure had all string inserted in the passed chunk.
*
*/
-struct cr_EVR
+cr_EVR *
cr_str_to_evr(const char *string, GStringChunk *chunk)
{
- struct cr_EVR evr;
- evr.epoch = NULL;
- evr.version = NULL;
- evr.release = NULL;
+ cr_EVR *evr = g_new0(cr_EVR, 1);
+ evr->epoch = NULL;
+ evr->version = NULL;
+ evr->release = NULL;
if (!string || !(strlen(string))) {
return evr;
// Epoch
+ gboolean bad_epoch = FALSE;
ptr = strstr(string, ":");
if (ptr) {
size_t len = ptr - string;
if (len) {
if (chunk) {
- evr.epoch = g_string_chunk_insert_len(chunk, string, len);
+ evr->epoch = g_string_chunk_insert_len(chunk, string, len);
} else {
- evr.epoch = g_strndup(string, len);
+ evr->epoch = g_strndup(string, len);
}
}
+ } else { // Bad (non-numerical) epoch
+ bad_epoch = TRUE;
}
} else { // There is no epoch
ptr = (char*) string-1;
}
- if (!evr.epoch) {
+ if (!evr->epoch && !bad_epoch) {
if (chunk) {
- evr.epoch = g_string_chunk_insert_const(chunk, "0");
+ evr->epoch = g_string_chunk_insert_const(chunk, "0");
} else {
- evr.epoch = g_strdup("0");
+ evr->epoch = g_strdup("0");
}
}
// Version
size_t version_len = ptr2 - (ptr+1);
if (chunk) {
- evr.version = g_string_chunk_insert_len(chunk, ptr+1, version_len);
+ evr->version = g_string_chunk_insert_len(chunk, ptr+1, version_len);
} else {
- evr.version = g_strndup(ptr+1, version_len);
+ evr->version = g_strndup(ptr+1, version_len);
}
// Release
size_t release_len = strlen(ptr2+1);
if (release_len) {
if (chunk) {
- evr.release = g_string_chunk_insert_len(chunk, ptr2+1,
+ evr->release = g_string_chunk_insert_len(chunk, ptr2+1,
release_len);
} else {
- evr.release = g_strndup(ptr2+1, release_len);
+ evr->release = g_strndup(ptr2+1, release_len);
}
}
} else { // Release is not here, just version
if (chunk) {
- evr.version = g_string_chunk_insert_const(chunk, ptr+1);
+ evr->version = g_string_chunk_insert_const(chunk, ptr+1);
} else {
- evr.version = g_strdup(ptr+1);
+ evr->version = g_strdup(ptr+1);
}
}
return evr;
}
+void
+cr_evr_free(cr_EVR *evr)
+{
+ if (!evr)
+ return;
+ g_free(evr->epoch);
+ g_free(evr->version);
+ g_free(evr->release);
+ g_free(evr);
+}
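+/* Usage sketch (illustrative, not part of this patch): with a NULL
+ * GStringChunk the members are heap-allocated, so the new cr_evr_free()
+ * releases everything:
+ *
+ *   cr_EVR *evr = cr_str_to_evr("1:3.2.1-4.fc20", NULL);
+ *   // evr->epoch == "1", evr->version == "3.2.1", evr->release == "4.fc20"
+ *   cr_evr_free(evr);
+ */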
/*
inline int
FILE *fp = fopen(filename, "rb");
if (!fp) {
g_debug("%s: Cannot open file %s (%s)", __func__, filename,
- strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot open %s: %s", filename, strerror(errno));
+ g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open %s: %s", filename, g_strerror(errno));
return results;
}
if (fseek(fp, 104, SEEK_SET) != 0) {
g_debug("%s: fseek fail on %s (%s)", __func__, filename,
- strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot seek over %s: %s", filename, strerror(errno));
+ g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot seek over %s: %s", filename, g_strerror(errno));
fclose(fp);
return results;
}
unsigned int sigindex = 0;
unsigned int sigdata = 0;
if (fread(&sigindex, VAL_LEN, 1, fp) != 1) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "fread() error on %s: %s", filename, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fread() error on %s: %s", filename, g_strerror(errno));
fclose(fp);
return results;
}
sigindex = htonl(sigindex);
if (fread(&sigdata, VAL_LEN, 1, fp) != 1) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "fread() error on %s: %s", filename, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fread() error on %s: %s", filename, g_strerror(errno));
fclose(fp);
return results;
}
unsigned int hdrindex = 0;
unsigned int hdrdata = 0;
if (fread(&hdrindex, VAL_LEN, 1, fp) != 1) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "fread() error on %s: %s", filename, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fread() error on %s: %s", filename, g_strerror(errno));
fclose(fp);
return results;
}
hdrindex = htonl(hdrindex);
if (fread(&hdrdata, VAL_LEN, 1, fp) != 1) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "fread() error on %s: %s", filename, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "fread() error on %s: %s", filename, g_strerror(errno));
fclose(fp);
return results;
}
if (hdrend < hdrstart) {
g_debug("%s: sanity check fail on %s (%d > %d))", __func__,
filename, hdrstart, hdrend);
- g_set_error(err, CR_MISC_ERROR, CRE_ERROR,
+ g_set_error(err, ERR_DOMAIN, CRE_ERROR,
"sanity check error on %s (hdrstart: %d > hdrend: %d)",
filename, hdrstart, hdrend);
return results;
char *
cr_get_filename(const char *filepath)
{
- char *filename = NULL;
+ char *filename;
if (!filepath)
- return filename;
+ return NULL;
filename = (char *) filepath;
size_t x = 0;
}
-int
+gboolean
cr_copy_file(const char *src, const char *in_dst, GError **err)
{
- int ret = CRE_OK;
size_t readed;
char buf[BUFFER_SIZE];
- gchar *dst = (gchar *) in_dst;
-
- FILE *orig = NULL;
- FILE *new = NULL;
+ _cleanup_free_ gchar *dst = NULL;
+ _cleanup_file_fclose_ FILE *orig = NULL;
+ _cleanup_file_fclose_ FILE *new = NULL;
assert(src);
assert(in_dst);
// If destination is dir use filename from src
if (g_str_has_suffix(in_dst, "/"))
dst = g_strconcat(in_dst, cr_get_filename(src), NULL);
+ else
+ dst = g_strdup(in_dst);
+ // Open src file
if ((orig = fopen(src, "rb")) == NULL) {
g_debug("%s: Cannot open source file %s (%s)", __func__, src,
- strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot open file %s: %s", src, strerror(errno));
- ret = CRE_IO;
- goto copy_file_cleanup;
+ g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open file %s: %s", src, g_strerror(errno));
+ return FALSE;
}
+ // Open dst file
if ((new = fopen(dst, "wb")) == NULL) {
g_debug("%s: Cannot open destination file %s (%s)", __func__, dst,
- strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot open file %s: %s", dst, strerror(errno));
- ret = CRE_IO;
- goto copy_file_cleanup;
+ g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open file %s: %s", dst, g_strerror(errno));
+ return FALSE;
}
+ // Copy content from src -> dst
while ((readed = fread(buf, 1, BUFFER_SIZE, orig)) > 0) {
if (readed != BUFFER_SIZE && ferror(orig)) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Error while read %s: %s", src, strerror(errno));
- ret = CRE_IO;
- goto copy_file_cleanup;
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Error while reading %s: %s", src, g_strerror(errno));
+ return FALSE;
}
if (fwrite(buf, 1, readed, new) != readed) {
g_debug("%s: Error while copy %s -> %s (%s)", __func__, src,
- dst, strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Error while write %s: %s", dst, strerror(errno));
- ret = CRE_IO;
- goto copy_file_cleanup;
+ dst, g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Error while writing %s: %s", dst, g_strerror(errno));
+ return FALSE;
}
}
-copy_file_cleanup:
-
- if (dst != in_dst)
- g_free(dst);
-
- if (new)
- fclose(new);
-
- if (orig)
- fclose(orig);
-
- return ret;
+ return TRUE;
}
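+/* Usage sketch (illustrative, not part of this patch): cr_copy_file()
+ * now returns a gboolean and reports failures via GError. A trailing
+ * slash on the destination means "copy into this directory":
+ *
+ *   GError *err = NULL;
+ *   if (!cr_copy_file("/tmp/comps.xml", "/tmp/out_repo/", &err)) {
+ *       g_warning("Copy failed: %s", err->message);
+ *       g_clear_error(&err);
+ *   }
+ */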
// Src must be a file NOT a directory
if (!g_file_test(src, G_FILE_TEST_IS_REGULAR)) {
g_debug("%s: Source (%s) must be a regular file!", __func__, src);
- g_set_error(err, CR_MISC_ERROR, CRE_NOFILE,
+ g_set_error(err, ERR_DOMAIN, CRE_NOFILE,
"Not a regular file: %s", src);
return CRE_NOFILE;
}
orig = fopen(src, "rb");
if (orig == NULL) {
g_debug("%s: Cannot open source file %s (%s)", __func__, src,
- strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot open %s: %s", src, strerror(errno));
+ g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open %s: %s", src, g_strerror(errno));
ret = CRE_IO;
goto compress_file_cleanup;
}
while ((readed = fread(buf, 1, BUFFER_SIZE, orig)) > 0) {
if (readed != BUFFER_SIZE && ferror(orig)) {
g_debug("%s: Error while copy %s -> %s (%s)", __func__, src,
- dst, strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Error while read %s: %s", src, strerror(errno));
+ dst, g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Error while reading %s: %s", src, g_strerror(errno));
ret = CRE_IO;
goto compress_file_cleanup;
}
// Src must be a file NOT a directory
if (!g_file_test(src, G_FILE_TEST_IS_REGULAR)) {
g_debug("%s: Source (%s) must be a regular file!", __func__, src);
- g_set_error(err, CR_MISC_ERROR, CRE_NOFILE,
+ g_set_error(err, ERR_DOMAIN, CRE_NOFILE,
"Not a regular file: %s", src);
return CRE_NOFILE;
}
}
if (compression == CR_CW_UNKNOWN_COMPRESSION) {
- g_set_error(err, CR_MISC_ERROR, CRE_UNKNOWNCOMPRESSION,
+ g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCOMPRESSION,
"Cannot detect compression type");
return CRE_UNKNOWNCOMPRESSION;
}
new = fopen(dst, "wb");
if (!new) {
g_debug("%s: Cannot open destination file %s (%s)",
- __func__, dst, strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot open %s: %s", src, strerror(errno));
+ __func__, dst, g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open %s: %s", dst, g_strerror(errno));
ret = CRE_IO;
goto compress_file_cleanup;
}
goto compress_file_cleanup;
}
- if (fwrite(buf, 1, readed, new) != readed) {
+ if (fwrite(buf, 1, readed, new) != (size_t) readed) {
g_debug("%s: Error while copy %s -> %s (%s)",
- __func__, src, dst, strerror(errno));
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Error while write %s: %s", dst, strerror(errno));
+ __func__, src, dst, g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Error while writing %s: %s", dst, g_strerror(errno));
ret = CRE_IO;
goto compress_file_cleanup;
}
int
-cr_download(CURL *handle,
+cr_download(CURL *in_handle,
const char *url,
const char *in_dst,
GError **err)
{
+ CURL *handle = NULL;
CURLcode rcode;
- FILE *file = NULL;
+ char errorbuf[CURL_ERROR_SIZE];
+ _cleanup_free_ gchar *dst = NULL;
+ _cleanup_file_fclose_ FILE *file = NULL;
+ assert(in_handle);
assert(!err || *err == NULL);
// If destination is dir use filename from src
-
- gchar *dst = NULL;
if (g_str_has_suffix(in_dst, "/"))
dst = g_strconcat(in_dst, cr_get_filename(url), NULL);
else if (g_file_test(in_dst, G_FILE_TEST_IS_DIR))
else
dst = g_strdup(in_dst);
+ // Open dst file
file = fopen(dst, "wb");
if (!file) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot open %s: %s", dst, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open %s: %s", dst, g_strerror(errno));
remove(dst);
- g_free(dst);
return CRE_IO;
}
+ // Dup the input handle
+ handle = curl_easy_duphandle(in_handle);
- // Set URL
+ // Set error buffer
+ errorbuf[0] = '\0';
+ rcode = curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errorbuf);
+ if (rcode != CURLE_OK) {
+ g_set_error(err, ERR_DOMAIN, CRE_CURL,
+ "curl_easy_setopt failed(CURLOPT_ERRORBUFFER): %s",
+ curl_easy_strerror(rcode));
+ return CRE_CURL;
+ }
+ // Set URL
rcode = curl_easy_setopt(handle, CURLOPT_URL, url);
if (rcode != CURLE_OK) {
- g_set_error(err, CR_MISC_ERROR, CRE_CURL,
- "curl_easy_setopt failed: %s", curl_easy_strerror(rcode));
- fclose(file);
+ g_set_error(err, ERR_DOMAIN, CRE_CURL,
+ "curl_easy_setopt failed(CURLOPT_URL): %s",
+ curl_easy_strerror(rcode));
remove(dst);
- g_free(dst);
return CRE_CURL;
}
-
// Set output file descriptor
-
rcode = curl_easy_setopt(handle, CURLOPT_WRITEDATA, file);
if (rcode != CURLE_OK) {
- g_set_error(err, CR_MISC_ERROR, CRE_CURL,
- "curl_easy_setopt failed: %s", curl_easy_strerror(rcode));
- fclose(file);
+ g_set_error(err, ERR_DOMAIN, CRE_CURL,
+ "curl_easy_setopt(CURLOPT_WRITEDATA) failed: %s",
+ curl_easy_strerror(rcode));
remove(dst);
- g_free(dst);
return CRE_CURL;
}
+ // Download the file
rcode = curl_easy_perform(handle);
if (rcode != CURLE_OK) {
- g_set_error(err, CR_MISC_ERROR, CRE_CURL,
- "curl_easy_perform failed: %s", curl_easy_strerror(rcode));
- fclose(file);
+ g_set_error(err, ERR_DOMAIN, CRE_CURL,
+ "curl_easy_perform failed: %s: %s",
+ curl_easy_strerror(rcode), errorbuf);
remove(dst);
- g_free(dst);
return CRE_CURL;
}
g_debug("%s: Successfully downloaded: %s", __func__, dst);
- fclose(file);
- g_free(dst);
-
return CRE_OK;
}
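+/* Usage sketch (illustrative, not part of this patch): cr_download()
+ * duplicates the passed handle via curl_easy_duphandle(), so options set
+ * on in_handle are inherited while per-download options stay isolated:
+ *
+ *   CURL *h = curl_easy_init();
+ *   GError *err = NULL;
+ *   cr_download(h, "http://example.com/repodata/repomd.xml",
+ *               "/tmp/", &err);  // trailing '/' -> use filename from URL
+ *   curl_easy_cleanup(h);
+ */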
-int
+gboolean
cr_better_copy_file(const char *src, const char *in_dst, GError **err)
{
GError *tmp_err = NULL;
tmp_err->message);
g_propagate_prefixed_error(err, tmp_err,
"Error while downloading %s: ", src);
- return CRE_CURL;
+ return FALSE;
}
- return CRE_OK;
+ return TRUE;
}
int
cr_remove_dir_cb(const char *fpath,
- const struct stat *sb,
- int typeflag,
- struct FTW *ftwbuf)
+ G_GNUC_UNUSED const struct stat *sb,
+ G_GNUC_UNUSED int typeflag,
+ G_GNUC_UNUSED struct FTW *ftwbuf)
{
- CR_UNUSED(sb);
- CR_UNUSED(typeflag);
- CR_UNUSED(ftwbuf);
int rv = remove(fpath);
if (rv)
- g_warning("%s: Cannot remove: %s: %s", __func__, fpath, strerror(errno));
+ g_warning("%s: Cannot remove: %s: %s", __func__, fpath, g_strerror(errno));
return rv;
}
ret = nftw(path, cr_remove_dir_cb, 64, FTW_DEPTH | FTW_PHYS);
if (ret != 0) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot remove dir %s: %s", path, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot remove dir %s: %s", path, g_strerror(errno));
return CRE_IO;
}
char *endptr;
const char *ptr = str;
struct cr_Version ver;
- ver.version = 0;
- ver.release = 0;
- ver.patch = 0;
- ver.suffix = NULL;
+ ver.major = 0;
+ ver.minor = 0;
+ ver.patch = 0;
+ ver.suffix = NULL;
if (!str || str[0] == '\0') {
return ver;
}
- // Version chunk
+ // Major chunk
- ver.version = strtol(ptr, &endptr, 10);
+ ver.major = strtol(ptr, &endptr, 10);
if (!endptr || endptr[0] == '\0') {
// Whole string has been converted successfully
return ver;
}
- // Release chunk
+ // Minor chunk
- ver.release = strtol(ptr, &endptr, 10);
+ ver.minor = strtol(ptr, &endptr, 10);
if (!endptr || endptr[0] == '\0') {
// Whole string has been converted successfully
return ver;
return ver;
}
+static int
+cr_compare_values(const char *str1, const char *str2)
+{
+ if (!str1 && !str2)
+ return 0;
+ else if (str1 && !str2)
+ return 1;
+ else if (!str1 && str2)
+ return -1;
+ return rpmvercmp(str1, str2);
+}
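+// Example: cr_compare_values(NULL, "1") == -1 and
+// cr_compare_values("1", NULL) == 1; two NULLs compare equal, anything
+// else falls through to rpmvercmp().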
// Return values:
// 0 - versions are the same
int
cr_cmp_version_str(const char* str1, const char *str2)
{
- struct cr_Version ver1, ver2;
-
- if (!str1 && !str2) {
- return 0;
- }
-
- // Get version
- ver1 = cr_str_to_version(str1);
- ver2 = cr_str_to_version(str2);
-
- if (ver1.version > ver2.version) {
- return 1;
- } else if (ver1.version < ver2.version) {
- return 2;
- } else if (ver1.release > ver2.release) {
- return 1;
- } else if (ver1.release < ver2.release) {
- return 2;
- } else if (ver1.patch > ver2. patch) {
- return 1;
- } else if (ver1.patch < ver2.patch) {
- return 2;
- }
-
- int strcmp_res = g_strcmp0(ver1.suffix, ver2.suffix);
-
- g_free(ver1.suffix);
- g_free(ver2.suffix);
-
- if (strcmp_res > 0) {
- return 1;
- } else if (strcmp_res < 0) {
- return 2;
- }
-
- return 0;
+ int rc = cr_compare_values(str1 ? str1 : "", str2 ? str2 : "");
+ if (rc == -1) rc = 2;
+ return rc;
}
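+// Worked example: cr_cmp_version_str("1.10", "1.9") == 1 under rpmvercmp
+// semantics (numeric segments compared as numbers), although plain string
+// comparison would order "1.10" before "1.9".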
void
-cr_null_log_fn(const gchar *log_domain,
- GLogLevelFlags log_level,
- const gchar *message,
- gpointer user_data)
+cr_null_log_fn(G_GNUC_UNUSED const gchar *log_domain,
+ G_GNUC_UNUSED GLogLevelFlags log_level,
+ G_GNUC_UNUSED const gchar *message,
+ G_GNUC_UNUSED gpointer user_data)
{
- CR_UNUSED(log_domain);
- CR_UNUSED(log_level);
- CR_UNUSED(message);
- CR_UNUSED(user_data);
return;
}
cr_log_fn(const gchar *log_domain,
GLogLevelFlags log_level,
const gchar *message,
- gpointer user_data)
+ G_GNUC_UNUSED gpointer user_data)
{
- CR_UNUSED(user_data);
-
-
switch(log_level) {
case G_LOG_LEVEL_ERROR:
- if (log_domain) fprintf(stderr, "%s: ", log_domain);
- fprintf(stderr, "Error: %s\n", message);
+ if (log_domain) g_printerr("%s: ", log_domain);
+ g_printerr("Error: %s\n", message);
break;
case G_LOG_LEVEL_CRITICAL:
- if (log_domain) fprintf(stderr, "%s: ", log_domain);
- fprintf(stderr, "Critical: %s\n", message);
+ if (log_domain) g_printerr("%s: ", log_domain);
+ g_printerr("Critical: %s\n", message);
break;
case G_LOG_LEVEL_WARNING:
- if (log_domain) fprintf(stderr, "%s: ", log_domain);
- fprintf(stderr, "Warning: %s\n", message);
+ if (log_domain) g_printerr("%s: ", log_domain);
+ g_printerr("Warning: %s\n", message);
break;
case G_LOG_LEVEL_DEBUG: {
time_t rawtime;
timeinfo = localtime ( &rawtime );
strftime (buffer, 80, "%H:%M:%S", timeinfo);
- if (log_domain) fprintf(stderr, "%s: ", log_domain);
- fprintf(stderr, "%s: %s\n", buffer, message);
+ //if (log_domain) g_printerr("%s: ", log_domain);
+ g_printerr("%s: %s\n", buffer, message);
break;
}
default:
void
cr_slist_free_full(GSList *list, GDestroyNotify free_f)
{
- g_slist_foreach (list, (GFunc) free_f, NULL);
+ g_slist_foreach(list, (GFunc) free_f, NULL);
g_slist_free(list);
}
-struct cr_NVREA *
+void cr_queue_free_full(GQueue *queue, GDestroyNotify free_f)
+{
+ g_queue_foreach(queue, (GFunc) free_f, NULL);
+ g_queue_free(queue);
+}
+
+
+cr_NEVRA *
cr_split_rpm_filename(const char *filename)
{
- struct cr_NVREA *res = NULL;
- gchar *str, *copy;
+ cr_NEVRA *nevra = NULL;
+ gchar *str, *epoch = NULL;
size_t len;
- int i;
+
+ filename = cr_get_filename(filename);
if (!filename)
- return res;
+ return NULL;
- res = g_malloc0(sizeof(struct cr_NVREA));
str = g_strdup(filename);
- copy = str;
+
+ // N-V-R.rpm:E
+ if (strchr(str, ':')) {
+ gchar **filename_epoch = g_strsplit(str, ":", 2);
+        if (g_str_has_suffix(filename_epoch[0], ".rpm")) {
+            g_free(str);
+            str = filename_epoch[0];
+            epoch = filename_epoch[1];
+            g_free(filename_epoch);  // Free only the array, not its elements
+ } else {
+ g_strfreev(filename_epoch);
+ }
+ }
+
    len = strlen(str);

    // Get rid of the ".rpm" suffix
    if (len >= 4 && g_str_has_suffix(str, ".rpm")) {
        len -= 4;
        str[len] = '\0';
    }
- // Get arch
- for (i = len-1; i >= 0; i--)
- if (str[i] == '.') {
- res->arch = g_strdup(str+i+1);
- str[i] = '\0';
- len = i;
- break;
+ nevra = cr_str_to_nevra(str);
+ g_free(str);
+
+    if (nevra && epoch) {
+        g_free(nevra->epoch);
+        nevra->epoch = epoch;
+    } else {
+        g_free(epoch);  // cr_str_to_nevra() may have returned NULL
+    }
+
+ return nevra;
+}
+
+/** Split N-V-R:E or E:N-V-R or N-E:V-R
+ */
+cr_NEVR *
+cr_str_to_nevr(const char *instr)
+{
+ gchar *nvr = NULL;
+ gchar *epoch = NULL;
+ gchar **nvr_epoch_list = NULL;
+ cr_NEVR *nevr = NULL;
+ size_t len;
+ int i;
+
+ if (!instr)
+ return NULL;
+
+ // 1)
+ // Try to split by ':'
+ // If we have N-V-R:E or E:N-V-R then nvr and epoch will be filled
+    // If we have N-E:V-R or N-V-R then only nvr will be filled
+
+ nvr_epoch_list = g_strsplit(instr, ":", 2);
+
+ if (!nvr_epoch_list || !(*nvr_epoch_list)) {
+ g_strfreev(nvr_epoch_list);
+ return NULL;
+ }
+
+ nvr = nvr_epoch_list[0];
+ epoch = nvr_epoch_list[1]; // May be NULL
+
+ if (epoch && strchr(epoch, '-')) {
+ if (!strchr(nvr, '-')) {
+ // Switch nvr and epoch
+ char *tmp = nvr;
+ nvr = epoch;
+ epoch = tmp;
+ } else {
+ // Probably the N-E:V-R format, handle it after the split
+ g_free(nvr);
+ g_free(epoch);
+ nvr = g_strdup(instr);
+ epoch = NULL;
}
+ }
+
+ g_free(nvr_epoch_list);
+
+ // 2)
+ // Now split the nvr by the '-' into three parts
+
+ nevr = g_new0(cr_NEVR, 1);
+ len = strlen(nvr);
// Get release
for (i = len-1; i >= 0; i--)
- if (str[i] == '-') {
- res->release = g_strdup(str+i+1);
- str[i] = '\0';
+ if (nvr[i] == '-') {
+ nevr->release = g_strdup(nvr+i+1);
+ nvr[i] = '\0';
len = i;
break;
}
// Get version
for (i = len-1; i >= 0; i--)
- if (str[i] == '-') {
- res->version = g_strdup(str+i+1);
- str[i] = '\0';
+ if (nvr[i] == '-') {
+ nevr->version = g_strdup(nvr+i+1);
+ nvr[i] = '\0';
len = i;
break;
}
- // Get epoch
- for (i = 0; i < (int) len; i++)
- if (str[i] == ':') {
- str[i] = '\0';
- res->epoch = g_strdup(str);
- str += i + 1;
- break;
- }
-
// Get name
- res->name = g_strdup(str);
- g_free(copy);
+ nevr->name = g_strdup(nvr);
- return res;
-}
+ g_free(nvr);
+
+ // 3)
+ // Now split the E:V
+ if (epoch == NULL && (nevr->version && strchr(nevr->version, ':'))) {
+ gchar **epoch_version = g_strsplit(nevr->version, ":", 2);
+ g_free(nevr->version);
+ nevr->epoch = epoch_version[0];
+ nevr->version = epoch_version[1];
+ g_free(epoch_version);
+ } else {
+ nevr->epoch = epoch;
+ }
+
+ return nevr;
+}
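+
+/* Illustrative usage sketch (hypothetical input):
+ *
+ *   cr_NEVR *nevr = cr_str_to_nevr("foo-1:2.0-3");  // N-E:V-R
+ *   // nevr->name: "foo", nevr->epoch: "1",
+ *   // nevr->version: "2.0", nevr->release: "3"
+ *   // "1:foo-2.0-3" and "foo-2.0-3:1" parse to the same fields
+ *   cr_nevr_free(nevr);
+ */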
void
-cr_nvrea_free(struct cr_NVREA *nvrea)
+cr_nevr_free(cr_NEVR *nevr)
{
- if (!nvrea)
+ if (!nevr)
return;
-
- g_free(nvrea->name);
- g_free(nvrea->version);
- g_free(nvrea->release);
- g_free(nvrea->epoch);
- g_free(nvrea->arch);
- g_free(nvrea);
+ g_free(nevr->name);
+ g_free(nevr->epoch);
+ g_free(nevr->version);
+ g_free(nevr->release);
+ g_free(nevr);
}
-int
-cr_compare_values(const char *str1, const char *str2)
+cr_NEVRA *
+cr_str_to_nevra(const char *instr)
{
- if (!str1 && !str2)
- return 0;
- else if (str1 && !str2)
- return 1;
- else if (!str1 && str2)
- return -1;
- return rpmvercmp(str1, str2);
+ cr_NEVR *nevr;
+ cr_NEVRA *nevra = NULL;
+ gchar *str, *epoch = NULL;
+ size_t len;
+ int i;
+
+ if (!instr)
+ return NULL;
+
+ nevra = g_new0(cr_NEVRA, 1);
+ str = g_strdup(instr);
+
+ // N-V-R.A:E
+ if (strchr(str, ':')) {
+ gchar **nvra_epoch = g_strsplit(str, ":", 2);
+ char *epoch_candidate = nvra_epoch[1];
+ if (epoch_candidate
+ && !strchr(epoch_candidate, '-')
+ && !strchr(epoch_candidate, '.'))
+ {
+            // Strip epoch from the very end
+            epoch = epoch_candidate;
+            g_free(str);
+            str = nvra_epoch[0];
+            g_free(nvra_epoch);  // Free only the array, not its elements
+ } else {
+ g_strfreev(nvra_epoch);
+ }
+ }
+
+ len = strlen(str);
+
+ // Get arch
+ for (i = len-1; i >= 0; i--)
+ if (str[i] == '.') {
+ nevra->arch = g_strdup(str+i+1);
+ str[i] = '\0';
+ len = i;
+ break;
+ }
+
+    if (nevra->arch && strchr(nevra->arch, '-')) {
+        g_warning("Invalid arch %s", nevra->arch);
+        cr_nevra_free(nevra);
+        g_free(str);
+        g_free(epoch);
+        return NULL;
+    }
+
+    nevr = cr_str_to_nevr(str);
+    if (!nevr) {  // Defensive: cr_str_to_nevr() can return NULL
+        cr_nevra_free(nevra);
+        g_free(str);
+        g_free(epoch);
+        return NULL;
+    }
+    nevra->name = nevr->name;
+    nevra->epoch = nevr->epoch;
+    nevra->version = nevr->version;
+    nevra->release = nevr->release;
+    g_free(nevr);
+
+ g_free(str);
+
+ if (epoch) {
+ g_free(nevra->epoch);
+ nevra->epoch = epoch;
+ }
+
+ return nevra;
}
+void
+cr_nevra_free(cr_NEVRA *nevra)
+{
+ if (!nevra)
+ return;
+ g_free(nevra->name);
+ g_free(nevra->epoch);
+ g_free(nevra->version);
+ g_free(nevra->release);
+ g_free(nevra->arch);
+ g_free(nevra);
+}
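+
+/* Illustrative usage sketch (hypothetical input):
+ *
+ *   cr_NEVRA *nevra = cr_str_to_nevra("foo-1:2.0-3.x86_64");
+ *   // name: "foo", epoch: "1", version: "2.0",
+ *   // release: "3", arch: "x86_64"
+ *   cr_nevra_free(nevra);
+ */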
+
int
cr_cmp_evr(const char *e1, const char *v1, const char *r1,
const char *e2, const char *v2, const char *r2)
}
int
-cr_warning_cb(cr_XmlParserWarningType type,
- char *msg,
- void *cbdata,
- GError **err)
+cr_warning_cb(G_GNUC_UNUSED cr_XmlParserWarningType type,
+ char *msg,
+ void *cbdata,
+ G_GNUC_UNUSED GError **err)
{
- CR_UNUSED(type);
- CR_UNUSED(err);
+ g_warning("%s: %s", (char *) cbdata, msg);
- g_warning("%s: %s", cbdata, msg);
+ return CR_CB_RET_OK;
}
gboolean
FILE *f = fopen(filename, "w");
if (!f) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
- "Cannot open %s: %s", filename, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot open %s: %s", filename, g_strerror(errno));
return FALSE;
}
gboolean ret = TRUE;
if (ferror(f)) {
- g_set_error(err, CR_MISC_ERROR, CRE_IO,
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
"Cannot write content to %s: %s",
- filename, strerror(errno));
+ filename, g_strerror(errno));
ret = FALSE;
}
return ret;
}
+
+static gboolean
+cr_run_command(char **cmd, const char *working_dir, GError **err)
+{
+ assert(cmd);
+ assert(!err || *err == NULL);
+
+ GError *tmp_err = NULL;
+ gint status = 0;
+ gchar *error_str = NULL;
+ int spawn_flags = G_SPAWN_SEARCH_PATH
+ | G_SPAWN_STDOUT_TO_DEV_NULL;
+
+ g_spawn_sync(working_dir,
+ cmd,
+ NULL, // envp
+ spawn_flags,
+ NULL, // child setup function
+ NULL, // user data for child setup
+ NULL, // stdout
+ &error_str, // stderr
+ &status,
+ &tmp_err);
+
+ if (tmp_err) {
+ g_free(error_str);
+ g_propagate_error(err, tmp_err);
+ return FALSE;
+ }
+
+ gboolean ret = cr_spawn_check_exit_status(status, &tmp_err);
+ if (!ret && error_str) {
+ // Remove newlines from error message
+ for (char *ptr = error_str; *ptr; ptr++)
+ if (*ptr == '\n') *ptr = ' ';
+
+ g_propagate_prefixed_error(err, tmp_err, "%s: ", error_str);
+ }
+
+ g_free(error_str);
+
+ return ret;
+}
+
+gboolean
+cr_cp(const char *src,
+ const char *dst,
+ cr_CpFlags flags,
+ const char *working_dir,
+ GError **err)
+{
+ assert(src);
+ assert(dst);
+ assert(!err || *err == NULL);
+
+ GPtrArray *argv_array = g_ptr_array_new();
+ g_ptr_array_add(argv_array, "cp");
+ if (flags & CR_CP_RECURSIVE)
+ g_ptr_array_add(argv_array, "-r");
+ if (flags & CR_CP_PRESERVE_ALL)
+ g_ptr_array_add(argv_array, "--preserve=all");
+ g_ptr_array_add(argv_array, (char *) src);
+ g_ptr_array_add(argv_array, (char *) dst);
+ g_ptr_array_add(argv_array, (char *) NULL);
+
+ gboolean ret = cr_run_command((char **) argv_array->pdata,
+ working_dir,
+ err);
+
+ g_ptr_array_free(argv_array, TRUE);
+
+ return ret;
+}
+
+gboolean
+cr_rm(const char *path,
+ cr_RmFlags flags,
+ const char *working_dir,
+ GError **err)
+{
+ assert(path);
+ assert(!err || *err == NULL);
+
+ GPtrArray *argv_array = g_ptr_array_new();
+ g_ptr_array_add(argv_array, "rm");
+ if (flags & CR_RM_RECURSIVE)
+ g_ptr_array_add(argv_array, "-r");
+ if (flags & CR_RM_FORCE)
+ g_ptr_array_add(argv_array, "-f");
+ g_ptr_array_add(argv_array, (char *) path);
+ g_ptr_array_add(argv_array, (char *) NULL);
+
+ gboolean ret = cr_run_command((char **) argv_array->pdata,
+ working_dir,
+ err);
+
+ g_ptr_array_free(argv_array, TRUE);
+
+ return ret;
+}
+
+gchar *
+cr_append_pid_and_datetime(const char *str, const char *suffix)
+{
+ struct tm * timeinfo;
+ struct timeval tv;
+ char datetime[80];
+
+ gettimeofday(&tv, NULL);
+ timeinfo = localtime (&(tv.tv_sec));
+ strftime(datetime, 80, "%Y%m%d%H%M%S", timeinfo);
+ gchar *result = g_strdup_printf("%s%jd.%s.%ld%s",
+ str ? str : "",
+ (intmax_t) getpid(),
+ datetime,
+ tv.tv_usec,
+ suffix ? suffix : "");
+ return result;
+}
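+
+/* Illustrative usage sketch (PID and timestamp values are hypothetical):
+ *
+ *   gchar *tmp = cr_append_pid_and_datetime("repodata_", ".tmp");
+ *   // -> "repodata_1234.20240101123055.123456.tmp"
+ *   g_free(tmp);
+ */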
+
+gboolean
+cr_spawn_check_exit_status(gint exit_status, GError **err)
+{
+ assert(!err || *err == NULL);
+
+ if (WIFEXITED(exit_status)) {
+ if (WEXITSTATUS(exit_status) == 0) {
+ // Exit code == 0 means success
+ return TRUE;
+ } else {
+ g_set_error (err, ERR_DOMAIN, CRE_SPAWNERRCODE,
+ "Child process exited with code %ld",
+ (long) WEXITSTATUS(exit_status));
+ }
+ } else if (WIFSIGNALED(exit_status)) {
+ g_set_error (err, ERR_DOMAIN, CRE_SPAWNKILLED,
+ "Child process killed by signal %ld",
+ (long) WTERMSIG(exit_status));
+ } else if (WIFSTOPPED(exit_status)) {
+ g_set_error (err, ERR_DOMAIN, CRE_SPAWNSTOPED,
+ "Child process stopped by signal %ld",
+ (long) WSTOPSIG(exit_status));
+ } else {
+ g_set_error (err, ERR_DOMAIN, CRE_SPAWNABNORMAL,
+ "Child process exited abnormally");
+ }
+
+ return FALSE;
+}
+
+gboolean
+cr_identical_files(const gchar *fn1,
+ const gchar *fn2,
+ gboolean *identical,
+ GError **err)
+{
+ int rc;
+ GStatBuf buf1, buf2;
+
+ *identical = FALSE;
+
+ // Stat old file
+ rc = g_stat(fn1, &buf1);
+ if (rc == -1) {
+ if (errno == ENOENT) // The first file doesn't exist
+ return TRUE;
+
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot stat %s: %s", fn1, g_strerror(errno));
+ return FALSE;
+ }
+
+ // Stat new file
+ rc = g_stat(fn2, &buf2);
+ if (rc == -1) {
+ if (errno == ENOENT) // The second file doesn't exist
+ return TRUE;
+
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot stat %s: %s", fn2, g_strerror(errno));
+ return FALSE;
+ }
+
+    // Check if both paths point to the same file
+    // (same inode on the same device)
+    if (buf1.st_ino == buf2.st_ino && buf1.st_dev == buf2.st_dev)
+ *identical = TRUE;
+
+ return TRUE;
+}
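+
+/* Illustrative usage sketch (hypothetical paths; both resolve to the
+ * same inode, so "same" ends up TRUE):
+ *
+ *   gboolean same = FALSE;
+ *   if (cr_identical_files("repodata/repomd.xml",
+ *                          "repodata/../repodata/repomd.xml",
+ *                          &same, NULL))
+ *       g_debug("Same file: %d", same);
+ */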
+
+gchar *
+cr_cut_dirs(gchar *path, gint cut_dirs)
+{
+ if (!path)
+ return NULL;
+
+ if (cut_dirs < 1)
+ return path;
+
+ gchar *last_component = NULL;
+ for (gchar *p = path; *p; p++) {
+ if (*p == '/')
+ last_component = p;
+ }
+
+ if (last_component == NULL)
+ return path;
+
+ gchar *cut = path;
+ gint n = 0;
+ gint state = 0;
+
+ for (gchar *p = path; p <= last_component; p++) {
+ if (state == 0) {
+ if (*p == '/') {
+ cut = p;
+ } else {
+ state = 1;
+ if (n == cut_dirs)
+ break;
+ }
+ } else if (state == 1) {
+ if (*p == '/') {
+ cut = p;
+ state = 0;
+ n++;
+ }
+ }
+ }
+
+ return cut+1;
+}
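+
+/* Illustrative usage sketch (hypothetical paths):
+ *
+ *   cr_cut_dirs("a/b/c/pkg.rpm", 2);  // -> "c/pkg.rpm"
+ *   cr_cut_dirs("pkg.rpm", 5);        // -> "pkg.rpm" (basename is kept)
+ */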
* @{
*/
-/** Macro for supress compiler warning about unused param.
- */
-#define CR_UNUSED(x) (void)(x)
-
/** Length of a static string (including the last '\0' byte)
*/
#define CR_STATICSTRLEN(s) (sizeof(s)/sizeof(s[0]))
+/** Length of a statically defined array.
+ */
+#define CR_ARRAYLEN(x) ((sizeof(x)/sizeof(0[x])) / ((size_t)(!(sizeof(x) % sizeof(0[x])))))
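+
+/* Illustrative usage sketch:
+ *
+ *   static const int primes[] = {2, 3, 5, 7};
+ *   size_t n = CR_ARRAYLEN(primes);  // n == 4; the 0[x] division trick
+ *                                    // rejects many non-array arguments
+ */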
+
/** Convert flags from RPM header to a string representation.
* @param flags flags
* @return flags as constant string
/** Epoch-Version-Release representation.
*/
-struct cr_EVR {
+typedef struct {
char *epoch; /*!< epoch */
char *version; /*!< version */
char *release; /*!< release */
-};
-
-/** Name-Version-Release-Epoch-Arch representation.
- */
-struct cr_NVREA {
- char *name; /*!< name */
- char *version; /*!< version */
- char *release; /*!< release */
- char *epoch; /*!< epoch */
- char *arch; /*!< arch */
-};
+} cr_EVR;
+
+/** Name-Epoch-Version-Release representation.
+ */
+typedef struct {
+    char *name;     /*!< name */
+    char *epoch;    /*!< epoch */
+    char *version;  /*!< version */
+    char *release;  /*!< release */
+} cr_NEVR;
+
+/** Name-Epoch-Version-Release-Architecture representation.
+ */
+typedef struct {
+    char *name;     /*!< name */
+    char *epoch;    /*!< epoch */
+    char *version;  /*!< version */
+    char *release;  /*!< release */
+    char *arch;     /*!< architecture */
+} cr_NEVRA;
/** Version representation
- * e.g. for openssl-devel-1.0.0i = version: 1, release: 0, patch: 0, suffix: i
+ * e.g. for openssl-devel-1.0.0i = major: 1, minor: 0, patch: 0, suffix: i
*/
struct cr_Version {
- long version; /*!< version */
- long release; /*!< release */
+    long major;      /*!< major version */
+    long minor;      /*!< minor version */
long patch; /*!< patch */
char *suffix; /*!< rest of version string after conversion */
};
* @param chunk string chunk for strings (optional - could be NULL)
- * @return filled NVR
+ * @return newly allocated cr_EVR
*/
-struct cr_EVR cr_str_to_evr(const char *string, GStringChunk *chunk);
+cr_EVR *cr_str_to_evr(const char *string, GStringChunk *chunk);
+
+/** Free cr_EVR
+ * Warning: Do not use this function when a string chunk was
+ * used in the cr_str_to_evr! In that case use only g_free on
+ * the cr_EVR pointer.
+ * @param evr cr_EVR structure
+ */
+void
+cr_evr_free(cr_EVR *evr);
/** Check if the filename match pattern for primary files (files listed
* in primary.xml).
* @param src source filename
* @param dst destination (if dst is dir, filename of src is used)
* @param err GError **
- * @return cr_Error return code
+ * @return TRUE on success, FALSE if an error occurred
*/
-int cr_copy_file(const char *src,
- const char *dst,
- GError **err);
+gboolean cr_copy_file(const char *src,
+ const char *dst,
+ GError **err);
/** Compress file.
* @param SRC source filename
* @param src source filename
* @param dst destination (if dst is dir, filename of src is used)
* @param err GError **
- * @return cr_Error return code
+ * @return TRUE on success, FALSE if an error occurred
*/
-int cr_better_copy_file(const char *src,
- const char *dst,
- GError **err);
+gboolean cr_better_copy_file(const char *src,
+ const char *dst,
+ GError **err);
/** Recursively remove directory.
* @param path filepath
*/
void cr_slist_free_full(GSList *list, GDestroyNotify free_f);
-/** Split filename into the NVREA structure.
+/** Convenience method, which frees all the memory used by a GQueue,
+ * and calls the specified destroy function on every element's data.
+ * This is the same function as g_queue_free_full(). The original function
+ * is implemented in glib since 2.32 but we need to support the older glib too.
+ * @param queue a pointer to a GQueue
+ * @param free_f the function to be called to free each element's data
+ */
+void cr_queue_free_full(GQueue *queue, GDestroyNotify free_f);
+
+/** Split filename into the NEVRA.
+ * Supported formats:
+ * [path/]N-V-R:E.A[.rpm]
+ * [path/]E:N-V-R.A[.rpm]
+ * [path/]N-E:V-R.A[.rpm]
+ * [path/]N-V-R.A[.rpm]:E
* @param filename filename
- * @return struct cr_NVREA
+ * @return cr_NEVRA
*/
-struct cr_NVREA *cr_split_rpm_filename(const char *filename);
+cr_NEVRA *cr_split_rpm_filename(const char *filename);
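+
+/* Illustrative usage sketch (hypothetical filename):
+ *
+ *   cr_NEVRA *nevra = cr_split_rpm_filename("repo/foo-2.0-3.i686.rpm");
+ *   // name: "foo", epoch: NULL, version: "2.0",
+ *   // release: "3", arch: "i686"
+ *   cr_nevra_free(nevra);
+ */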
-/** Free struct cr_NVREA.
- * @param nvrea struct cr_NVREA
- */
-void cr_nvrea_free(struct cr_NVREA *nvrea);
-
-/** Compare evr of two cr_NVREA. Name and arch are ignored.
- * @param A pointer to first cr_NVREA
- * @param B pointer to second cr_NVREA
+/** Compare evr of two cr_NEVRA. Name and arch are ignored.
+ * @param A pointer to first cr_NEVRA
+ * @param B pointer to second cr_NEVRA
* @return 0 = same, 1 = first is newer, -1 = second is newer
*/
-#define cr_cmp_nvrea(A, B) (cr_cmp_evr((A)->epoch, (A)->version, (A)->release,\
+#define cr_cmp_nevra(A, B) (cr_cmp_evr((A)->epoch, (A)->version, (A)->release,\
(B)->epoch, (B)->version, (B)->release))
/** Compare two version strings split into EVR chunks.
gboolean
cr_write_to_file(GError **err, gchar *filename, const char *format, ...);
+typedef enum {
+ CR_CP_DEFAULT = (1<<0), /*!<
+ No attributes - default */
+ CR_CP_RECURSIVE = (1<<1), /*!<
+ Copy directories recursively */
+ CR_CP_PRESERVE_ALL = (1<<2), /*!<
+        Preserve all attributes (if possible) */
+} cr_CpFlags;
+
+/** Recursive copy of directory (works on files as well)
+ * @param src Source (supports wildcards)
+ * @param dst Destination (supports wildcards)
+ * @param flags Flags
+ * @param working_dir Working directory
+ * @param err GError **
+ * @return TRUE on success, FALSE if an error occurred
+ */
+gboolean
+cr_cp(const char *src,
+ const char *dst,
+ cr_CpFlags flags,
+      const char *working_dir,
+ GError **err);
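+
+/* Illustrative usage sketch (hypothetical paths):
+ *
+ *   GError *tmp_err = NULL;
+ *   if (!cr_cp("repodata", "/tmp/backup", CR_CP_RECURSIVE, NULL, &tmp_err))
+ *       g_warning("Copy failed: %s", tmp_err->message);
+ */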
+
+typedef enum {
+ CR_RM_DEFAULT = (1<<0), /*!<
+ No attributes - default */
+ CR_RM_RECURSIVE = (1<<1), /*!<
+        Remove directories recursively */
+ CR_RM_FORCE = (1<<2), /*!<
+ Use force */
+} cr_RmFlags;
+
+/** Wrapper over rm command
+ * @param path Path (supports wildcards)
+ * @param flags Flags
+ * @param working_dir Working directory
+ * @param err GError **
+ * @return TRUE on success, FALSE if an error occurred
+ */
+gboolean
+cr_rm(const char *path,
+ cr_RmFlags flags,
+ const char *working_dir,
+ GError **err);
+
+/** Append "YYYYmmddHHMMSS.MICROSECONDS.PID" suffix to the str.
+ * @param str String or NULL
+ * @param suffix Another string that will be appended or NULL
+ * @param return Newly allocated string
+ */
+gchar *
+cr_append_pid_and_datetime(const char *str, const char *suffix);
+
+/** Createrepo_c's reimplementation of the convenient
+ * g_spawn_check_exit_status() function, which is available only since
+ * glib 2.34 (createrepo_c is currently compatible with glib >= 2.28).
+ * @param exit_status An exit code as returned from g_spawn_sync()
+ * @param error GError **
+ * @returns TRUE if child exited successfully,
+ * FALSE otherwise (and error will be set)
+ */
+gboolean
+cr_spawn_check_exit_status(gint exit_status, GError **error);
+
+/** Parse E:N-V-R or N-V-R:E or N-E:V-R string
+ * @param str NEVR string
+ * @returns Newly allocated cr_NEVR or NULL on error
+ */
+cr_NEVR *
+cr_str_to_nevr(const char *str);
+
+/** Free cr_NEVR
+ * @param nevr cr_NEVR structure
+ */
+void
+cr_nevr_free(cr_NEVR *nevr);
+
+/** Parse E:N-V-R.A, N-V-R:E.A, N-E:V-R.A or N-V-R.A:E string.
+ * @param str NEVRA string
+ * @returns Newly allocated cr_NEVRA or NULL on error
+ */
+cr_NEVRA *
+cr_str_to_nevra(const char *str);
+
+/** Free cr_NEVRA
+ * @param nevra cr_NEVRA structure
+ */
+void
+cr_nevra_free(cr_NEVRA *nevra);
+
+/** Are the files identical?
+ * Different paths could point to the same file.
+ * This function checks if both paths point to the same file or not.
+ * If one of the files doesn't exist, the function doesn't fail;
+ * it just puts FALSE into the "identical" value and returns.
+ * @param fn1 First path
+ * @param fn2 Second path
+ * @param identical Are the files same or not
+ * @param err GError **
+ * @return FALSE if an error was encountered, TRUE otherwise
+ */
+gboolean
+cr_identical_files(const gchar *fn1,
+ const gchar *fn2,
+ gboolean *identical,
+ GError **err);
+
+/** Cut the first N directory components of a path.
+ * Note: The basename is never cut out.
+ * @param path Path
+ * @param cut_dirs Number of leading directory components to cut
+ * @return Pointer into path, just behind the cut components
+ */
+gchar *
+cr_cut_dirs(gchar *path, gint cut_dirs);
+
/** @} */
#ifdef __cplusplus
#include "error.h"
#include "version.h"
#include "compression_wrapper.h"
+#include "createrepo_shared.h"
#include "misc.h"
#include "locate_metadata.h"
#include "load_metadata.h"
#include "xml_file.h"
#include "modifyrepo_shared.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
typedef struct {
gboolean version;
"Do not compress the new repodata before adding it to the repo.",
NULL },
{ "compress-type", 0, 0, G_OPTION_ARG_STRING, &(options->compress_type),
- "Compression format to use.", NULL },
+ "Compression format to use.", "COMPRESS_TYPE" },
{ "checksum", 's', 0, G_OPTION_ARG_STRING, &(options->checksum),
"Specify the checksum type to use. (default: sha256)", "SUMTYPE" },
{ "unique-md-filenames", 0, 0, G_OPTION_ARG_NONE,
options->new_name = NULL;
GOptionContext *context;
- context = g_option_context_new(": Modify a repository's repomd.xml");
+ context = g_option_context_new("<input metadata> <output repodata>\n"
+ " modifyrepo_c --remove <metadata type> <output repodata>\n"
+ " modifyrepo_c [OPTION...] --batchfile <batch file> <output repodata>");
+ g_option_context_set_summary(context, "Modify a repository's repomd.xml");
g_option_context_add_main_entries(context, cmd_entries, NULL);
gboolean ret = g_option_context_parse(context, argc, argv, err);
g_option_context_free(context);
&& cr_compression_type(options->compress_type) == \
CR_CW_UNKNOWN_COMPRESSION)
{
- g_set_error(err, CR_MODIFYREPO_ERROR, CRE_ERROR,
+ g_set_error(err, ERR_DOMAIN, CRE_ERROR,
"Unknown compression type \"%s\"", options->compress_type);
return FALSE;
}
if (options->checksum
&& cr_checksum_type(options->checksum) == CR_CHECKSUM_UNKNOWN)
{
- g_set_error(err, CR_MODIFYREPO_ERROR, CRE_ERROR,
+ g_set_error(err, ERR_DOMAIN, CRE_ERROR,
"Unknown checksum type \"%s\"", options->checksum);
return FALSE;
}
// -f/--batchfile
if (options->batchfile
&& !g_file_test(options->batchfile, G_FILE_TEST_IS_REGULAR)) {
- g_set_error(err, CR_MODIFYREPO_ERROR, CRE_ERROR,
+ g_set_error(err, ERR_DOMAIN, CRE_ERROR,
"File \"%s\" doesn't exist", options->batchfile);
return FALSE;
}
g_debug("Preparing task for: %s", metadatapath);
if (metadatapath && !g_file_test(metadatapath, G_FILE_TEST_IS_REGULAR)) {
- g_set_error(err, CR_MODIFYREPO_ERROR, CRE_ERROR,
+ g_set_error(err, ERR_DOMAIN, CRE_ERROR,
"File \"%s\" is not regular file or doesn't exists",
metadatapath);
return FALSE;
// Set logging
- g_log_set_default_handler(cr_log_fn, NULL);
- if (options.verbose) {
- // Verbose mode
- GLogLevelFlags levels = G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO | G_LOG_LEVEL_DEBUG | G_LOG_LEVEL_WARNING;
- g_log_set_handler(NULL, levels, cr_log_fn, NULL);
- g_log_set_handler("C_CREATEREPOLIB", levels, cr_log_fn, NULL);
- } else {
- // Standard mode
- GLogLevelFlags levels = G_LOG_LEVEL_DEBUG;
- g_log_set_handler(NULL, levels, cr_null_log_fn, NULL);
- g_log_set_handler("C_CREATEREPOLIB", levels, cr_null_log_fn, NULL);
- }
+ cr_setup_logging(FALSE, options.verbose);
+
// Print version if required
exit(EXIT_FAILURE);
}
+ g_thread_init(NULL); // Initialize threading
+
+ // Emit debug message with version
+
+ g_debug("Version: %d.%d.%d\n", CR_VERSION_MAJOR,
+ CR_VERSION_MINOR,
+ CR_VERSION_PATCH);
+
// Prepare list of tasks to do
gchar *repodatadir = NULL;
// Process the tasks
ret = cr_modifyrepo(modifyrepotasks, repodatadir, &err);
- g_slist_free_full(modifyrepotasks, (GDestroyNotify)cr_modifyrepotask_free);
+ cr_slist_free_full(modifyrepotasks, (GDestroyNotify)cr_modifyrepotask_free);
if (!ret) {
g_printerr("%s\n", err->message);
#include <errno.h>
#include <string.h>
#include <assert.h>
+#include "cleanup.h"
#include "error.h"
#include "misc.h"
#include "checksum.h"
#include "threads.h"
#include "xml_dump.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define DEFAULT_COMPRESSION CR_CW_GZ_COMPRESSION
#define DEFAULT_CHECKSUM CR_CHECKSUM_SHA256
gchar *repomd_path = g_build_filename(repopath, "repomd.xml", NULL);
if (!g_file_test(repomd_path, G_FILE_TEST_IS_REGULAR)) {
- g_set_error(err, CR_MODIFYREPO_ERROR, CRE_IO,
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
"Regular file \"%s\" doesn't exists", repomd_path);
g_free(repomd_path);
return FALSE;
// Skip removing task
continue;
- gchar *src_fn = task->path;
- gchar *dst_fn = NULL;
+ gchar *src_fn = task->path; // Shortcut
+ _cleanup_free_ gchar *dst_fn = NULL;
const gchar *suffix = NULL;
cr_CompressionType compress_type = CR_CW_NO_COMPRESSION;
suffix = cr_compression_suffix(compress_type);
}
- // Prepare dst filename
- gchar *filename = NULL;
+ // Prepare dst filename - Get basename
+ _cleanup_free_ gchar *filename = NULL;
if (task->new_name)
filename = g_path_get_basename(task->new_name);
else
filename = g_path_get_basename(src_fn);
+ // Prepare dst filename - Add suffix
if (suffix) {
gchar *tmp_fn = g_strconcat(filename, suffix, NULL);
g_free(filename);
filename = tmp_fn;
}
+ // Prepare dst filename - Full path
dst_fn = g_build_filename(repopath, filename, NULL);
- g_free(filename);
+ task->dst_fn = g_string_chunk_insert(task->chunk, dst_fn);
- if (g_file_test(dst_fn, G_FILE_TEST_EXISTS)) {
- g_warning("Destination file \"%s\" already exists and will be "
- "overwritten", dst_fn);
- }
+ // Check if the src and dst is the same file
+ gboolean identical = FALSE;
+ if (!cr_identical_files(src_fn, dst_fn, &identical, err))
+ return FALSE;
- // Do the copy
- g_debug("%s: Copy & compress operation %s -> %s",
- __func__, src_fn, dst_fn);
+ if (identical) {
+ // Source and destination file is the same file
+ g_debug("Using already existing file: %s", dst_fn);
+ } else {
+ // Check if the file already exist
+ if (g_file_test(dst_fn, G_FILE_TEST_EXISTS)) {
+ g_warning("Destination file \"%s\" already exists and will be "
+ "overwritten", dst_fn);
+ }
- if (cr_compress_file(src_fn, dst_fn, compress_type, err) != CRE_OK) {
- g_debug("%s: Copy & compress operation failed", __func__);
- cr_repomd_free(repomd);
- g_free(repomd_path);
- g_free(dst_fn);
- return FALSE;
+ // Do the copy
+ g_debug("%s: Copy & compress operation %s -> %s",
+ __func__, src_fn, dst_fn);
+
+ if (cr_compress_file(src_fn, dst_fn, compress_type, err) != CRE_OK) {
+ g_debug("%s: Copy & compress operation failed", __func__);
+ cr_repomd_free(repomd);
+ g_free(repomd_path);
+ return FALSE;
+ }
}
task->repopath = cr_safe_string_chunk_insert_null(task->chunk, dst_fn);
- g_free(dst_fn);
}
// Prepare new repomd records
// Add records into repomd
for (GSList *elem = repomdrecords; elem; elem = g_slist_next(elem)) {
cr_RepomdRecord *rec = elem->data;
+ g_debug("Adding record \"%s\"", rec->type);
cr_repomd_set_record(repomd, rec);
}
g_slist_free(repomdrecords);
// Do not even try to remove records with base location
continue;
+ // Construct filename
+ _cleanup_free_ gchar *realpath = g_build_filename(repopath,
+ "../",
+ rec->location_href,
+ NULL);
+
        // First, check that the file to be deleted isn't still
        // used by some other record.
        // This could happen if the user adds a file that already exists
        // in the repodata. In that case we don't want to remove the file.
gboolean remove_this = TRUE;
+
+ // Check if a file that is referenced by a record that should
+ // be removed belongs to any other record (a record that
+ // shouldn't be removed).
for (GSList *e = repomd->records; e; e = g_slist_next(e)) {
cr_RepomdRecord *lrec = e->data;
+ _cleanup_free_ gchar *lrealpath = NULL;
- if (!g_strcmp0(rec->location_href, lrec->location_href)) {
+ // Construct filename
+ lrealpath = g_build_filename(repopath,
+ "../",
+ lrec->location_href,
+ NULL);
+
+ // Check if files are identical
+ gboolean identical = FALSE;
+ if (!cr_identical_files(realpath, lrealpath, &identical, err))
+ return FALSE;
+
+ // If yes, do not remove it
+ if (identical) {
remove_this = FALSE;
break;
}
}
if (!remove_this)
- break;
-
- gchar *realpath = g_build_filename(repopath,
- "../",
- rec->location_href,
- NULL);
+ continue;
g_debug("%s: Removing \"%s\"", __func__, realpath);
if (remove(realpath) == -1)
- g_warning("Cannot remove \"%s\": %s", realpath, strerror(errno));
- g_free(realpath);
+ g_warning("Cannot remove \"%s\": %s", realpath, g_strerror(errno));
}
- g_slist_free_full(recordstoremove, (GDestroyNotify)cr_repomd_record_free);
+ cr_slist_free_full(recordstoremove, (GDestroyNotify)cr_repomd_record_free);
cr_repomd_free(repomd);
task->unique_md_filenames, task->checksum_type,
cr_checksum_name_str(task->checksum_type), task->new_name);
- }
+ }
g_strfreev(groups);
if (success) {
*modifyrepotasks = g_slist_concat(*modifyrepotasks, tasks);
} else {
- g_slist_free_full(tasks, (GDestroyNotify)cr_modifyrepotask_free);
+ cr_slist_free_full(tasks, (GDestroyNotify)cr_modifyrepotask_free);
}
g_key_file_free(keyfile);
extern "C" {
#endif
+#include <glib.h>
#include "checksum.h"
#include "compression_wrapper.h"
#include "package.h"
// Internal use
gchar *repopath;
+ gchar *dst_fn;
GStringChunk *chunk;
} cr_ModifyRepoTask;
return entry;
}
+cr_BinaryData *
+cr_binary_data_new(void)
+{
+ return (cr_BinaryData *) g_new0(cr_BinaryData, 1);
+}
+
cr_Package *
cr_package_new(void)
{
if (!package)
return;
- if (package->chunk)
+ if (package->chunk && !(package->loadingflags & CR_PACKAGE_SINGLE_CHUNK))
g_string_chunk_free (package->chunk);
/* Note: Since glib 2.28
g_slist_free (package->obsoletes);
}
+ if (package->suggests) {
+ g_slist_foreach (package->suggests, (GFunc) g_free, NULL);
+ g_slist_free (package->suggests);
+ }
+
+ if (package->enhances) {
+ g_slist_foreach (package->enhances, (GFunc) g_free, NULL);
+ g_slist_free (package->enhances);
+ }
+
+ if (package->recommends) {
+ g_slist_foreach (package->recommends, (GFunc) g_free, NULL);
+ g_slist_free (package->recommends);
+ }
+
+ if (package->supplements) {
+ g_slist_foreach (package->supplements, (GFunc) g_free, NULL);
+ g_slist_free (package->supplements);
+ }
+
if (package->files) {
g_slist_foreach (package->files, (GFunc) g_free, NULL);
g_slist_free (package->files);
g_slist_free (package->changelogs);
}
+ g_free(package->siggpg);
+ g_free(package->sigpgp);
+
g_free (package);
}
pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, orig->location_base);
pkg->checksum_type = cr_safe_string_chunk_insert(pkg->chunk, orig->checksum_type);
- pkg->requires = cr_dependency_dup(pkg->chunk, orig->requires);
- pkg->provides = cr_dependency_dup(pkg->chunk, orig->provides);
- pkg->conflicts = cr_dependency_dup(pkg->chunk, orig->conflicts);
- pkg->obsoletes = cr_dependency_dup(pkg->chunk, orig->obsoletes);
+ pkg->requires = cr_dependency_dup(pkg->chunk, orig->requires);
+ pkg->provides = cr_dependency_dup(pkg->chunk, orig->provides);
+ pkg->conflicts = cr_dependency_dup(pkg->chunk, orig->conflicts);
+ pkg->obsoletes = cr_dependency_dup(pkg->chunk, orig->obsoletes);
+ pkg->suggests = cr_dependency_dup(pkg->chunk, orig->suggests);
+ pkg->enhances = cr_dependency_dup(pkg->chunk, orig->enhances);
+ pkg->recommends = cr_dependency_dup(pkg->chunk, orig->recommends);
+ pkg->supplements = cr_dependency_dup(pkg->chunk, orig->supplements);
for (GSList *elem = orig->files; elem; elem = g_slist_next(elem)) {
cr_PackageFile *orig_file = elem->data;
* @{
*/
+typedef enum {
+ CR_PACKAGE_FROM_HEADER = (1<<1), /*!< Metadata parsed from header */
+    CR_PACKAGE_FROM_XML = (1<<2), /*!< Metadata parsed from xml */
+ /* Some values are reserved (for sqlite, solv, etc..) */
+ CR_PACKAGE_LOADED_PRI = (1<<10), /*!< Primary metadata was loaded */
+ CR_PACKAGE_LOADED_FIL = (1<<11), /*!< Filelists metadata was loaded */
+ CR_PACKAGE_LOADED_OTH = (1<<12), /*!< Other metadata was loaded */
+ CR_PACKAGE_SINGLE_CHUNK = (1<<13), /*!< Package uses single chunk */
+} cr_PackageLoadingFlags;
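+
+/* Illustrative usage sketch: the flags form a bitfield, e.g.
+ *
+ *   if (pkg->loadingflags & CR_PACKAGE_SINGLE_CHUNK)
+ *       ;  // pkg->chunk is shared - do not free it with the package
+ */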
+
/** Dependency (Provides, Conflicts, Obsoletes, Requires).
*/
typedef struct {
char *changelog; /*!< text of changelog */
} cr_ChangelogEntry;
+/** Binary data.
+ */
+typedef struct {
+ void *data;
+ gsize size;
+} cr_BinaryData;
+
/** Package
*/
typedef struct {
GSList *provides; /*!< provides (list of cr_Dependency structs) */
GSList *conflicts; /*!< conflicts (list of cr_Dependency structs) */
GSList *obsoletes; /*!< obsoletes (list of cr_Dependency structs) */
+ GSList *suggests; /*!< suggests (list of cr_Dependency structs) */
+ GSList *enhances; /*!< enhances (list of cr_Dependency structs) */
+ GSList *recommends; /*!< recommends (list of cr_Dependency structs) */
+ GSList *supplements; /*!< supplements (list of cr_Dependency structs) */
GSList *files; /*!< files in the package (list of
cr_PackageFile structs) */
GSList *changelogs; /*!< changelogs (list of cr_ChangelogEntry
structs) */
+ char *hdrid;
+ cr_BinaryData *siggpg;
+ cr_BinaryData *sigpgp;
+
GStringChunk *chunk; /*!< string chunk for store all package strings
on the single place */
+
+ cr_PackageLoadingFlags loadingflags; /*!<
+ Bitfield flags with information about package loading */
} cr_Package;
/** Create new (empty) dependency structure.
*/
cr_ChangelogEntry *cr_changelog_entry_new(void);
+/** Create new (empty) structure for binary data
+ * @return new empty cr_BinaryData
+ */
+cr_BinaryData *cr_binary_data_new(void);
+
/** Create new (empty) package structure.
* @return new empty cr_Package
*/
#include "parsehdr.h"
#include "xml_dump.h"
#include "misc.h"
-
+#include "cleanup.h"
+
+#if defined(RPMTAG_SUGGESTS) && defined(RPMTAG_ENHANCES) \
+ && defined(RPMTAG_RECOMMENDS) && defined(RPMTAG_SUPPLEMENTS)
+#define RPM_WEAK_DEPS_SUPPORT 1
+#endif
+
+typedef enum DepType_e {
+ DEP_PROVIDES,
+ DEP_CONFLICTS,
+ DEP_OBSOLETES,
+ DEP_REQUIRES,
+ DEP_SUGGESTS,
+ DEP_ENHANCES,
+ DEP_RECOMMENDS,
+ DEP_SUPPLEMENTS,
+ DEP_SENTINEL
+} DepType;
+
+typedef struct DepItem_s {
+ DepType type;
+ int nametag;
+ int flagstag;
+ int versiontag;
+} DepItem;
+
+// Keep this list sorted in the same order as the enum DepType_e!
+static DepItem dep_items[] = {
+ { DEP_PROVIDES, RPMTAG_PROVIDENAME, RPMTAG_PROVIDEFLAGS, RPMTAG_PROVIDEVERSION },
+ { DEP_CONFLICTS, RPMTAG_CONFLICTNAME, RPMTAG_CONFLICTFLAGS, RPMTAG_CONFLICTVERSION },
+ { DEP_OBSOLETES, RPMTAG_OBSOLETENAME, RPMTAG_OBSOLETEFLAGS, RPMTAG_OBSOLETEVERSION },
+ { DEP_REQUIRES, RPMTAG_REQUIRENAME, RPMTAG_REQUIREFLAGS, RPMTAG_REQUIREVERSION },
+#ifdef RPM_WEAK_DEPS_SUPPORT
+ { DEP_SUGGESTS, RPMTAG_SUGGESTNAME, RPMTAG_SUGGESTFLAGS, RPMTAG_SUGGESTVERSION },
+ { DEP_ENHANCES, RPMTAG_ENHANCENAME, RPMTAG_ENHANCEFLAGS, RPMTAG_ENHANCEVERSION },
+ { DEP_RECOMMENDS, RPMTAG_RECOMMENDNAME, RPMTAG_RECOMMENDFLAGS, RPMTAG_RECOMMENDVERSION },
+ { DEP_SUPPLEMENTS, RPMTAG_SUPPLEMENTNAME, RPMTAG_SUPPLEMENTFLAGS, RPMTAG_SUPPLEMENTVERSION },
+#endif
+ { DEP_SENTINEL, 0, 0, 0 },
+};
static inline int
cr_compare_dependency(const char *dep1, const char *dep2)
cr_Package *
-cr_package_from_header(Header hdr, gint64 mtime, gint64 size,
- const char *checksum, const char *checksum_type,
- const char *location_href, const char *location_base,
- int changelog_limit, gint64 hdr_start, gint64 hdr_end,
- GError **err)
+cr_package_from_header(Header hdr,
+ int changelog_limit,
+ cr_HeaderReadingFlags hdrrflags,
+ G_GNUC_UNUSED GError **err)
{
cr_Package *pkg;
assert(hdr);
assert(!err || *err == NULL);
- CR_UNUSED(err); // In fact, GError is not used in this function yet.
-
// Create new package structure
pkg = cr_package_new();
+ pkg->loadingflags |= CR_PACKAGE_FROM_HEADER;
+ pkg->loadingflags |= CR_PACKAGE_LOADED_PRI;
+ pkg->loadingflags |= CR_PACKAGE_LOADED_FIL;
+ pkg->loadingflags |= CR_PACKAGE_LOADED_OTH;
// Create rpm tag data container
// Fill package structure
- pkg->pkgId = cr_safe_string_chunk_insert(pkg->chunk, checksum);
pkg->name = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_NAME));
gint64 is_src = headerGetNumber(hdr, RPMTAG_SOURCEPACKAGE);
pkg->release = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_RELEASE));
pkg->summary = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_SUMMARY));
- pkg->description = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_DESCRIPTION));
+ pkg->description = cr_safe_string_chunk_insert_null(pkg->chunk, headerGetString(hdr, RPMTAG_DESCRIPTION));
pkg->url = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_URL));
- pkg->time_file = mtime;
if (headerGet(hdr, RPMTAG_BUILDTIME, td, flags)) {
pkg->time_build = rpmtdGetNumber(td);
}
pkg->rpm_group = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_GROUP));
pkg->rpm_buildhost = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_BUILDHOST));
pkg->rpm_sourcerpm = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_SOURCERPM));
- pkg->rpm_header_start = hdr_start;
- pkg->rpm_header_end = hdr_end;
pkg->rpm_packager = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_PACKAGER));
- pkg->size_package = size;
if (headerGet(hdr, RPMTAG_SIZE, td, flags)) {
pkg->size_installed = rpmtdGetNumber(td);
}
if (headerGet(hdr, RPMTAG_ARCHIVESIZE, td, flags)) {
pkg->size_archive = rpmtdGetNumber(td);
}
- pkg->location_href = cr_safe_string_chunk_insert(pkg->chunk, location_href);
- pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, location_base);
- pkg->checksum_type = cr_safe_string_chunk_insert(pkg->chunk, checksum_type);
rpmtdFreeData(td);
rpmtdFree(td);
rpmtd fileversions = rpmtdNew();
- enum pcor {PROVIDES, CONFLICTS, OBSOLETES, REQUIRES};
-
- int file_tags[REQUIRES+1] = { RPMTAG_PROVIDENAME,
- RPMTAG_CONFLICTNAME,
- RPMTAG_OBSOLETENAME,
- RPMTAG_REQUIRENAME
- };
-
- int flag_tags[REQUIRES+1] = { RPMTAG_PROVIDEFLAGS,
- RPMTAG_CONFLICTFLAGS,
- RPMTAG_OBSOLETEFLAGS,
- RPMTAG_REQUIREFLAGS
- };
-
- int version_tags[REQUIRES+1] = { RPMTAG_PROVIDEVERSION,
- RPMTAG_CONFLICTVERSION,
- RPMTAG_OBSOLETEVERSION,
- RPMTAG_REQUIREVERSION
- };
-
// Struct used as value in ap_hashtable
struct ap_value_struct {
const char *flags;
NULL,
free);
- int pcor_type;
- for (pcor_type=0; pcor_type <= REQUIRES; pcor_type++) {
- if (headerGet(hdr, file_tags[pcor_type], filenames, flags) &&
- headerGet(hdr, flag_tags[pcor_type], fileflags, flags) &&
- headerGet(hdr, version_tags[pcor_type], fileversions, flags))
+ for (int deptype=0; dep_items[deptype].type != DEP_SENTINEL; deptype++) {
+ if (headerGet(hdr, dep_items[deptype].nametag, filenames, flags) &&
+ headerGet(hdr, dep_items[deptype].flagstag, fileflags, flags) &&
+ headerGet(hdr, dep_items[deptype].versiontag, fileversions, flags))
{
// Because we have to select only libc.so with highest version
const char *full_version = rpmtdGetString(fileversions);
// Requires specific stuff
- if (pcor_type == REQUIRES) {
+ if (deptype == DEP_REQUIRES) {
// Skip requires which start with "rpmlib("
if (!strncmp("rpmlib(", filename, 7)) {
continue;
}
}
+ // Parse dep string
+ cr_EVR *evr = cr_str_to_evr(full_version, pkg->chunk);
+ if ((full_version && *full_version) && !evr->epoch) {
+                // NULL in epoch means that the epoch was bad (non-numerical)
+ _cleanup_free_ gchar *pkg_nevra = cr_package_nevra(pkg);
+ g_warning("Bad epoch in version string \"%s\" for dependency \"%s\" in package \"%s\"",
+ full_version, filename, pkg_nevra);
+ g_warning("Skipping this dependency");
+ g_free(evr);
+ continue;
+ }
+
// Create dynamic dependency object
cr_Dependency *dependency = cr_dependency_new();
dependency->name = cr_safe_string_chunk_insert(pkg->chunk, filename);
dependency->flags = cr_safe_string_chunk_insert(pkg->chunk, flags);
- struct cr_EVR evr = cr_str_to_evr(full_version, pkg->chunk);
- dependency->epoch = evr.epoch;
- dependency->version = evr.version;
- dependency->release = evr.release;
+ dependency->epoch = evr->epoch;
+ dependency->version = evr->version;
+ dependency->release = evr->release;
+ g_free(evr);
- switch (pcor_type) {
- case PROVIDES:
+ switch (deptype) {
+ case DEP_PROVIDES:
g_hash_table_replace(provided_hashtable, dependency->name, dependency->name);
pkg->provides = g_slist_prepend(pkg->provides, dependency);
break;
- case CONFLICTS:
+ case DEP_CONFLICTS:
pkg->conflicts = g_slist_prepend(pkg->conflicts, dependency);
break;
- case OBSOLETES:
+ case DEP_OBSOLETES:
pkg->obsoletes = g_slist_prepend(pkg->obsoletes, dependency);
break;
- case REQUIRES:
+ case DEP_REQUIRES:
dependency->pre = pre;
// XXX: libc.so filtering ////////////////////////////
value->pre = dependency->pre;
g_hash_table_replace(ap_hashtable, dependency->name, value);
break; //case REQUIRES end
+ case DEP_SUGGESTS:
+ pkg->suggests = g_slist_prepend(pkg->suggests, dependency);
+ break;
+ case DEP_ENHANCES:
+ pkg->enhances = g_slist_prepend(pkg->enhances, dependency);
+ break;
+ case DEP_RECOMMENDS:
+ pkg->recommends = g_slist_prepend(pkg->recommends, dependency);
+ break;
+ case DEP_SUPPLEMENTS:
+ pkg->supplements = g_slist_prepend(pkg->supplements, dependency);
+ break;
} // Switch end
} // While end
// XXX: libc.so filtering ////////////////////////////////
- if (pcor_type == REQUIRES && libc_require_highest)
+ if (deptype == DEP_REQUIRES && libc_require_highest)
pkg->requires = g_slist_prepend(pkg->requires, libc_require_highest);
// XXX: libc.so filtering - END ////////////////////////////////
}
rpmtdFreeData(fileversions);
}
- pkg->provides = g_slist_reverse (pkg->provides);
- pkg->conflicts = g_slist_reverse (pkg->conflicts);
- pkg->obsoletes = g_slist_reverse (pkg->obsoletes);
- pkg->requires = g_slist_reverse (pkg->requires);
+ pkg->provides = g_slist_reverse (pkg->provides);
+ pkg->conflicts = g_slist_reverse (pkg->conflicts);
+ pkg->obsoletes = g_slist_reverse (pkg->obsoletes);
+ pkg->requires = g_slist_reverse (pkg->requires);
+ pkg->suggests = g_slist_reverse (pkg->suggests);
+ pkg->enhances = g_slist_reverse (pkg->enhances);
+ pkg->recommends = g_slist_reverse (pkg->recommends);
+ pkg->supplements = g_slist_reverse (pkg->supplements);
g_hash_table_remove_all(filenames_hashtable);
g_hash_table_remove_all(provided_hashtable);
headerGet(hdr, RPMTAG_CHANGELOGNAME, changelognames, flags) &&
headerGet(hdr, RPMTAG_CHANGELOGTEXT, changelogtexts, flags))
{
- gint64 last_time = 0;
+ gint64 last_time = G_GINT64_CONSTANT(0);
rpmtdInit(changelogtimes);
rpmtdInit(changelognames);
rpmtdInit(changelogtexts);
while ((rpmtdNext(changelogtimes) != -1) &&
(rpmtdNext(changelognames) != -1) &&
(rpmtdNext(changelogtexts) != -1) &&
- (changelog_limit > 0))
+ (changelog_limit > 0 || changelog_limit == -1))
{
gint64 time = rpmtdGetNumber(changelogtimes);
}
pkg->changelogs = g_slist_prepend(pkg->changelogs, changelog);
- changelog_limit--;
+ if (changelog_limit != -1)
+ changelog_limit--;
// If a previous entry has the same time, increment time of the previous
// entry by one. Ugly but works!
rpmtdFree(changelognames);
rpmtdFree(changelogtexts);
- return pkg;
-}
-
-
-struct cr_XmlStruct
-cr_xml_from_header(Header hdr,
- gint64 mtime,
- gint64 size,
- const char *checksum,
- const char *checksum_type,
- const char *location_href,
- const char *location_base,
- int changelog_limit,
- gint64 hdr_start,
- gint64 hdr_end,
- GError **err)
-{
- cr_Package *pkg;
- struct cr_XmlStruct result;
- GError *tmp_err = NULL;
-
- assert(hdr);
- assert(!err || *err == NULL);
+ //
+ // Keys and hdrid (data used for caching when the --cachedir is specified)
+ //
- result.primary = NULL;
- result.filelists = NULL;
- result.other = NULL;
+ if (hdrrflags & CR_HDRR_LOADHDRID)
+ pkg->hdrid = cr_safe_string_chunk_insert(pkg->chunk,
+ headerGetString(hdr, RPMTAG_HDRID));
- pkg = cr_package_from_header(hdr, mtime, size, checksum, checksum_type,
- location_href, location_base,
- changelog_limit, hdr_start, hdr_end, &tmp_err);
- if (tmp_err) {
- g_propagate_error(err, tmp_err);
- goto cleanup;
- }
+ if (hdrrflags & CR_HDRR_LOADSIGNATURES) {
+ rpmtd gpgtd = rpmtdNew();
+ rpmtd pgptd = rpmtdNew();
- result.primary = cr_xml_dump_primary(pkg, &tmp_err);
- if (tmp_err) {
- g_propagate_error(err, tmp_err);
- goto cleanup;
- }
+        if (headerGet(hdr, RPMTAG_SIGGPG, gpgtd, flags)
+ && gpgtd->count > 0)
+ {
+ pkg->siggpg = cr_binary_data_new();
+ pkg->siggpg->size = gpgtd->count;
+ pkg->siggpg->data = g_string_chunk_insert_len(pkg->chunk,
+ gpgtd->data,
+ gpgtd->count);
+ }
- result.filelists = cr_xml_dump_filelists(pkg, &tmp_err);
- if (tmp_err) {
- result.primary = NULL;
- g_propagate_error(err, tmp_err);
- goto cleanup;
- }
+        if (headerGet(hdr, RPMTAG_SIGPGP, pgptd, flags)
+ && pgptd->count > 0)
+ {
+ pkg->sigpgp = cr_binary_data_new();
+ pkg->sigpgp->size = pgptd->count;
+ pkg->sigpgp->data = g_string_chunk_insert_len(pkg->chunk,
+ pgptd->data,
+ pgptd->count);
+ }
- result.other = cr_xml_dump_other(pkg, &tmp_err);
- if (tmp_err) {
- result.primary = NULL;
- result.filelists = NULL;
- g_propagate_error(err, tmp_err);
- goto cleanup;
+ rpmtdFree(gpgtd);
+ rpmtdFree(pgptd);
}
-cleanup:
- cr_package_free(pkg);
-
- return result;
+ return pkg;
}
extern "C" {
#endif
-#include <rpm/rpmlib.h>
#include <glib.h>
+#include <rpm/rpmlib.h>
#include "package.h"
-#include "xml_dump.h"
/** \defgroup parsehdr Header parser API.
* \addtogroup parsehdr
* @{
*/
+/** Flags
+ */
+typedef enum {
+ CR_HDRR_NONE = (1 << 0),
+ CR_HDRR_LOADHDRID = (1 << 1), /*!< Load hdrid */
+    CR_HDRR_LOADSIGNATURES  = (1 << 2), /*!< Load siggpg and sigpgp */
+} cr_HeaderReadingFlags;
+
/** Read data from header and return filled cr_Package structure.
- * All const char * params could be NULL.
* @param hdr Header
- * @param mtime mtime of rpm file
- * @param size size of rpm file (in bytes)
- * @param checksum checksum of rpm file
- * @param checksum_type used checksum algorithm
- * @param location_href location of package inside repository
- * @param location_base location (url) of repository
* @param changelog_limit number of changelog entries
- * @param hdr_start start byte of header
- * @param hdr_end last byte of header
+ * @param flags Flags for header reading
* @param err GError **
- * @return cr_Package
+ * @return Newly allocated cr_Package or NULL on error
*/
cr_Package *cr_package_from_header(Header hdr,
- gint64 mtime,
- gint64 size,
- const char *checksum,
- const char *checksum_type,
- const char *location_href,
- const char *location_base,
int changelog_limit,
- gint64 hdr_start,
- gint64 hdr_end,
+ cr_HeaderReadingFlags flags,
GError **err);
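+
+/* Illustrative usage sketch (the changelog limit of 10 is arbitrary):
+ *
+ *   cr_Package *pkg = cr_package_from_header(hdr, 10,
+ *                                            CR_HDRR_LOADHDRID, NULL);
+ *   // pkg->hdrid is filled; checksum, location and size fields are
+ *   // left for the caller (see cr_package_from_rpm())
+ *   cr_package_free(pkg);
+ */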
-/** Read data from header and return struct cr_XmlStruct.
- * All const char * params could be NULL.
- * @param hdr Header
- * @param mtime mtime of rpm file
- * @param size size of rpm file (in bytes)
- * @param checksum checksum of rpm file
- * @param checksum_type used checksum algorithm
- * @param location_href location of package inside repository
- * @param location_base location (url) of repository
- * @param changelog_limit number of changelog entries
- * @param hdr_start start byte of header
- * @param hdr_end last byte of header
- * @param err GError **
- * @return XML chunks for primary, filelists and other
- * (in struct cr_XmlStruct)
- */
-struct cr_XmlStruct cr_xml_from_header(Header hdr,
- gint64 mtime,
- gint64 size,
- const char *checksum,
- const char *checksum_type,
- const char *location_href,
- const char *location_base,
- int changelog_limit,
- gint64 hdr_start,
- gint64 hdr_end,
- GError **err);
-
/** @} */
#ifdef __cplusplus
#include <rpm/rpmkeyring.h>
#include "error.h"
#include "parsehdr.h"
+#include "parsepkg.h"
#include "misc.h"
#include "checksum.h"
-volatile short cr_initialized = 0;
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
+
rpmts cr_ts = NULL;
-void
-cr_package_parser_init()
+static gpointer
+cr_package_parser_init_once_cb(gpointer user_data G_GNUC_UNUSED)
{
- if (cr_initialized)
- return;
- cr_initialized = 1;
rpmReadConfigFiles(NULL, NULL);
cr_ts = rpmtsCreate();
if (!cr_ts)
vsflags |= _RPMVSF_NOSIGNATURES;
vsflags |= RPMVSF_NOHDRCHK;
rpmtsSetVSFlags(cr_ts, vsflags);
+ return NULL;
}
-
void
-cr_package_parser_cleanup()
+cr_package_parser_init()
{
- if (cr_ts)
+ static GOnce package_parser_init_once = G_ONCE_INIT;
+ g_once(&package_parser_init_once, cr_package_parser_init_once_cb, NULL);
+}
+
+static gpointer
+cr_package_parser_cleanup_once_cb(gpointer user_data G_GNUC_UNUSED)
+{
+ if (cr_ts) {
rpmtsFree(cr_ts);
+ cr_ts = NULL;
+ }
rpmFreeMacros(NULL);
rpmFreeRpmrc();
+ return NULL;
}
+void
+cr_package_parser_cleanup()
+{
+ static GOnce package_parser_cleanup_once = G_ONCE_INIT;
+ g_once(&package_parser_cleanup_once, cr_package_parser_cleanup_once_cb, NULL);
+}
-static int
+static gboolean
read_header(const char *filename, Header *hdr, GError **err)
{
assert(filename);
FD_t fd = Fopen(filename, "r.ufdio");
if (!fd) {
g_warning("%s: Fopen of %s failed %s",
- __func__, filename, strerror(errno));
- g_set_error(err, CR_PARSEPKG_ERROR, CRE_IO,
- "Fopen failed: %s", strerror(errno));
- return CRE_IO;
+ __func__, filename, g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Fopen failed: %s", g_strerror(errno));
+ return FALSE;
}
int rc = rpmReadPackageFile(cr_ts, fd, NULL, hdr);
__func__, filename);
break;
default:
- g_warning("%s: rpmReadPackageFile() error (%s)",
- __func__, strerror(errno));
- g_set_error(err, CR_PARSEPKG_ERROR, CRE_IO,
- "rpmReadPackageFile() error: %s", strerror(errno));
+ g_warning("%s: rpmReadPackageFile() error",
+ __func__);
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "rpmReadPackageFile() error");
Fclose(fd);
- return CRE_IO;
+ return FALSE;
}
}
Fclose(fd);
- return CRE_OK;
+ return TRUE;
+}
+
+cr_Package *
+cr_package_from_rpm_base(const char *filename,
+ int changelog_limit,
+ cr_HeaderReadingFlags flags,
+ GError **err)
+{
+ Header hdr;
+ cr_Package *pkg;
+
+ assert(filename);
+ assert(!err || *err == NULL);
+
+ if (!read_header(filename, &hdr, err))
+ return NULL;
+
+ pkg = cr_package_from_header(hdr, changelog_limit, flags, err);
+ headerFree(hdr);
+ return pkg;
}
cr_Package *
const char *location_base,
int changelog_limit,
struct stat *stat_buf,
+ cr_HeaderReadingFlags flags,
GError **err)
{
cr_Package *pkg = NULL;
- const char *checksum_type_str;
GError *tmp_err = NULL;
assert(filename);
assert(!err || *err == NULL);
- checksum_type_str = cr_checksum_name_str(checksum_type);
+ // Get a package object
+ pkg = cr_package_from_rpm_base(filename, changelog_limit, flags, err);
+ if (!pkg)
+ goto errexit;
+ pkg->location_href = cr_safe_string_chunk_insert(pkg->chunk, location_href);
+ pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, location_base);
- // Read header
-
- Header hdr;
- read_header(filename, &hdr, &tmp_err);
- if (tmp_err) {
- g_propagate_error(err, tmp_err);
- return NULL;
- }
-
+ // Get checksum type string
+ pkg->checksum_type = cr_safe_string_chunk_insert(pkg->chunk,
+ cr_checksum_name_str(checksum_type));
// Get file stat
-
- gint64 mtime;
- gint64 size;
-
if (!stat_buf) {
struct stat stat_buf_own;
if (stat(filename, &stat_buf_own) == -1) {
- g_warning("%s: stat() error (%s)", __func__, strerror(errno));
- g_set_error(err, CR_PARSEPKG_ERROR, CRE_IO, "stat() failed");
- headerFree(hdr);
- return NULL;
+ g_warning("%s: stat(%s) error (%s)", __func__,
+ filename, g_strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_IO, "stat(%s) failed: %s",
+ filename, g_strerror(errno));
+ goto errexit;
}
- mtime = stat_buf_own.st_mtime;
- size = stat_buf_own.st_size;
+ pkg->time_file = stat_buf_own.st_mtime;
+ pkg->size_package = stat_buf_own.st_size;
} else {
- mtime = stat_buf->st_mtime;
- size = stat_buf->st_size;
+ pkg->time_file = stat_buf->st_mtime;
+ pkg->size_package = stat_buf->st_size;
}
-
// Compute checksum
-
char *checksum = cr_checksum_file(filename, checksum_type, &tmp_err);
- if (tmp_err) {
+ if (!checksum) {
g_propagate_prefixed_error(err, tmp_err,
"Error while checksum calculation: ");
- headerFree(hdr);
- return NULL;
+ goto errexit;
}
-
+ pkg->pkgId = cr_safe_string_chunk_insert(pkg->chunk, checksum);
+ free(checksum);
// Get header range
-
struct cr_HeaderRangeStruct hdr_r = cr_get_header_byte_range(filename,
&tmp_err);
if (tmp_err) {
g_propagate_prefixed_error(err, tmp_err,
"Error while determinig header range: ");
- return NULL;
+ goto errexit;
}
-
- // Get package object
-
- pkg = cr_package_from_header(hdr, mtime, size, checksum, checksum_type_str,
- location_href, location_base, changelog_limit,
- hdr_r.start, hdr_r.end, &tmp_err);
- free(checksum);
- headerFree(hdr);
-
- if (tmp_err) {
- g_propagate_prefixed_error(err, tmp_err,
- "Error while checksum calculation:");
- return NULL;
- }
+ pkg->rpm_header_start = hdr_r.start;
+ pkg->rpm_header_end = hdr_r.end;
return pkg;
+
+errexit:
+ cr_package_free(pkg);
+ return NULL;
}
struct stat *stat_buf,
GError **err)
{
- const char *checksum_type_str;
+ cr_Package *pkg;
struct cr_XmlStruct result;
- GError *tmp_err = NULL;
assert(filename);
assert(!err || *err == NULL);
result.filelists = NULL;
result.other = NULL;
- checksum_type_str = cr_checksum_name_str(checksum_type);
-
-
- // Read header
-
- Header hdr;
- read_header(filename, &hdr, &tmp_err);
- if (tmp_err) {
- g_propagate_error(err, tmp_err);
+ pkg = cr_package_from_rpm(filename,
+ checksum_type,
+ location_href,
+ location_base,
+ changelog_limit,
+ stat_buf,
+ CR_HDRR_NONE,
+ err);
+ if (!pkg)
return result;
- }
-
-
- // Get file stat
-
- gint64 mtime;
- gint64 size;
-
- if (!stat_buf) {
- struct stat stat_buf_own;
- if (stat(filename, &stat_buf_own) == -1) {
- g_warning("%s: stat() error (%s)", __func__, strerror(errno));
- g_set_error(err, CR_PARSEPKG_ERROR, CRE_IO, "stat() failed");
- headerFree(hdr);
- return result;
- }
- mtime = stat_buf_own.st_mtime;
- size = stat_buf_own.st_size;
- } else {
- mtime = stat_buf->st_mtime;
- size = stat_buf->st_size;
- }
-
-
- // Compute checksum
-
- char *checksum = cr_checksum_file(filename, checksum_type, &tmp_err);
- if (tmp_err) {
- g_propagate_prefixed_error(err, tmp_err,
- "Error while checksum calculation: ");
- headerFree(hdr);
- return result;
- }
-
-
- // Get header range
-
- struct cr_HeaderRangeStruct hdr_r = cr_get_header_byte_range(filename,
- &tmp_err);
- if (tmp_err) {
- g_propagate_prefixed_error(err, tmp_err,
- "Error while determinig header range: ");
- return result;
- }
-
-
- // Gen XML
-
- result = cr_xml_from_header(hdr, mtime, size, checksum, checksum_type_str,
- location_href, location_base, changelog_limit,
- hdr_r.start, hdr_r.end, &tmp_err);
- free(checksum);
- headerFree(hdr);
-
- if (tmp_err) {
- g_propagate_prefixed_error(err, tmp_err,
- "Error while checksum calculation:");
- return result;
- }
+ result = cr_xml_dump(pkg, err);
+ cr_package_free(pkg);
return result;
}
#include <glib.h>
#include "checksum.h"
+#include "parsehdr.h"
#include "package.h"
+#include "xml_dump.h"
/** \defgroup parsepkg Package parser API.
* \addtogroup parsepkg
* @{
*/
-/** Status of initialization of global structures for package parsing.
- */
-extern short cr_initialized;
-
/** Initialize global structures for package parsing.
- * This function call rpmReadConfigFiles() and create global transaction set.
- * This function should be called only once! This function is not thread safe!
+ * This function calls rpmReadConfigFiles() and creates a global transaction
+ * set. It is guarded by g_once(), so repeated or concurrent calls are safe
+ * and only the first one performs the initialization.
*/
void cr_package_parser_cleanup();
-/** Generate package object from package file.
+/** Generate a package object from a package file.
+ * Some attributes like pkgId (checksum), checksum_type, time_file,
+ * location_href, location_base, rpm_header_start, rpm_header_end
+ * are not filled.
+ * @param filename filename
+ * @param changelog_limit number of changelogs that will be loaded
+ * @param flags Flags for header reading
+ * @param err GError **
+ * @return cr_Package or NULL on error
+ */
+cr_Package *
+cr_package_from_rpm_base(const char *filename,
+ int changelog_limit,
+ cr_HeaderReadingFlags flags,
+ GError **err);
+
+/** Generate a package object from a package file.
* @param filename filename
* @param checksum_type type of checksum to be used
* @param location_href package location inside repository
* @param changelog_limit number of changelog entries
* @param stat_buf struct stat of the filename
* (optional - could be NULL)
+ * @param flags Flags for header reading
* @param err GError **
- * @return cr_Package
+ * @return cr_Package or NULL on error
*/
cr_Package *cr_package_from_rpm(const char *filename,
cr_ChecksumType checksum_type,
const char *location_base,
int changelog_limit,
struct stat *stat_buf,
+ cr_HeaderReadingFlags flags,
GError **err);
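+
+/* Illustrative usage sketch (filename and changelog limit are hypothetical):
+ *
+ *   GError *tmp_err = NULL;
+ *   cr_Package *pkg = cr_package_from_rpm("foo-2.0-3.x86_64.rpm",
+ *                                         CR_CHECKSUM_SHA256,
+ *                                         "foo-2.0-3.x86_64.rpm", NULL,
+ *                                         10, NULL, CR_HDRR_NONE, &tmp_err);
+ *   if (!pkg)
+ *       g_warning("Parsing failed: %s", tmp_err->message);
+ */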
/** Generate XML for the specified package.
-FIND_PACKAGE (PythonLibs 2)
-FIND_PACKAGE (PythonInterp 2 REQUIRED)
+if (${PYTHON_DESIRED} STREQUAL "2")
+ unset(PYTHON_LIBRARY)
+ unset(PYTHON_INCLUDE_DIR)
+ unset(PYTHON_EXECUTABLE)
+ unset(PYTHON_LIBRARY CACHE)
+ unset(PYTHON_INCLUDE_DIR CACHE)
+ unset(PYTHON_EXECUTABLE CACHE)
+ FIND_PACKAGE(PythonLibs 2)
+ FIND_PACKAGE(PythonInterp 2 REQUIRED)
+else()
+ SET(Python_ADDITIONAL_VERSIONS 3.0)
+ unset(PYTHON_LIBRARY)
+ unset(PYTHON_INCLUDE_DIR)
+ unset(PYTHON_EXECUTABLE)
+ unset(PYTHON_LIBRARY CACHE)
+ unset(PYTHON_INCLUDE_DIR CACHE)
+ unset(PYTHON_EXECUTABLE CACHE)
+ FIND_PACKAGE(PythonLibs 3.0)
+ FIND_PACKAGE(PythonInterp 3.0 REQUIRED)
+endif()
+
EXECUTE_PROCESS(COMMAND ${PYTHON_EXECUTABLE} -c "from sys import stdout; from distutils import sysconfig; stdout.write(sysconfig.get_python_lib(True))" OUTPUT_VARIABLE PYTHON_INSTALL_DIR)
INCLUDE_DIRECTORIES (${PYTHON_INCLUDE_PATH})
FILE(COPY __init__.py DESTINATION ./createrepo_c/)
ENDIF()
-SET (craeterepo_cmodule_SRCS
+SET (createrepo_cmodule_SRCS
checksum-py.c
compression_wrapper-py.c
contentstat-py.c
repomdrecord-py.c
sqlite-py.c
typeconversion.c
+ updatecollection-py.c
+ updatecollectionpackage-py.c
+ updateinfo-py.c
+ updaterecord-py.c
+ updatereference-py.c
xml_dump-py.c
xml_file-py.c
xml_parser-py.c
)
-ADD_LIBRARY(_createrepo_cmodule SHARED ${craeterepo_cmodule_SRCS})
-SET_TARGET_PROPERTIES(_createrepo_cmodule PROPERTIES PREFIX "")
-SET_TARGET_PROPERTIES(_createrepo_cmodule PROPERTIES LIBRARY_OUTPUT_DIRECTORY "./createrepo_c")
-TARGET_LINK_LIBRARIES(_createrepo_cmodule libcreaterepo_c)
-TARGET_LINK_LIBRARIES(_createrepo_cmodule
+ADD_LIBRARY(_createrepo_c SHARED ${createrepo_cmodule_SRCS})
+SET_TARGET_PROPERTIES(_createrepo_c PROPERTIES PREFIX "")
+SET_TARGET_PROPERTIES(_createrepo_c PROPERTIES LIBRARY_OUTPUT_DIRECTORY "./createrepo_c")
+TARGET_LINK_LIBRARIES(_createrepo_c libcreaterepo_c)
+TARGET_LINK_LIBRARIES(_createrepo_c
${EXPAT_LIBRARIES}
${CURL_LIBRARY}
${PYTHON_LIBRARY}
)
INSTALL(FILES __init__.py DESTINATION ${PYTHON_INSTALL_DIR}/createrepo_c)
-INSTALL(TARGETS _createrepo_cmodule LIBRARY DESTINATION ${PYTHON_INSTALL_DIR}/createrepo_c)
+INSTALL(TARGETS _createrepo_c LIBRARY DESTINATION ${PYTHON_INSTALL_DIR}/createrepo_c)
"""
"""
-import _createrepo_c
-from _createrepo_c import *
+from . import _createrepo_c
+from ._createrepo_c import *
VERSION_MAJOR = _createrepo_c.VERSION_MAJOR #: Major version
VERSION_MINOR = _createrepo_c.VERSION_MINOR #: Minor version
HT_KEY_NAME = _createrepo_c.HT_KEY_NAME #: Package name as a key
HT_KEY_FILENAME = _createrepo_c.HT_KEY_FILENAME #: Package filename as a key
+HT_DUPACT_KEEPFIRST = _createrepo_c.HT_DUPACT_KEEPFIRST #: If a key is duplicated, keep only the first occurrence
+HT_DUPACT_REMOVEALL = _createrepo_c.HT_DUPACT_REMOVEALL #: If a key is duplicated, discard all occurrences
+
DB_PRIMARY = _createrepo_c.DB_PRIMARY #: Primary database
DB_FILELISTS = _createrepo_c.DB_FILELISTS #: Filelists database
DB_OTHER = _createrepo_c.DB_OTHER #: Other database
-XMLFILE_PRIMARY = _createrepo_c.XMLFILE_PRIMARY #: Primary xml file
-XMLFILE_FILELISTS = _createrepo_c.XMLFILE_FILELISTS #: Filelists xml file
-XMLFILE_OTHER = _createrepo_c.XMLFILE_OTHER #: Other xml file
+XMLFILE_PRIMARY = _createrepo_c.XMLFILE_PRIMARY #: Primary xml file
+XMLFILE_FILELISTS = _createrepo_c.XMLFILE_FILELISTS #: Filelists xml file
+XMLFILE_OTHER = _createrepo_c.XMLFILE_OTHER #: Other xml file
+XMLFILE_PRESTODELTA = _createrepo_c.XMLFILE_PRESTODELTA #: Prestodelta xml file
+XMLFILE_UPDATEINFO = _createrepo_c.XMLFILE_UPDATEINFO #: Updateinfo xml file
#: XML warning - Unknown tag
XML_WARNING_UNKNOWNTAG = _createrepo_c.XML_WARNING_UNKNOWNTAG
if path:
xml_parse_repomd(path, self)
+    def __iter__(self):
+        for rec in self.records:
+            yield rec
+
+ def __getitem__(self, key):
+ for rec in self.records:
+ if rec.type == key:
+ return rec
+        return self.__missing__(key)
+
+ def __missing__(self, key):
+ raise KeyError("Record with type '%s' doesn't exist" % key)
+
+ def __contains__(self, key):
+ for rec in self.records:
+ if rec.type == key:
+ return True
+ return False
+
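These dunder methods make Repomd behave like a read-only mapping of record types to records. A minimal usage sketch, assuming an installed createrepo_c module and a hypothetical repodata path:

    import createrepo_c as cr

    repomd = cr.Repomd("repodata/repomd.xml")   # parsed via xml_parse_repomd
    for rec in repomd:                          # __iter__ yields every record
        print(rec.type)
    if "primary" in repomd:                     # __contains__ matches on rec.type
        primary = repomd["primary"]             # __getitem__; KeyError if absent
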
# RepomdRecord class
class RepomdRecord(_createrepo_c.RepomdRecord):
""":arg path: Path to the other.sqlite database"""
Sqlite.__init__(self, path, DB_OTHER)
+
+# UpdateCollection class
+
+UpdateCollection = _createrepo_c.UpdateCollection
+
+
+# UpdateCollectionPackage class
+
+UpdateCollectionPackage = _createrepo_c.UpdateCollectionPackage
+
+
+# UpdateInfo class
+
+class UpdateInfo(_createrepo_c.UpdateInfo):
+ def __init__(self, path=None):
+ """:arg path: Path to existing updateinfo.xml or None"""
+ _createrepo_c.UpdateInfo.__init__(self)
+ if path:
+ xml_parse_updateinfo(path, self)
+
+
+# UpdateRecord class
+
+UpdateRecord = _createrepo_c.UpdateRecord
+
+
+# UpdateReference class
+
+UpdateReference = _createrepo_c.UpdateReference
+
+
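A minimal parsing sketch for the update-related classes above; the path is hypothetical and the updates attribute is an assumption about what the underlying C type exposes:

    import createrepo_c as cr

    ui = cr.UpdateInfo("repodata/updateinfo.xml")  # parsed via xml_parse_updateinfo
    for rec in ui.updates:                         # assumed list of UpdateRecord objects
        print(rec)
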
# XmlFile class
XmlFile = _createrepo_c.XmlFile
XmlFile.__init__(self, path, XMLFILE_OTHER,
compressiontype, contentstat)
+class UpdateInfoXmlFile(XmlFile):
+ def __init__(self, path, compressiontype=GZ_COMPRESSION,
+ contentstat=None):
+ """:arg path: Path to the updateinfo xml file
+ :arg compressiontype: Compression type
+ :arg contentstat: ContentStat object"""
+ XmlFile.__init__(self, path, XMLFILE_UPDATEINFO,
+ compressiontype, contentstat)
+
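Conversely, a hedged sketch of writing update records back out through UpdateInfoXmlFile; add_chunk() is assumed to be the generic XmlFile method for appending pre-rendered XML:

    f = cr.UpdateInfoXmlFile("repodata/updateinfo.xml.gz")  # GZ_COMPRESSION by default
    for rec in ui.updates:                                  # ui from the sketch above
        f.add_chunk(cr.xml_dump_updaterecord(rec))          # assumed add_chunk() method
    f.close()
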
# Functions
def package_from_rpm(filename, checksum_type=SHA256, location_href=None,
return _createrepo_c.xml_from_rpm(filename, checksum_type,
location_href, location_base, changelog_limit)
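For orientation, a hedged sketch of both helpers; the rpm filename is a placeholder:

    import createrepo_c as cr

    pkg = cr.package_from_rpm("foo-1.0-1.x86_64.rpm")  # -> Package object
    primary, filelists, other = cr.xml_from_rpm("foo-1.0-1.x86_64.rpm")  # -> XML chunks
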
-xml_dump_primary = _createrepo_c.xml_dump_primary
-xml_dump_filelists = _createrepo_c.xml_dump_filelists
-xml_dump_other = _createrepo_c.xml_dump_other
-xml_dump = _createrepo_c.xml_dump
+xml_dump_primary = _createrepo_c.xml_dump_primary
+xml_dump_filelists = _createrepo_c.xml_dump_filelists
+xml_dump_other = _createrepo_c.xml_dump_other
+xml_dump_updaterecord = _createrepo_c.xml_dump_updaterecord
+xml_dump = _createrepo_c.xml_dump
def xml_parse_primary(path, newpkgcb=None, pkgcb=None,
warningcb=None, do_files=1):
"""Parse other.xml"""
return _createrepo_c.xml_parse_other(path, newpkgcb, pkgcb, warningcb)
+def xml_parse_updateinfo(path, updateinfoobj, warningcb=None):
+ """Parse updateinfo.xml"""
+ return _createrepo_c.xml_parse_updateinfo(path, updateinfoobj, warningcb)
+
def xml_parse_repomd(path, repomdobj, warningcb=None):
"""Parse repomd.xml"""
return _createrepo_c.xml_parse_repomd(path, repomdobj, warningcb)
#include "exception-py.h"
PyObject *
-py_checksum_name_str(PyObject *self, PyObject *args)
+py_checksum_name_str(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
int type;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "i:py_checksum_name_Str", &type))
return NULL;
- return PyStringOrNone_FromString(cr_checksum_name_str(type));
+ return PyUnicodeOrNone_FromString(cr_checksum_name_str(type));
}
PyObject *
-py_checksum_type(PyObject *self, PyObject *args)
+py_checksum_type(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
char *type;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "s:py_checksum_type", &type))
return NULL;
*/
PyObject *
-py_compression_suffix(PyObject *self, PyObject *args)
+py_compression_suffix(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
int type;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "i:py_compression_suffix", &type))
return NULL;
- return PyStringOrNone_FromString(cr_compression_suffix(type));
+ return PyUnicodeOrNone_FromString(cr_compression_suffix(type));
}
PyObject *
-py_detect_compression(PyObject *self, PyObject *args)
+py_detect_compression(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
long type;
char *filename;
GError *tmp_err = NULL;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "s:py_detect_compression", &filename))
return NULL;
}
PyObject *
-py_compression_type(PyObject *self, PyObject *args)
+py_compression_type(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
char *name;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "z:py_compression_type", &name))
return NULL;
/* Function on the type */
static PyObject *
-crfile_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+crfile_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
_CrFileObject *self = (_CrFileObject *)type->tp_alloc(type, 0);
if (self) {
self->f = NULL;
}
static int
-crfile_init(_CrFileObject *self, PyObject *args, PyObject *kwds)
+crfile_init(_CrFileObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds)
{
char *path;
int mode, comtype;
PyObject *py_stat, *ret;
cr_ContentStat *stat;
- CR_UNUSED(kwds);
-
if (!PyArg_ParseTuple(args, "siiO|:crfile_init",
&path, &mode, &comtype, &py_stat))
return -1;
mode = "Unknown mode";
}
- return PyString_FromFormat("<createrepo_c.CrFile %s object>", mode);
+ return PyUnicode_FromFormat("<createrepo_c.CrFile %s object>", mode);
}
/* CrFile methods */
"Close the file");
static PyObject *
-py_close(_CrFileObject *self, void *nothing)
+py_close(_CrFileObject *self, G_GNUC_UNUSED void *nothing)
{
GError *tmp_err = NULL;
- CR_UNUSED(nothing);
-
if (self->f) {
cr_close(self->f, &tmp_err);
self->f = NULL;
};
PyTypeObject CrFile_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.CrFile", /* tp_name */
sizeof(_CrFileObject), /* tp_basicsize */
0, /* tp_itemsize */
/* Function on the type */
static PyObject *
-contentstat_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+contentstat_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
_ContentStatObject *self = (_ContentStatObject *)type->tp_alloc(type, 0);
if (self)
self->stat = NULL;
" :arg checksum_type: Type of checksum that should be used\n");
static int
-contentstat_init(_ContentStatObject *self, PyObject *args, PyObject *kwds)
+contentstat_init(_ContentStatObject *self,
+ PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(kwds);
-
int type;
GError *tmp_err = NULL;
}
static PyObject *
-contentstat_repr(_ContentStatObject *self)
+contentstat_repr(G_GNUC_UNUSED _ContentStatObject *self)
{
- CR_UNUSED(self);
- return PyString_FromFormat("<createrepo_c.ContentStat object>");
+ return PyUnicode_FromFormat("<createrepo_c.ContentStat object>");
}
/* getsetters */
if (check_ContentStatStatus(self))
return NULL;
cr_ContentStat *rec = self->stat;
- gint64 val = *((gint64 *) ((size_t)rec + (size_t) member_offset));
+ gint64 val = (gint64) *((gint64 *) ((size_t)rec + (size_t) member_offset));
return PyLong_FromLongLong((long long) val);
}
if (check_ContentStatStatus(self))
return NULL;
cr_ContentStat *rec = self->stat;
- gint64 val = *((int *) ((size_t)rec + (size_t) member_offset));
+ gint64 val = (gint64) *((int *) ((size_t)rec + (size_t) member_offset));
return PyLong_FromLongLong((long long) val);
}
char *str = *((char **) ((size_t) rec + (size_t) member_offset));
if (str == NULL)
Py_RETURN_NONE;
- return PyString_FromString(str);
+ return PyUnicode_FromString(str);
}
static int
return -1;
if (PyLong_Check(value)) {
val = (gint64) PyLong_AsLong(value);
+ } else if (PyFloat_Check(value)) {
+ val = (gint64) PyFloat_AS_DOUBLE(value);
+#if PY_MAJOR_VERSION < 3
} else if (PyInt_Check(value)) {
val = (gint64) PyInt_AS_LONG(value);
+#endif
} else {
PyErr_SetString(PyExc_TypeError, "Number expected!");
return -1;
return -1;
if (PyLong_Check(value)) {
val = PyLong_AsLong(value);
+ } else if (PyFloat_Check(value)) {
+ val = (gint64) PyFloat_AS_DOUBLE(value);
+#if PY_MAJOR_VERSION < 3
} else if (PyInt_Check(value)) {
- val = PyInt_AS_LONG(value);
+ val = (gint64) PyInt_AS_LONG(value);
+#endif
} else {
PyErr_SetString(PyExc_TypeError, "Number expected!");
return -1;
{
if (check_ContentStatStatus(self))
return -1;
- if (!PyString_Check(value) && value != Py_None) {
- PyErr_SetString(PyExc_TypeError, "String or None expected!");
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
return -1;
}
cr_ContentStat *rec = self->stat;
/* Object */
PyTypeObject ContentStat_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.ContentStat", /* tp_name */
sizeof(_ContentStatObject), /* tp_basicsize */
0, /* tp_itemsize */
*/
#include <Python.h>
+#include <datetime.h> // Python's datetime C API header
#include "src/createrepo_c.h"
#include "repomd-py.h"
#include "repomdrecord-py.h"
#include "sqlite-py.h"
+#include "updatecollection-py.h"
+#include "updatecollectionpackage-py.h"
+#include "updateinfo-py.h"
+#include "updaterecord-py.h"
+#include "updatereference-py.h"
#include "xml_dump-py.h"
#include "xml_file-py.h"
#include "xml_parser-py.h"
+struct module_state {
+ PyObject *error;
+};
+
static struct PyMethodDef createrepo_c_methods[] = {
{"package_from_rpm", (PyCFunction)py_package_from_rpm,
METH_VARARGS | METH_KEYWORDS, package_from_rpm__doc__},
{"xml_from_rpm", (PyCFunction)py_xml_from_rpm,
- METH_VARARGS | METH_KEYWORDS, package_from_rpm__doc__},
+ METH_VARARGS | METH_KEYWORDS, xml_from_rpm__doc__},
{"xml_dump_primary", (PyCFunction)py_xml_dump_primary,
METH_VARARGS, xml_dump_primary__doc__},
{"xml_dump_filelists", (PyCFunction)py_xml_dump_filelists,
METH_VARARGS, xml_dump_filelists__doc__},
{"xml_dump_other", (PyCFunction)py_xml_dump_other,
METH_VARARGS, xml_dump_other__doc__},
+ {"xml_dump_updaterecord", (PyCFunction)py_xml_dump_updaterecord,
+ METH_VARARGS, xml_dump_updaterecord__doc__},
{"xml_dump", (PyCFunction)py_xml_dump,
METH_VARARGS, xml_dump__doc__},
{"xml_parse_primary", (PyCFunction)py_xml_parse_primary,
METH_VARARGS, xml_parse_other__doc__},
{"xml_parse_repomd", (PyCFunction)py_xml_parse_repomd,
METH_VARARGS, xml_parse_repomd__doc__},
+ {"xml_parse_updateinfo", (PyCFunction)py_xml_parse_updateinfo,
+ METH_VARARGS, xml_parse_updateinfo__doc__},
{"checksum_name_str", (PyCFunction)py_checksum_name_str,
METH_VARARGS, checksum_name_str__doc__},
{"checksum_type", (PyCFunction)py_checksum_type,
{ NULL }
};
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef createrepo_c_module_def = {
+ PyModuleDef_HEAD_INIT,
+ "_createrepo_c",
+ NULL,
+ sizeof(struct module_state),
+ createrepo_c_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#define FAILURE NULL
+
+PyObject *
+PyInit__createrepo_c(void)
+
+#else
+
+#define FAILURE
+
PyMODINIT_FUNC
init_createrepo_c(void)
+#endif
{
+#if PY_MAJOR_VERSION >= 3
+ PyObject *m = PyModule_Create(&createrepo_c_module_def);
+#else
PyObject *m = Py_InitModule("_createrepo_c", createrepo_c_methods);
+#endif
if (!m)
- return;
+ return FAILURE;
/* Exceptions */
if (!init_exceptions())
- return;
+ return FAILURE;
PyModule_AddObject(m, "CreaterepoCError", CrErr_Exception);
/* Objects */
/* _createrepo_c.ContentStat */
if (PyType_Ready(&ContentStat_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&ContentStat_Type);
PyModule_AddObject(m, "ContentStat", (PyObject *)&ContentStat_Type);
/* _createrepo_c.CrFile */
if (PyType_Ready(&CrFile_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&CrFile_Type);
PyModule_AddObject(m, "CrFile", (PyObject *)&CrFile_Type);
/* _createrepo_c.Package */
if (PyType_Ready(&Package_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&Package_Type);
PyModule_AddObject(m, "Package", (PyObject *)&Package_Type);
/* _createrepo_c.Metadata */
if (PyType_Ready(&Metadata_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&Metadata_Type);
PyModule_AddObject(m, "Metadata", (PyObject *)&Metadata_Type);
/* _createrepo_c.MetadataLocation */
if (PyType_Ready(&MetadataLocation_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&MetadataLocation_Type);
PyModule_AddObject(m, "MetadataLocation", (PyObject *)&MetadataLocation_Type);
/* _createrepo_c.Repomd */
if (PyType_Ready(&Repomd_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&Repomd_Type);
PyModule_AddObject(m, "Repomd", (PyObject *)&Repomd_Type);
/* _createrepo_c.RepomdRecord */
if (PyType_Ready(&RepomdRecord_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&RepomdRecord_Type);
PyModule_AddObject(m, "RepomdRecord", (PyObject *)&RepomdRecord_Type);
/* _createrepo_c.Sqlite */
if (PyType_Ready(&Sqlite_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&Sqlite_Type);
PyModule_AddObject(m, "Sqlite", (PyObject *)&Sqlite_Type);
+ /* _createrepo_c.UpdateCollection */
+ if (PyType_Ready(&UpdateCollection_Type) < 0)
+ return FAILURE;
+ Py_INCREF(&UpdateCollection_Type);
+ PyModule_AddObject(m, "UpdateCollection",
+ (PyObject *)&UpdateCollection_Type);
+
+ /* _createrepo_c.UpdateCollectionPackage */
+ if (PyType_Ready(&UpdateCollectionPackage_Type) < 0)
+ return FAILURE;
+ Py_INCREF(&UpdateCollectionPackage_Type);
+ PyModule_AddObject(m, "UpdateCollectionPackage",
+ (PyObject *)&UpdateCollectionPackage_Type);
+
+ /* _createrepo_c.UpdateInfo */
+ if (PyType_Ready(&UpdateInfo_Type) < 0)
+ return FAILURE;
+ Py_INCREF(&UpdateInfo_Type);
+ PyModule_AddObject(m, "UpdateInfo", (PyObject *)&UpdateInfo_Type);
+
+ /* _createrepo_c.UpdateRecord */
+ if (PyType_Ready(&UpdateRecord_Type) < 0)
+ return FAILURE;
+ Py_INCREF(&UpdateRecord_Type);
+ PyModule_AddObject(m, "UpdateRecord", (PyObject *)&UpdateRecord_Type);
+
+ /* _createrepo_c.UpdateReference */
+ if (PyType_Ready(&UpdateReference_Type) < 0)
+ return FAILURE;
+ Py_INCREF(&UpdateReference_Type);
+ PyModule_AddObject(m, "UpdateReference", (PyObject *)&UpdateReference_Type);
+
/* _createrepo_c.XmlFile */
if (PyType_Ready(&XmlFile_Type) < 0)
- return;
+ return FAILURE;
Py_INCREF(&XmlFile_Type);
PyModule_AddObject(m, "XmlFile", (PyObject *)&XmlFile_Type);
Py_AtExit(cr_xml_dump_cleanup);
Py_AtExit(cr_package_parser_cleanup);
+    /* Import the datetime C API so the PyDateTime macros can be used */
+
+ PyDateTime_IMPORT;
+
/* Module constants */
/* Version */
PyModule_AddIntConstant(m, "HT_KEY_NAME", CR_HT_KEY_NAME);
PyModule_AddIntConstant(m, "HT_KEY_FILENAME", CR_HT_KEY_FILENAME);
+ /* Load Metadata key dup action */
+ PyModule_AddIntConstant(m, "HT_DUPACT_KEEPFIRST", CR_HT_DUPACT_KEEPFIRST);
+ PyModule_AddIntConstant(m, "HT_DUPACT_REMOVEALL", CR_HT_DUPACT_REMOVEALL);
+
/* Sqlite DB types */
PyModule_AddIntConstant(m, "DB_PRIMARY", CR_DB_PRIMARY);
PyModule_AddIntConstant(m, "DB_FILELISTS", CR_DB_FILELISTS);
PyModule_AddIntConstant(m, "XMLFILE_PRIMARY", CR_XMLFILE_PRIMARY);
PyModule_AddIntConstant(m, "XMLFILE_FILELISTS", CR_XMLFILE_FILELISTS);
PyModule_AddIntConstant(m, "XMLFILE_OTHER", CR_XMLFILE_OTHER);
+ PyModule_AddIntConstant(m, "XMLFILE_PRESTODELTA", CR_XMLFILE_PRESTODELTA);
+ PyModule_AddIntConstant(m, "XMLFILE_UPDATEINFO", CR_XMLFILE_UPDATEINFO);
/* XmlParser types */
PyModule_AddIntConstant(m, "XML_WARNING_UNKNOWNTAG", CR_XML_WARNING_UNKNOWNTAG);
PyModule_AddIntConstant(m, "XML_WARNING_MISSINGATTR", CR_XML_WARNING_MISSINGATTR);
PyModule_AddIntConstant(m, "XML_WARNING_UNKNOWNVAL", CR_XML_WARNING_UNKNOWNVAL);
PyModule_AddIntConstant(m, "XML_WARNING_BADATTRVAL", CR_XML_WARNING_BADATTRVAL);
+
+#if PY_MAJOR_VERSION >= 3
+ return m;
+#else
+ return;
+#endif
}
int
init_exceptions()
{
+#if (PY_MAJOR_VERSION == 2) && (PY_MINOR_VERSION < 7)
+ CrErr_Exception = PyErr_NewException("createrepo_c.CreaterepoCError", NULL, NULL);
+#else
CrErr_Exception = PyErr_NewExceptionWithDoc("createrepo_c.CreaterepoCError",
"Createrepo_c library exception", NULL, NULL);
+#endif
+
if (!CrErr_Exception)
return 0;
Py_INCREF(CrErr_Exception);
/* Function on the type */
static PyObject *
-metadata_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+metadata_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
-
_MetadataObject *self = (_MetadataObject *)type->tp_alloc(type, 0);
if (self)
self->md = NULL;
}
static PyObject *
-metadata_repr(_MetadataObject *self)
+metadata_repr(G_GNUC_UNUSED _MetadataObject *self)
{
- CR_UNUSED(self);
- return PyString_FromFormat("<createrepo_c.Metadata object>");
+ return PyUnicode_FromFormat("<createrepo_c.Metadata object>");
}
/* Getters */
static PyObject *
-get_key(_MetadataObject *self, void *nothing)
+get_key(_MetadataObject *self, G_GNUC_UNUSED void *nothing)
{
- CR_UNUSED(nothing);
if (check_MetadataStatus(self))
return NULL;
cr_HashTableKey val = cr_metadata_key(self->md);
"Number of packages");
static PyObject *
-ht_len(_MetadataObject *self, PyObject *noarg)
+ht_len(_MetadataObject *self, G_GNUC_UNUSED PyObject *noarg)
{
- CR_UNUSED(noarg);
unsigned long len = 0;
if (check_MetadataStatus(self))
return NULL;
"List of all keys");
static PyObject *
-ht_keys(_MetadataObject *self, PyObject *args)
+ht_keys(_MetadataObject *self, G_GNUC_UNUSED PyObject *args)
{
- CR_UNUSED(args);
-
if (check_MetadataStatus(self))
return NULL;
PyObject *list = PyList_New(0);
for (GList *elem = keys; elem; elem = g_list_next(elem)) {
- PyObject *py_str = PyString_FromString(elem->data);
+ PyObject *py_str = PyUnicode_FromString(elem->data);
assert(py_str);
if (PyList_Append(list, py_str) == -1) {
Py_XDECREF(list);
return (Object_FromPackage_WithParent(pkg, 0, (PyObject *) self));
}
+PyDoc_STRVAR(metadata_dupaction__doc__,
+".. method:: dupaction(dupaction)\n\n"
+" :arg dupation: What to do when we encounter already existing key.\n"
+" use constants prefixed with HT_DUPACT_. I.e. \n"
+" HT_DUPACT_KEEPFIRST, HT_DUPACT_REMOVEALL.\n");
+
+static PyObject *
+metadata_dupaction(_MetadataObject *self, PyObject *args)
+{
+ int dupaction;
+ gboolean res = TRUE;
+
+ if (!PyArg_ParseTuple(args, "i:dupaction", &dupaction))
+ return NULL;
+
+ res = cr_metadata_set_dupaction(self->md, dupaction);
+ if (!res) {
+ PyErr_SetString(CrErr_Exception, "Cannot set specified action");
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
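Seen from Python, the method registered below is intended to be used as in this sketch (Metadata() with default arguments is assumed to be constructible):

    import createrepo_c as cr

    md = cr.Metadata()
    md.dupaction(cr.HT_DUPACT_REMOVEALL)  # later duplicated keys discard all occurrences
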
static struct PyMethodDef metadata_methods[] = {
{"load_xml", (PyCFunction)load_xml, METH_VARARGS,
load_xml__doc__},
{"keys", (PyCFunction)ht_keys, METH_NOARGS, keys__doc__},
{"remove", (PyCFunction)ht_remove, METH_VARARGS, remove__doc__},
{"get", (PyCFunction)ht_get, METH_VARARGS, get__doc__},
+ {"dupaction",(PyCFunction)metadata_dupaction, METH_VARARGS, metadata_dupaction__doc__},
{NULL} /* sentinel */
};
/* Object */
PyTypeObject Metadata_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.Metadata", /* tp_name */
sizeof(_MetadataObject), /* tp_basicsize */
0, /* tp_itemsize */
/* Function on the type */
static PyObject *
-metadatalocation_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+metadatalocation_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
-
_MetadataLocationObject *self = (_MetadataLocationObject *)type->tp_alloc(type, 0);
if (self)
self->ml = NULL;
" databases will not be downloaded)\n");
static int
-metadatalocation_init(_MetadataLocationObject *self, PyObject *args, PyObject *kwds)
+metadatalocation_init(_MetadataLocationObject *self,
+ PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(kwds);
char *repopath;
- int ignore_db;
+ PyObject *py_ignore_db = NULL;
GError *tmp_err = NULL;
- if (!PyArg_ParseTuple(args, "si|:metadatalocation_init", &repopath, &ignore_db))
+ if (!PyArg_ParseTuple(args, "sO|:metadatalocation_init", &repopath, &py_ignore_db))
return -1;
    /* Free all previous resources on reinitialization */
}
/* Init */
- self->ml = cr_locate_metadata(repopath, ignore_db, &tmp_err);
+ self->ml = cr_locate_metadata(repopath, PyObject_IsTrue(py_ignore_db), &tmp_err);
if (tmp_err) {
nice_exception(&tmp_err, NULL);
return -1;
}
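With the change above, the second argument may now be any Python truth value instead of a strict integer. A minimal sketch with a hypothetical repo path:

    import createrepo_c as cr

    ml = cr.MetadataLocation("/path/to/repo", True)  # True: ignore sqlite databases
    print(ml["primary"])  # path to the primary metadata file, or None
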
static PyObject *
-metadatalocation_repr(_MetadataLocationObject *self)
+metadatalocation_repr(G_GNUC_UNUSED _MetadataLocationObject *self)
{
- CR_UNUSED(self);
- return PyString_FromFormat("<createrepo_c.MetadataLocation object>");
+ return PyUnicode_FromFormat("<createrepo_c.MetadataLocation object>");
}
/* MetadataLocation methods */
if (check_MetadataLocationStatus(self))
return NULL;
- if (!PyString_Check(pykey)) {
- PyErr_SetString(PyExc_TypeError, "String expected!");
+ if (!PyUnicode_Check(pykey) && !PyBytes_Check(pykey)) {
+ PyErr_SetString(PyExc_TypeError, "Unicode or bytes expected!");
return NULL;
}
- key = PyString_AsString(pykey);
+ if (PyUnicode_Check(pykey)) {
+ pykey = PyUnicode_AsUTF8String(pykey);
+ }
+
+ key = PyBytes_AsString(pykey);
+
value = NULL;
if (!strcmp(key, "primary")) {
}
if (value)
- return PyString_FromString(value);
+ return PyUnicode_FromString(value);
else
Py_RETURN_NONE;
}
/* Object */
PyTypeObject MetadataLocation_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.MetadataLocation",/* tp_name */
sizeof(_MetadataLocationObject),/* tp_basicsize */
0, /* tp_itemsize */
#include "contentstat-py.h"
PyObject *
-py_compress_file_with_stat(PyObject *self, PyObject *args)
+py_compress_file_with_stat(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
int type;
char *src, *dst;
cr_ContentStat *contentstat;
GError *tmp_err = NULL;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "sziO:py_compress_file", &src, &dst, &type,
&py_contentstat))
return NULL;
}
PyObject *
-py_decompress_file_with_stat(PyObject *self, PyObject *args)
+py_decompress_file_with_stat(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
int type;
char *src, *dst;
cr_ContentStat *contentstat;
GError *tmp_err = NULL;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "sziO:py_decompress_file", &src, &dst, &type,
&py_contentstat))
return NULL;
/* Function on the type */
static PyObject *
-package_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+package_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
_PackageObject *self = (_PackageObject *)type->tp_alloc(type, 0);
if (self) {
self->package = NULL;
cr_Package *pkg = self->package;
PyObject *repr;
if (pkg) {
- repr = PyString_FromFormat("<createrepo_c.Package object id %s, %s>",
+ repr = PyUnicode_FromFormat("<createrepo_c.Package object id %s, %s>",
(pkg->pkgId ? pkg->pkgId : "-"),
(pkg->name ? pkg->name : "-"));
} else {
- repr = PyString_FromFormat("<createrepo_c.Package object id -, ->");
+ repr = PyUnicode_FromFormat("<createrepo_c.Package object id -, ->");
}
return repr;
}
return NULL;
if (self->package) {
char *nevra = cr_package_nvra(self->package);
- ret = PyString_FromString(nevra);
+ ret = PyUnicode_FromString(nevra);
free(nevra);
} else {
- ret = PyString_FromString("-");
+ ret = PyUnicode_FromString("-");
}
return ret;
}
"Package NVRA string (Name-Version-Release-Architecture)");
static PyObject *
-nvra(_PackageObject *self, void *nothing)
+nvra(_PackageObject *self, G_GNUC_UNUSED void *nothing)
{
- CR_UNUSED(nothing);
PyObject *pystr;
if (check_PackageStatus(self))
return NULL;
char *nvra = cr_package_nvra(self->package);
- pystr = PyStringOrNone_FromString(nvra);
+ pystr = PyUnicodeOrNone_FromString(nvra);
free(nvra);
return pystr;
}
"Package NEVRA string (Name-Epoch-Version-Release-Architecture)");
static PyObject *
-nevra(_PackageObject *self, void *nothing)
+nevra(_PackageObject *self, G_GNUC_UNUSED void *nothing)
{
- CR_UNUSED(nothing);
PyObject *pystr;
if (check_PackageStatus(self))
return NULL;
char *nevra = cr_package_nevra(self->package);
- pystr = PyStringOrNone_FromString(nevra);
+ pystr = PyUnicodeOrNone_FromString(nevra);
free(nevra);
return pystr;
}
"Copy of the package object");
static PyObject *
-copy_pkg(_PackageObject *self, void *nothing)
+copy_pkg(_PackageObject *self, G_GNUC_UNUSED void *nothing)
{
- CR_UNUSED(nothing);
if (check_PackageStatus(self))
return NULL;
return Object_FromPackage(cr_package_copy(self->package), 1);
if (check_PackageStatus(self))
return NULL;
cr_Package *pkg = self->package;
- gint64 val = *((gint64 *) ((size_t)pkg + (size_t) member_offset));
+ gint64 val = (gint64) *((gint64 *) ((size_t)pkg + (size_t) member_offset));
return PyLong_FromLongLong((long long) val);
}
char *str = *((char **) ((size_t) pkg + (size_t) member_offset));
if (str == NULL)
Py_RETURN_NONE;
- return PyString_FromString(str);
+ return PyUnicode_FromString(str);
}
/** Return offset of a selected member of cr_Package structure. */
/** List of convertors for converting lists in cr_Package. */
static ListConvertor list_convertors[] = {
- { offsetof(cr_Package, requires), PyObject_FromDependency,
- CheckPyDependency, PyObject_ToDependency },
- { offsetof(cr_Package, provides), PyObject_FromDependency,
- CheckPyDependency, PyObject_ToDependency },
- { offsetof(cr_Package, conflicts), PyObject_FromDependency,
- CheckPyDependency, PyObject_ToDependency },
- { offsetof(cr_Package, obsoletes), PyObject_FromDependency,
- CheckPyDependency, PyObject_ToDependency },
- { offsetof(cr_Package, files), PyObject_FromPackageFile,
- CheckPyPackageFile, PyObject_ToPackageFile },
- { offsetof(cr_Package, changelogs), PyObject_FromChangelogEntry,
- CheckPyChangelogEntry, PyObject_ToChangelogEntry },
+ { offsetof(cr_Package, requires),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, provides),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, conflicts),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, obsoletes),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, suggests),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, enhances),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, recommends),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, supplements),
+ (ConversionFromFunc) PyObject_FromDependency,
+ (ConversionToCheckFunc) CheckPyDependency,
+ (ConversionToFunc) PyObject_ToDependency },
+ { offsetof(cr_Package, files),
+ (ConversionFromFunc) PyObject_FromPackageFile,
+ (ConversionToCheckFunc) CheckPyPackageFile,
+ (ConversionToFunc) PyObject_ToPackageFile },
+ { offsetof(cr_Package, changelogs),
+ (ConversionFromFunc) PyObject_FromChangelogEntry,
+ (ConversionToCheckFunc) CheckPyChangelogEntry,
+ (ConversionToFunc) PyObject_ToChangelogEntry },
};
static PyObject *
return -1;
if (PyLong_Check(value)) {
val = (gint64) PyLong_AsLong(value);
+ } else if (PyFloat_Check(value)) {
+ val = (gint64) PyFloat_AS_DOUBLE(value);
+#if PY_MAJOR_VERSION < 3
} else if (PyInt_Check(value)) {
val = (gint64) PyInt_AS_LONG(value);
+#endif
} else {
PyErr_SetString(PyExc_TypeError, "Number expected!");
return -1;
{
if (check_PackageStatus(self))
return -1;
- if (!PyString_Check(value) && value != Py_None) {
- PyErr_SetString(PyExc_TypeError, "String or None expected!");
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
return -1;
}
cr_Package *pkg = self->package;
if (!pkg->chunk)
pkg->chunk = g_string_chunk_new(0);
- char *str = g_string_chunk_insert(pkg->chunk, PyString_AsString(value));
+ if (PyUnicode_Check(value)) {
+ value = PyUnicode_AsUTF8String(value);
+ }
+
+ char *str = g_string_chunk_insert(pkg->chunk, PyBytes_AsString(value));
*((char **) ((size_t) pkg + (size_t) member_offset)) = str;
return 0;
}
{"provides", (getter)get_list, (setter)set_list,
"Capabilities the package provides", &(list_convertors[1])},
{"conflicts", (getter)get_list, (setter)set_list,
- "Capability the package conflicts with", &(list_convertors[2])},
+ "Capabilities the package conflicts with", &(list_convertors[2])},
{"obsoletes", (getter)get_list, (setter)set_list,
- "Capability the package obsoletes", &(list_convertors[3])},
+ "Capabilities the package obsoletes", &(list_convertors[3])},
+ {"suggests", (getter)get_list, (setter)set_list,
+ "Capabilities the package suggests", &(list_convertors[4])},
+ {"enhances", (getter)get_list, (setter)set_list,
+ "Capabilities the package enhances", &(list_convertors[5])},
+ {"recommends", (getter)get_list, (setter)set_list,
+ "Capabilities the package recommends", &(list_convertors[6])},
+ {"supplements", (getter)get_list, (setter)set_list,
+ "Capabilities the package supplements", &(list_convertors[7])},
{"files", (getter)get_list, (setter)set_list,
- "Files that package contains", &(list_convertors[4])},
+ "Files that package contains", &(list_convertors[8])},
{"changelogs", (getter)get_list, (setter)set_list,
- "Changelogs that package contains", &(list_convertors[5])},
+ "Changelogs that package contains", &(list_convertors[9])},
{NULL, NULL, NULL, NULL, NULL} /* sentinel */
};
/* Object */
PyTypeObject Package_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.Package", /* tp_name */
sizeof(_PackageObject), /* tp_basicsize */
0, /* tp_itemsize */
#include "exception-py.h"
PyObject *
-py_package_from_rpm(PyObject *self, PyObject *args)
+py_package_from_rpm(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
- CR_UNUSED(self);
-
PyObject *ret;
cr_Package *pkg;
int checksum_type, changelog_limit;
char *filename, *location_href, *location_base;
GError *tmp_err = NULL;
+ cr_HeaderReadingFlags flags = CR_HDRR_NONE; // TODO - support for flags
if (!PyArg_ParseTuple(args, "sizzi:py_package_from_rpm",
&filename,
}
pkg = cr_package_from_rpm(filename, checksum_type, location_href,
- location_base, changelog_limit, NULL, &tmp_err);
+ location_base, changelog_limit, NULL,
+ flags, &tmp_err);
if (tmp_err) {
nice_exception(&tmp_err, "Cannot load %s: ", filename);
return NULL;
}
PyObject *
-py_xml_from_rpm(PyObject *self, PyObject *args)
+py_xml_from_rpm(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
- CR_UNUSED(self);
-
PyObject *tuple;
int checksum_type, changelog_limit;
char *filename, *location_href, *location_base;
if ((tuple = PyTuple_New(3)) == NULL)
goto py_xml_from_rpm_end; // Free xml_res and return NULL
- PyTuple_SetItem(tuple, 0, PyStringOrNone_FromString(xml_res.primary));
- PyTuple_SetItem(tuple, 1, PyStringOrNone_FromString(xml_res.filelists));
- PyTuple_SetItem(tuple, 2, PyStringOrNone_FromString(xml_res.other));
+ PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(xml_res.primary));
+ PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(xml_res.filelists));
+ PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(xml_res.other));
py_xml_from_rpm_end:
free(xml_res.primary);
/* Function on the type */
static PyObject *
-repomd_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+repomd_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
-
_RepomdObject *self = (_RepomdObject *)type->tp_alloc(type, 0);
if (self) {
self->repomd = NULL;
"Repomd object");
static int
-repomd_init(_RepomdObject *self, PyObject *args, PyObject *kwds)
+repomd_init(_RepomdObject *self,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
-
    /* Free all previous resources on reinitialization */
if (self->repomd) {
cr_repomd_free(self->repomd);
}
static PyObject *
-repomd_repr(_RepomdObject *self)
+repomd_repr(G_GNUC_UNUSED _RepomdObject *self)
{
- CR_UNUSED(self);
- return PyString_FromFormat("<createrepo_c.Repomd object>");
+ return PyUnicode_FromString("<createrepo_c.Repomd object>");
}
/* Repomd methods */
Py_RETURN_NONE;
}
+PyDoc_STRVAR(set_contenthash__doc__,
+"set_contenthash(contenthash, contenthash_type) -> None\n\n"
+"Set contenthash value and contenthash_type");
+
+static PyObject *
+set_contenthash(_RepomdObject *self, PyObject *args)
+{
+ char *contenthash, *contenthash_type;
+ if (!PyArg_ParseTuple(args, "zz:set_contenthash", &contenthash, &contenthash_type))
+ return NULL;
+ if (check_RepomdStatus(self))
+ return NULL;
+ cr_repomd_set_contenthash(self->repomd, contenthash, contenthash_type);
+ Py_RETURN_NONE;
+}
+
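From Python, the new method together with the contenthash getsetters added below behaves as in this sketch (the digest value is a placeholder):

    import createrepo_c as cr

    repomd = cr.Repomd()
    repomd.set_contenthash("<hexdigest>", "sha256")
    print(repomd.contenthash, repomd.contenthash_type)
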
PyDoc_STRVAR(add_distro_tag__doc__,
"add_distro_tag(tag[, cpeid=None]) -> None\n\n"
"Add distro tag");
"Sort repomd records to the createrepo_c prefered order");
static PyObject *
-sort_records(_RepomdObject *self, void *nothing)
+sort_records(_RepomdObject *self, G_GNUC_UNUSED void *nothing)
{
- CR_UNUSED(nothing);
cr_repomd_sort_records(self->repomd);
Py_RETURN_NONE;
}
"Generate xml representation of the repomd");
static PyObject *
-xml_dump(_RepomdObject *self, void *nothing)
+xml_dump(_RepomdObject *self, G_GNUC_UNUSED void *nothing)
{
- CR_UNUSED(nothing);
PyObject *py_str;
GError *tmp_err = NULL;
char *xml = cr_xml_dump_repomd(self->repomd, &tmp_err);
nice_exception(&tmp_err, NULL);
return NULL;
}
- py_str = PyStringOrNone_FromString(xml);
+ py_str = PyUnicodeOrNone_FromString(xml);
free(xml);
return py_str;
}
set_revision__doc__},
{"set_repoid", (PyCFunction)set_repoid, METH_VARARGS,
set_repoid__doc__},
+ {"set_contenthash", (PyCFunction)set_contenthash, METH_VARARGS,
+ set_contenthash__doc__},
{"add_distro_tag", (PyCFunction)add_distro_tag, METH_VARARGS|METH_KEYWORDS,
add_distro_tag__doc__},
{"add_repo_tag", (PyCFunction)add_repo_tag, METH_VARARGS,
typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *);
static int
-CheckPyString(PyObject *dep)
+CheckPyUnicode(PyObject *dep)
{
- if (!PyString_Check(dep)) {
+ if (!PyUnicode_Check(dep)) {
PyErr_SetString(PyExc_TypeError, "Element of list has to be a string");
return 1;
}
/** List of convertors for converting lists in cr_Repomd. */
static ListConvertor list_convertors[] = {
- { offsetof(cr_Repomd, repo_tags), PyStringOrNone_FromString,
- CheckPyString, PyObject_ToChunkedString },
- { offsetof(cr_Repomd, distro_tags), PyObject_FromDistroTag,
- CheckPyDistroTag, PyObject_ToDistroTag },
- { offsetof(cr_Repomd, content_tags), PyStringOrNone_FromString,
- CheckPyString, PyObject_ToChunkedString },
- { offsetof(cr_Repomd, records), PyObject_FromRepomdRecord,
- NULL, NULL },
+ { offsetof(cr_Repomd, repo_tags),
+ (ConversionFromFunc) PyUnicodeOrNone_FromString,
+ (ConversionToCheckFunc) CheckPyUnicode,
+ (ConversionToFunc) PyObject_ToChunkedString },
+ { offsetof(cr_Repomd, distro_tags),
+ (ConversionFromFunc) PyObject_FromDistroTag,
+ (ConversionToCheckFunc) CheckPyDistroTag,
+ (ConversionToFunc) PyObject_ToDistroTag },
+ { offsetof(cr_Repomd, content_tags),
+ (ConversionFromFunc) PyUnicodeOrNone_FromString,
+ (ConversionToCheckFunc) CheckPyUnicode,
+ (ConversionToFunc) PyObject_ToChunkedString },
+ { offsetof(cr_Repomd, records),
+ (ConversionFromFunc) PyObject_FromRepomdRecord,
+ (ConversionToCheckFunc) NULL,
+ (ConversionToFunc) NULL },
};
/* Getters */
char *str = *((char **) ((size_t) repomd + (size_t) member_offset));
if (str == NULL)
Py_RETURN_NONE;
- return PyString_FromString(str);
+ return PyUnicode_FromString(str);
}
static PyObject *
{
if (check_RepomdStatus(self))
return -1;
- if (!PyString_Check(value) && value != Py_None) {
- PyErr_SetString(PyExc_TypeError, "String or None expected!");
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
return -1;
}
cr_Repomd *repomd = self->repomd;
"Repoid value", OFFSET(repoid)},
{"repoid_type", (getter)get_str, (setter)set_str,
"Repoid type value", OFFSET(repoid_type)},
+ {"contenthash", (getter)get_str, (setter)set_str,
+ "Contenthash value", OFFSET(contenthash)},
+ {"contenthash_type", (getter)get_str, (setter)set_str,
+ "contenthash type value", OFFSET(contenthash_type)},
{"repo_tags", (getter)get_list, (setter)set_list,
"List of repo tags", &(list_convertors[0])},
{"distro_tags", (getter)get_list, (setter)set_list,
PyTypeObject Repomd_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.Repomd", /* tp_name */
sizeof(_RepomdObject), /* tp_basicsize */
0, /* tp_itemsize */
/* Function on the type */
static PyObject *
-repomdrecord_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+repomdrecord_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
_RepomdRecordObject *self = (_RepomdRecordObject *)type->tp_alloc(type, 0);
if (self) {
self->record = NULL;
" :arg path: Path to the file\n");
static int
-repomdrecord_init(_RepomdRecordObject *self, PyObject *args, PyObject *kwds)
+repomdrecord_init(_RepomdRecordObject *self,
+ PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(kwds);
-
char *type = NULL, *path = NULL;
if (!PyArg_ParseTuple(args, "|zz:repomdrecord_init", &type, &path))
}
static PyObject *
repomdrecord_repr(_RepomdRecordObject *self)
{
- CR_UNUSED(self);
if (self->record->type)
- return PyString_FromFormat("<createrepo_c.RepomdRecord %s object>",
+ return PyUnicode_FromFormat("<createrepo_c.RepomdRecord %s object>",
self->record->type);
else
- return PyString_FromFormat("<createrepo_c.RepomdRecord object>");
+ return PyUnicode_FromFormat("<createrepo_c.RepomdRecord object>");
}
/* RepomdRecord methods */
"Return copy of the RepomdRecord object");
static PyObject *
-copy_repomdrecord(_RepomdRecordObject *self, void *nothing)
+copy_repomdrecord(_RepomdRecordObject *self, G_GNUC_UNUSED void *nothing)
{
- CR_UNUSED(nothing);
if (check_RepomdRecordStatus(self))
return NULL;
return Object_FromRepomdRecord(cr_repomd_record_copy(self->record));
"Add (prepend) file checksum to the filename");
static PyObject *
-rename_file(_RepomdRecordObject *self, void *nothing)
+rename_file(_RepomdRecordObject *self, G_GNUC_UNUSED void *nothing)
{
GError *err = NULL;
- CR_UNUSED(nothing);
-
cr_repomd_record_rename_file(self->record, &err);
if (err) {
nice_exception(&err, NULL);
if (check_RepomdRecordStatus(self))
return NULL;
cr_RepomdRecord *rec = self->record;
- gint64 val = *((gint64 *) ((size_t)rec + (size_t) member_offset));
+ gint64 val = (gint64) *((gint64 *) ((size_t)rec + (size_t) member_offset));
return PyLong_FromLongLong((long long) val);
}
if (check_RepomdRecordStatus(self))
return NULL;
cr_RepomdRecord *rec = self->record;
- gint64 val = *((int *) ((size_t)rec + (size_t) member_offset));
+ gint64 val = (gint64) *((int *) ((size_t)rec + (size_t) member_offset));
return PyLong_FromLongLong((long long) val);
}
char *str = *((char **) ((size_t) rec + (size_t) member_offset));
if (str == NULL)
Py_RETURN_NONE;
- return PyString_FromString(str);
+ return PyUnicode_FromString(str);
}
static int
return -1;
if (PyLong_Check(value)) {
val = (gint64) PyLong_AsLong(value);
+ } else if (PyFloat_Check(value)) {
+ val = (gint64) PyFloat_AS_DOUBLE(value);
+#if PY_MAJOR_VERSION < 3
} else if (PyInt_Check(value)) {
val = (gint64) PyInt_AS_LONG(value);
+#endif
} else {
PyErr_SetString(PyExc_TypeError, "Number expected!");
return -1;
return -1;
if (PyLong_Check(value)) {
val = PyLong_AsLong(value);
+ } else if (PyFloat_Check(value)) {
+ val = (long long) PyFloat_AS_DOUBLE(value);
+#if PY_MAJOR_VERSION < 3
} else if (PyInt_Check(value)) {
val = PyInt_AS_LONG(value);
+#endif
} else {
PyErr_SetString(PyExc_TypeError, "Number expected!");
return -1;
{
if (check_RepomdRecordStatus(self))
return -1;
- if (!PyString_Check(value) && value != Py_None) {
- PyErr_SetString(PyExc_TypeError, "String or None expected!");
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
return -1;
}
cr_RepomdRecord *rec = self->record;
/* Object */
PyTypeObject RepomdRecord_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.RepomdRecord", /* tp_name */
sizeof(_RepomdRecordObject), /* tp_basicsize */
0, /* tp_itemsize */
/* Function on the type */
static PyObject *
-sqlite_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+sqlite_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
_SqliteObject *self = (_SqliteObject *)type->tp_alloc(type, 0);
if (self)
self->db = NULL;
" :arg db_type: One from DB_PRIMARY, DB_FILELISTS, DB_OTHER constans\n");
static int
-sqlite_init(_SqliteObject *self, PyObject *args, PyObject *kwds)
+sqlite_init(_SqliteObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds)
{
char *path;
int db_type;
GError *err = NULL;
PyObject *ret;
- CR_UNUSED(kwds);
-
if (!PyArg_ParseTuple(args, "si|:sqlite_init", &path, &db_type))
return -1;
else if (self->db->type == CR_DB_FILELISTS) type = "FilelistsDb";
else if (self->db->type == CR_DB_OTHER) type = "OtherDb";
else type = "UnknownDb";
- return PyString_FromFormat("<createrepo_c.Sqlite %s object>", type);
+ return PyUnicode_FromFormat("<createrepo_c.Sqlite %s object>", type);
}
/* Sqlite methods */
"Close the sqlite database");
static PyObject *
-close_db(_SqliteObject *self, void *nothing)
+close_db(_SqliteObject *self, G_GNUC_UNUSED void *nothing)
{
GError *err = NULL;
- CR_UNUSED(nothing);
-
if (self->db) {
cr_db_close(self->db, &err);
self->db = NULL;
};
PyTypeObject Sqlite_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.Sqlite", /* tp_name */
sizeof(_SqliteObject), /* tp_basicsize */
0, /* tp_itemsize */
#include "src/createrepo_c.h"
#include "src/repomd_internal.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
+
void
PyErr_ToGError(GError **err)
{
if (!pystr) {
PyErr_Clear();
- g_set_error(err, CR_XML_PARSER_ERROR, CRE_XMLPARSER,
+ g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER,
"Error while error handling");
} else {
- g_set_error(err, CR_XML_PARSER_ERROR, CRE_XMLPARSER,
- "%s", PyString_AsString(pystr));
+ if (PyUnicode_Check(pystr)) {
+ pystr = PyUnicode_AsUTF8String(pystr);
+ }
+ g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER,
+ "%s", PyBytes_AsString(pystr));
}
Py_XDECREF(pystr);
}
PyObject *
-PyStringOrNone_FromString(const char *str)
+PyUnicodeOrNone_FromString(const char *str)
{
if (str == NULL)
Py_RETURN_NONE;
- return PyString_FromString(str);
+ return PyUnicode_FromString(str);
}
char *
PyObject_ToStrOrNull(PyObject *pyobj)
{
    // String returned by this function should not be freed or modified
- if (PyString_Check(pyobj))
- return PyString_AsString(pyobj);
+ if (PyUnicode_Check(pyobj)) {
+ pyobj = PyUnicode_AsUTF8String(pyobj);
+ }
+
+ if (PyBytes_Check(pyobj)) {
+ return PyBytes_AsString(pyobj);
+ }
+
// TODO: ? Add support for pyobj like: ("xxx",) and ["xxx"]
return NULL;
}
PyObject_ToLongLongOrZero(PyObject *pyobj)
{
long long num = 0;
- if (PyLong_Check(pyobj))
+ if (PyLong_Check(pyobj)) {
num = (long long) PyLong_AsLongLong(pyobj);
- else if (PyInt_Check(pyobj))
+ } else if (PyFloat_Check(pyobj)) {
+ num = (long long) PyFloat_AS_DOUBLE(pyobj);
+#if PY_MAJOR_VERSION < 3
+ } else if (PyInt_Check(pyobj)) {
num = (long long) PyInt_AS_LONG(pyobj);
+#endif
+ }
return num;
}
if ((tuple = PyTuple_New(6)) == NULL)
return NULL;
- PyTuple_SetItem(tuple, 0, PyStringOrNone_FromString(dep->name));
- PyTuple_SetItem(tuple, 1, PyStringOrNone_FromString(dep->flags));
- PyTuple_SetItem(tuple, 2, PyStringOrNone_FromString(dep->epoch));
- PyTuple_SetItem(tuple, 3, PyStringOrNone_FromString(dep->version));
- PyTuple_SetItem(tuple, 4, PyStringOrNone_FromString(dep->release));
+ PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(dep->name));
+ PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(dep->flags));
+ PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(dep->epoch));
+ PyTuple_SetItem(tuple, 3, PyUnicodeOrNone_FromString(dep->version));
+ PyTuple_SetItem(tuple, 4, PyUnicodeOrNone_FromString(dep->release));
PyTuple_SetItem(tuple, 5, PyBool_FromLong((long) dep->pre));
return tuple;
if ((tuple = PyTuple_New(3)) == NULL)
return NULL;
- PyTuple_SetItem(tuple, 0, PyStringOrNone_FromString(file->type));
- PyTuple_SetItem(tuple, 1, PyStringOrNone_FromString(file->path));
- PyTuple_SetItem(tuple, 2, PyStringOrNone_FromString(file->name));
+ PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(file->type));
+ PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(file->path));
+ PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(file->name));
return tuple;
}
if ((tuple = PyTuple_New(3)) == NULL)
return NULL;
- PyTuple_SetItem(tuple, 0, PyStringOrNone_FromString(log->author));
+ PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(log->author));
PyTuple_SetItem(tuple, 1, PyLong_FromLong((long) log->date));
- PyTuple_SetItem(tuple, 2, PyStringOrNone_FromString(log->changelog));
+ PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(log->changelog));
return tuple;
}
if ((tuple = PyTuple_New(2)) == NULL)
return NULL;
- PyTuple_SetItem(tuple, 0, PyStringOrNone_FromString(tag->cpeid));
- PyTuple_SetItem(tuple, 1, PyStringOrNone_FromString(tag->val));
+ PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(tag->cpeid));
+ PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(tag->val));
return tuple;
}
for (Py_ssize_t x=0; x < size; x++) {
PyObject *py_str = PyList_GetItem(py_list, x);
assert(py_str != NULL);
- if (!PyString_Check(py_str))
+ if (!PyUnicode_Check(py_str) && !PyBytes_Check(py_str))
// Hmm, element is not a string, just skip it
continue;
- list = g_slist_prepend(list, PyString_AsString(py_str));
+
+ if (PyUnicode_Check(py_str))
+ py_str = PyUnicode_AsUTF8String(py_str);
+
+ list = g_slist_prepend(list, PyBytes_AsString(py_str));
}
return list;
// Clears the current Python Exception and return its representation in GError
void PyErr_ToGError(GError **err);
-PyObject *PyStringOrNone_FromString(const char *str);
+PyObject *PyUnicodeOrNone_FromString(const char *str);
char *PyObject_ToStrOrNull(PyObject *pyobj);
char *PyObject_ToChunkedString(PyObject *pyobj, GStringChunk *chunk);
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <Python.h>
+#include <assert.h>
+#include <stddef.h>
+
+#include "updatecollection-py.h"
+#include "updatecollectionpackage-py.h"
+#include "exception-py.h"
+#include "typeconversion.h"
+#include "contentstat-py.h"
+
+typedef struct {
+ PyObject_HEAD
+ cr_UpdateCollection *collection;
+} _UpdateCollectionObject;
+
+PyObject *
+Object_FromUpdateCollection(cr_UpdateCollection *rec)
+{
+ PyObject *py_rec;
+
+ if (!rec) {
+ PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateCollection pointer not NULL.");
+ return NULL;
+ }
+
+ py_rec = PyObject_CallObject((PyObject *) &UpdateCollection_Type, NULL);
+ cr_updatecollection_free(((_UpdateCollectionObject *)py_rec)->collection);
+ ((_UpdateCollectionObject *)py_rec)->collection = rec;
+
+ return py_rec;
+}
+
+cr_UpdateCollection *
+UpdateCollection_FromPyObject(PyObject *o)
+{
+ if (!UpdateCollectionObject_Check(o)) {
+ PyErr_SetString(PyExc_TypeError, "Expected a UpdateCollection object.");
+ return NULL;
+ }
+ return ((_UpdateCollectionObject *)o)->collection;
+}
+
+static int
+check_UpdateCollectionStatus(const _UpdateCollectionObject *self)
+{
+ assert(self != NULL);
+ assert(UpdateCollectionObject_Check(self));
+ if (self->collection == NULL) {
+ PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateCollection object.");
+ return -1;
+ }
+ return 0;
+}
+
+/* Function on the type */
+
+static PyObject *
+updatecollection_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ _UpdateCollectionObject *self = (_UpdateCollectionObject *)type->tp_alloc(type, 0);
+ if (self) {
+ self->collection = NULL;
+ }
+ return (PyObject *)self;
+}
+
+PyDoc_STRVAR(updatecollection_init__doc__,
+".. method:: __init__()\n\n");
+
+static int
+updatecollection_init(_UpdateCollectionObject *self,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+    /* Free all previous resources on reinitialization */
+ if (self->collection)
+ cr_updatecollection_free(self->collection);
+
+ /* Init */
+ self->collection = cr_updatecollection_new();
+ if (self->collection == NULL) {
+ PyErr_SetString(CrErr_Exception, "UpdateCollection initialization failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+updatecollection_dealloc(_UpdateCollectionObject *self)
+{
+ if (self->collection)
+ cr_updatecollection_free(self->collection);
+ Py_TYPE(self)->tp_free(self);
+}
+
+static PyObject *
+updatecollection_repr(G_GNUC_UNUSED _UpdateCollectionObject *self)
+{
+ return PyUnicode_FromFormat("<createrepo_c.UpdateCollection object>");
+}
+
+/* UpdateCollection methods */
+
+PyDoc_STRVAR(append__doc__,
+"append(updatecollectionpackage) -> None\n\n"
+"Add UpdateCollectionPackage");
+
+static PyObject *
+append(_UpdateCollectionObject *self, PyObject *args)
+{
+ PyObject *pkg;
+ cr_UpdateCollectionPackage *orig, *new;
+
+ if (!PyArg_ParseTuple(args, "O!:append", &UpdateCollectionPackage_Type,
+ &pkg))
+ return NULL;
+ if (check_UpdateCollectionStatus(self))
+ return NULL;
+
+ orig = UpdateCollectionPackage_FromPyObject(pkg);
+ new = cr_updatecollectionpackage_copy(orig);
+ cr_updatecollection_append_package(self->collection, new);
+ Py_RETURN_NONE;
+}
+
+
+PyDoc_STRVAR(copy__doc__,
+"copy() -> UpdateCollection\n\n"
+"Return copy of the UpdateCollection object");
+
+static PyObject *
+copy_updatecollection(_UpdateCollectionObject *self,
+ G_GNUC_UNUSED void *nothing)
+{
+ if (check_UpdateCollectionStatus(self))
+ return NULL;
+ return Object_FromUpdateCollection(cr_updatecollection_copy(self->collection));
+}
+
+static struct PyMethodDef updatecollection_methods[] = {
+ {"append", (PyCFunction)append, METH_VARARGS,
+ append__doc__},
+ {"copy", (PyCFunction)copy_updatecollection, METH_NOARGS,
+ copy__doc__},
+ {NULL} /* sentinel */
+};
+
+/* Convertors for getsetters */
+
+/** Convert C object to PyObject.
+ * @param C object
+ * @return PyObject representation
+ */
+typedef PyObject *(*ConversionFromFunc)(void *);
+
+/** Check whether an element of a list has a valid format.
+ * @param a single list element
+ * @return 0 if ok, 1 otherwise
+ */
+typedef int (*ConversionToCheckFunc)(PyObject *);
+
+/** Convert PyObject to C representation.
+ * @param PyObject
+ * @return C representation
+ */
+typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *);
+
+PyObject *
+PyObject_FromUpdateCollectionPackage(cr_UpdateCollectionPackage *pkg)
+{
+ return Object_FromUpdateCollectionPackage(
+ cr_updatecollectionpackage_copy(pkg));
+}
+
+typedef struct {
+    size_t offset;          /*!< Offset of the list in cr_UpdateCollection */
+ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */
+ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */
+ ConversionToFunc t; /*!< Conversion func to C object from PyObject */
+} ListConvertor;
+
+/** List of convertors for converting lists in cr_UpdateCollection. */
+static ListConvertor list_convertors[] = {
+ { offsetof(cr_UpdateCollection, packages),
+ (ConversionFromFunc) PyObject_FromUpdateCollectionPackage,
+ (ConversionToCheckFunc) NULL,
+ (ConversionToFunc) NULL },
+};
+
+/* getsetters */
+
+#define OFFSET(member) (void *) offsetof(cr_UpdateCollection, member)
+
+static PyObject *
+get_str(_UpdateCollectionObject *self, void *member_offset)
+{
+ if (check_UpdateCollectionStatus(self))
+ return NULL;
+ cr_UpdateCollection *rec = self->collection;
+ char *str = *((char **) ((size_t) rec + (size_t) member_offset));
+ if (str == NULL)
+ Py_RETURN_NONE;
+ return PyUnicode_FromString(str);
+}
+
+static PyObject *
+get_list(_UpdateCollectionObject *self, void *conv)
+{
+    ListConvertor *convertor = conv;
+    PyObject *list;
+
+    if (check_UpdateCollectionStatus(self))
+        return NULL;
+
+    /* Dereference the list member only after the status check,
+     * otherwise a NULL collection would be dereferenced. */
+    cr_UpdateCollection *collection = self->collection;
+    GSList *glist = *((GSList **) ((size_t) collection + (size_t) convertor->offset));
+
+ if ((list = PyList_New(0)) == NULL)
+ return NULL;
+
+ for (GSList *elem = glist; elem; elem = g_slist_next(elem)) {
+ PyObject *obj = convertor->f(elem->data);
+ if (!obj) continue;
+ PyList_Append(list, obj);
+ Py_DECREF(obj);
+ }
+
+ return list;
+}
+
+static int
+set_str(_UpdateCollectionObject *self, PyObject *value, void *member_offset)
+{
+ if (check_UpdateCollectionStatus(self))
+ return -1;
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
+ return -1;
+ }
+ cr_UpdateCollection *rec = self->collection;
+ char *str = cr_safe_string_chunk_insert(rec->chunk,
+ PyObject_ToStrOrNull(value));
+ *((char **) ((size_t) rec + (size_t) member_offset)) = str;
+ return 0;
+}
+
+static PyGetSetDef updatecollection_getsetters[] = {
+ {"shortname", (getter)get_str, (setter)set_str,
+ "Short name", OFFSET(shortname)},
+ {"name", (getter)get_str, (setter)set_str,
+ "Name of the collection", OFFSET(name)},
+ {"packages", (getter)get_list, (setter)NULL,
+ "List of packages", &(list_convertors[0])},
+ {NULL, NULL, NULL, NULL, NULL} /* sentinel */
+};
+
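Taken together, the methods and getsetters above give the following Python-level behavior (default construction of UpdateCollectionPackage is an assumption):

    import createrepo_c as cr

    col = cr.UpdateCollection()
    col.name = "Product updates"   # set_str getsetter
    pkg = cr.UpdateCollectionPackage()
    col.append(pkg)                # a copy of pkg is stored
    print(len(col.packages))       # read-only list getter -> 1
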
+/* Object */
+
+PyTypeObject UpdateCollection_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "createrepo_c.UpdateCollection",/* tp_name */
+ sizeof(_UpdateCollectionObject),/* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor) updatecollection_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ (reprfunc) updatecollection_repr,/* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
+ updatecollection_init__doc__, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ PyObject_SelfIter, /* tp_iter */
+ 0, /* tp_iternext */
+ updatecollection_methods, /* tp_methods */
+ 0, /* tp_members */
+ updatecollection_getsetters, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc) updatecollection_init,/* tp_init */
+ 0, /* tp_alloc */
+ updatecollection_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+};
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef CR_UPDATECOLLECTION_PY_H
+#define CR_UPDATECOLLECTION_PY_H
+
+#include "src/createrepo_c.h"
+
+extern PyTypeObject UpdateCollection_Type;
+
+#define UpdateCollectionObject_Check(o) \
+ PyObject_TypeCheck(o, &UpdateCollection_Type)
+
+PyObject *Object_FromUpdateCollection(cr_UpdateCollection *rec);
+cr_UpdateCollection *UpdateCollection_FromPyObject(PyObject *o);
+
+#endif
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <Python.h>
+#include <assert.h>
+#include <stddef.h>
+
+#include "updatecollectionpackage-py.h"
+#include "exception-py.h"
+#include "typeconversion.h"
+#include "contentstat-py.h"
+
+typedef struct {
+ PyObject_HEAD
+ cr_UpdateCollectionPackage *pkg;
+} _UpdateCollectionPackageObject;
+
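+/* Wrap an existing cr_UpdateCollectionPackage in a new Python object.
+ * Ownership of the pointer transfers to the wrapper, which frees it in
+ * its dealloc. */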
+PyObject *
+Object_FromUpdateCollectionPackage(cr_UpdateCollectionPackage *pkg)
+{
+ PyObject *py_rec;
+
+ if (!pkg) {
+        PyErr_SetString(PyExc_ValueError, "Expected a non-NULL cr_UpdateCollectionPackage pointer.");
+ return NULL;
+ }
+
+    py_rec = PyObject_CallObject((PyObject *) &UpdateCollectionPackage_Type, NULL);
+    if (!py_rec)
+        return NULL;
+    cr_updatecollectionpackage_free(((_UpdateCollectionPackageObject *)py_rec)->pkg);
+    ((_UpdateCollectionPackageObject *)py_rec)->pkg = pkg;
+
+ return py_rec;
+}
+
+cr_UpdateCollectionPackage *
+UpdateCollectionPackage_FromPyObject(PyObject *o)
+{
+ if (!UpdateCollectionPackageObject_Check(o)) {
+ PyErr_SetString(PyExc_TypeError, "Expected a UpdateCollectionPackage object.");
+ return NULL;
+ }
+ return ((_UpdateCollectionPackageObject *)o)->pkg;
+}
+
+static int
+check_UpdateCollectionPackageStatus(const _UpdateCollectionPackageObject *self)
+{
+ assert(self != NULL);
+ assert(UpdateCollectionPackageObject_Check(self));
+ if (self->pkg == NULL) {
+ PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateCollectionPackage object.");
+ return -1;
+ }
+ return 0;
+}
+
+/* Function on the type */
+
+static PyObject *
+updatecollectionpackage_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ _UpdateCollectionPackageObject *self = (_UpdateCollectionPackageObject *)type->tp_alloc(type, 0);
+ if (self) {
+ self->pkg = NULL;
+ }
+ return (PyObject *)self;
+}
+
+PyDoc_STRVAR(updatecollectionpackage_init__doc__,
+".. method:: __init__()\n\n");
+
+static int
+updatecollectionpackage_init(_UpdateCollectionPackageObject *self,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ /* Free all previous resources when reinitialization */
+ if (self->pkg)
+ cr_updatecollectionpackage_free(self->pkg);
+
+ /* Init */
+ self->pkg = cr_updatecollectionpackage_new();
+ if (self->pkg == NULL) {
+ PyErr_SetString(CrErr_Exception, "UpdateCollectionPackage initialization failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+updatecollectionpackage_dealloc(_UpdateCollectionPackageObject *self)
+{
+ if (self->pkg)
+ cr_updatecollectionpackage_free(self->pkg);
+ Py_TYPE(self)->tp_free(self);
+}
+
+static PyObject *
+updatecollectionpackage_repr(G_GNUC_UNUSED _UpdateCollectionPackageObject *self)
+{
+ return PyUnicode_FromFormat("<createrepo_c.UpdateCollectionPackage object>");
+}
+
+/* UpdateCollectionPackage methods */
+
+PyDoc_STRVAR(copy__doc__,
+"copy() -> UpdateCollectionPackage\n\n"
+"Return copy of the UpdateCollectionPackage object");
+
+static PyObject *
+copy_updatecollectionpackage(_UpdateCollectionPackageObject *self,
+ G_GNUC_UNUSED void *nothing)
+{
+ if (check_UpdateCollectionPackageStatus(self))
+ return NULL;
+ return Object_FromUpdateCollectionPackage(cr_updatecollectionpackage_copy(self->pkg));
+}
+
+static struct PyMethodDef updatecollectionpackage_methods[] = {
+ {"copy", (PyCFunction)copy_updatecollectionpackage, METH_NOARGS,
+ copy__doc__},
+ {NULL} /* sentinel */
+};
+
+/* getsetters */
+
+#define OFFSET(member) (void *) offsetof(cr_UpdateCollectionPackage, member)
+
+static PyObject *
+get_int(_UpdateCollectionPackageObject *self, void *member_offset)
+{
+ if (check_UpdateCollectionPackageStatus(self))
+ return NULL;
+ cr_UpdateCollectionPackage *pkg = self->pkg;
+ gint64 val = *((int *) ((size_t)pkg + (size_t) member_offset));
+ return PyLong_FromLongLong((long long) val);
+}
+
+static PyObject *
+get_str(_UpdateCollectionPackageObject *self, void *member_offset)
+{
+ if (check_UpdateCollectionPackageStatus(self))
+ return NULL;
+ cr_UpdateCollectionPackage *pkg = self->pkg;
+ char *str = *((char **) ((size_t) pkg + (size_t) member_offset));
+ if (str == NULL)
+ Py_RETURN_NONE;
+ return PyUnicode_FromString(str);
+}
+
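+/* Accept any Python number (int, float, and the Python 2 int) and store it
+ * truncated into the int-sized member selected by member_offset. */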
+static int
+set_int(_UpdateCollectionPackageObject *self, PyObject *value, void *member_offset)
+{
+ long val;
+ if (check_UpdateCollectionPackageStatus(self))
+ return -1;
+ if (PyLong_Check(value)) {
+ val = PyLong_AsLong(value);
+ } else if (PyFloat_Check(value)) {
+        val = (long) PyFloat_AS_DOUBLE(value);
+#if PY_MAJOR_VERSION < 3
+ } else if (PyInt_Check(value)) {
+ val = PyInt_AS_LONG(value);
+#endif
+ } else {
+ PyErr_SetString(PyExc_TypeError, "Number expected!");
+ return -1;
+ }
+ cr_UpdateCollectionPackage *pkg = self->pkg;
+ *((int *) ((size_t) pkg + (size_t) member_offset)) = (int) val;
+ return 0;
+}
+
+static int
+set_str(_UpdateCollectionPackageObject *self, PyObject *value, void *member_offset)
+{
+ if (check_UpdateCollectionPackageStatus(self))
+ return -1;
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
+ return -1;
+ }
+ cr_UpdateCollectionPackage *pkg = self->pkg;
+ char *str = cr_safe_string_chunk_insert(pkg->chunk,
+ PyObject_ToStrOrNull(value));
+ *((char **) ((size_t) pkg + (size_t) member_offset)) = str;
+ return 0;
+}
+
+static PyGetSetDef updatecollectionpackage_getsetters[] = {
+ {"name", (getter)get_str, (setter)set_str,
+ "Name", OFFSET(name)},
+ {"version", (getter)get_str, (setter)set_str,
+ "Version", OFFSET(version)},
+ {"release", (getter)get_str, (setter)set_str,
+ "Release", OFFSET(release)},
+ {"epoch", (getter)get_str, (setter)set_str,
+ "Epoch", OFFSET(epoch)},
+ {"arch", (getter)get_str, (setter)set_str,
+ "Architecture", OFFSET(arch)},
+ {"src", (getter)get_str, (setter)set_str,
+ "Source filename", OFFSET(src)},
+ {"filename", (getter)get_str, (setter)set_str,
+ "Filename", OFFSET(filename)},
+ {"sum", (getter)get_str, (setter)set_str,
+ "Checksum", OFFSET(sum)},
+ {"sum_type", (getter)get_int, (setter)set_int,
+ "Type of checksum", OFFSET(sum_type)},
+ {"reboot_suggested", (getter)get_int, (setter)set_int,
+ "Size of the file", OFFSET(reboot_suggested)},
+ {NULL, NULL, NULL, NULL, NULL} /* sentinel */
+};
+
+/* Object */
+
+PyTypeObject UpdateCollectionPackage_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "createrepo_c.UpdateCollectionPackage", /* tp_name */
+ sizeof(_UpdateCollectionPackageObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor) updatecollectionpackage_dealloc,/* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ (reprfunc) updatecollectionpackage_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
+ updatecollectionpackage_init__doc__, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ PyObject_SelfIter, /* tp_iter */
+ 0, /* tp_iternext */
+ updatecollectionpackage_methods, /* tp_methods */
+ 0, /* tp_members */
+ updatecollectionpackage_getsetters, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc) updatecollectionpackage_init, /* tp_init */
+ 0, /* tp_alloc */
+ updatecollectionpackage_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+};
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef CR_UPDATECOLLECTIONPACKAGE_PY_H
+#define CR_UPDATECOLLECTIONPACKAGE_PY_H
+
+#include "src/createrepo_c.h"
+
+extern PyTypeObject UpdateCollectionPackage_Type;
+
+#define UpdateCollectionPackageObject_Check(o) \
+ PyObject_TypeCheck(o, &UpdateCollectionPackage_Type)
+
+PyObject *Object_FromUpdateCollectionPackage(cr_UpdateCollectionPackage *rec);
+cr_UpdateCollectionPackage *UpdateCollectionPackage_FromPyObject(PyObject *o);
+
+#endif
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <Python.h>
+#include <assert.h>
+#include <stddef.h>
+
+#include "updateinfo-py.h"
+#include "updaterecord-py.h"
+#include "exception-py.h"
+#include "typeconversion.h"
+
+typedef struct {
+ PyObject_HEAD
+ cr_UpdateInfo *updateinfo;
+} _UpdateInfoObject;
+
+cr_UpdateInfo *
+UpdateInfo_FromPyObject(PyObject *o)
+{
+ if (!UpdateInfoObject_Check(o)) {
+ PyErr_SetString(PyExc_TypeError, "Expected a createrepo_c.UpdateInfo object.");
+ return NULL;
+ }
+ return ((_UpdateInfoObject *)o)->updateinfo;
+}
+
+static int
+check_UpdateInfoStatus(const _UpdateInfoObject *self)
+{
+ assert(self != NULL);
+ assert(UpdateInfoObject_Check(self));
+ if (self->updateinfo == NULL) {
+ PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateInfo object.");
+ return -1;
+ }
+ return 0;
+}
+
+/* Function on the type */
+
+static PyObject *
+updateinfo_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ _UpdateInfoObject *self = (_UpdateInfoObject *)type->tp_alloc(type, 0);
+ if (self) {
+ self->updateinfo = NULL;
+ }
+ return (PyObject *)self;
+}
+
+PyDoc_STRVAR(updateinfo_init__doc__,
+"UpdateInfo object");
+
+static int
+updateinfo_init(_UpdateInfoObject *self,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ /* Free all previous resources when reinitialization */
+ if (self->updateinfo) {
+ cr_updateinfo_free(self->updateinfo);
+ }
+
+ /* Init */
+ self->updateinfo = cr_updateinfo_new();
+ if (self->updateinfo == NULL) {
+ PyErr_SetString(CrErr_Exception, "UpdateInfo initialization failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+updateinfo_dealloc(_UpdateInfoObject *self)
+{
+ if (self->updateinfo)
+ cr_updateinfo_free(self->updateinfo);
+ Py_TYPE(self)->tp_free(self);
+}
+
+static PyObject *
+updateinfo_repr(G_GNUC_UNUSED _UpdateInfoObject *self)
+{
+ return PyUnicode_FromFormat("<createrepo_c.UpdateInfo object>");
+}
+
+/* UpdateInfo methods */
+
+PyDoc_STRVAR(append__doc__,
+"append(updaterecord) -> None\n\n"
+"Append UpdateRecord");
+
+static PyObject *
+append(_UpdateInfoObject *self, PyObject *args)
+{
+ PyObject *record;
+ cr_UpdateRecord *orig, *new;
+
+ if (!PyArg_ParseTuple(args, "O!:append", &UpdateRecord_Type, &record))
+ return NULL;
+ if (check_UpdateInfoStatus(self))
+ return NULL;
+
+ orig = UpdateRecord_FromPyObject(record);
+ new = cr_updaterecord_copy(orig);
+ cr_updateinfo_apped_record(self->updateinfo, new);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(xml_dump__doc__,
+"xml_dump() -> str\n\n"
+"Generate xml representation of the updateinfo");
+
+static PyObject *
+xml_dump(_UpdateInfoObject *self, G_GNUC_UNUSED void *nothing)
+{
+ PyObject *py_str;
+ GError *tmp_err = NULL;
+ char *xml = cr_xml_dump_updateinfo(self->updateinfo, &tmp_err);
+ if (tmp_err) {
+ nice_exception(&tmp_err, NULL);
+ return NULL;
+ }
+ py_str = PyUnicodeOrNone_FromString(xml);
+ free(xml);
+ return py_str;
+}
+
+static struct PyMethodDef updateinfo_methods[] = {
+ {"append", (PyCFunction)append, METH_VARARGS,
+ append__doc__},
+ {"xml_dump", (PyCFunction)xml_dump, METH_NOARGS,
+ xml_dump__doc__},
+ {NULL} /* sentinel */
+};
+
+/* Convertors for getsetters */
+
+/** Convert C object to PyObject.
+ * @param C object
+ * @return PyObject representation
+ */
+typedef PyObject *(*ConversionFromFunc)(void *);
+
+/** Check whether a single list element has a valid format.
+ * @param a single list element
+ * @return 0 if ok, 1 otherwise
+ */
+typedef int (*ConversionToCheckFunc)(PyObject *);
+
+/** Convert PyObject to C representation.
+ * @param PyObject
+ * @return C representation
+ */
+typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *);
+
+PyObject *
+PyObject_FromUpdateRecord(cr_UpdateRecord *rec)
+{
+ return Object_FromUpdateRecord(cr_updaterecord_copy(rec));
+}
+
+typedef struct {
+    size_t offset;          /*!< Offset of the list in cr_UpdateInfo */
+ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */
+ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */
+ ConversionToFunc t; /*!< Conversion func to C object from PyObject */
+} ListConvertor;
+
+/** List of convertors for converting lists in cr_UpdateInfo. */
+static ListConvertor list_convertors[] = {
+ { offsetof(cr_UpdateInfo, updates),
+ (ConversionFromFunc) PyObject_FromUpdateRecord,
+ (ConversionToCheckFunc) NULL,
+ (ConversionToFunc) NULL },
+};
+
+/* Getters */
+
+static PyObject *
+get_list(_UpdateInfoObject *self, void *conv)
+{
+ ListConvertor *convertor = conv;
+ PyObject *list;
+    cr_UpdateInfo *updateinfo = self->updateinfo;
+
+    if (check_UpdateInfoStatus(self))
+        return NULL;
+
+    GSList *glist = *((GSList **) ((size_t) updateinfo + (size_t) convertor->offset));
+
+ if ((list = PyList_New(0)) == NULL)
+ return NULL;
+
+ for (GSList *elem = glist; elem; elem = g_slist_next(elem)) {
+ PyObject *obj = convertor->f(elem->data);
+ if (!obj) continue;
+ PyList_Append(list, obj);
+ Py_DECREF(obj);
+ }
+
+ return list;
+}
+
+/* Setters */
+
+/*
+static int
+set_list(_UpdateInfoObject *self, PyObject *list, void *conv)
+{
+ ListConvertor *convertor = conv;
+ cr_UpdateInfo *updateinfo = self->updateinfo;
+ GSList *glist = NULL;
+
+ if (check_UpdateInfoStatus(self))
+ return -1;
+
+ if (!PyList_Check(list)) {
+ PyErr_SetString(PyExc_TypeError, "List expected!");
+ return -1;
+ }
+
+ Py_ssize_t len = PyList_Size(list);
+
+ // Check all elements
+ for (Py_ssize_t x = 0; x < len; x++) {
+ PyObject *elem = PyList_GetItem(list, x);
+ if (convertor->t_check && convertor->t_check(elem))
+ return -1;
+ }
+
+ for (Py_ssize_t x = 0; x < len; x++) {
+ glist = g_slist_prepend(glist,
+ convertor->t(PyList_GetItem(list, x), updateinfo->chunk));
+ }
+
+ *((GSList **) ((size_t) updateinfo + (size_t) convertor->offset)) = glist;
+ return 0;
+}
+*/
+
+/** Return offset of a selected member of cr_UpdateInfo structure. */
+#define OFFSET(member) (void *) offsetof(cr_UpdateInfo, member)
+
+static PyGetSetDef updateinfo_getsetters[] = {
+ {"updates", (getter)get_list, (setter)NULL,
+ "List of UpdateRecords", &(list_convertors[0])},
+ {NULL, NULL, NULL, NULL, NULL} /* sentinel */
+};
+
+/* Object */
+
+
+PyTypeObject UpdateInfo_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "createrepo_c.UpdateInfo", /* tp_name */
+ sizeof(_UpdateInfoObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor) updateinfo_dealloc,/* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ (reprfunc) updateinfo_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
+ updateinfo_init__doc__, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ PyObject_SelfIter, /* tp_iter */
+ 0, /* tp_iternext */
+ updateinfo_methods, /* tp_methods */
+ 0, /* tp_members */
+ updateinfo_getsetters, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc) updateinfo_init, /* tp_init */
+ 0, /* tp_alloc */
+ updateinfo_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+};
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef CR_UPDATEINFO_PY_H
+#define CR_UPDATEINFO_PY_H
+
+#include "src/createrepo_c.h"
+
+extern PyTypeObject UpdateInfo_Type;
+
+#define UpdateInfoObject_Check(o) PyObject_TypeCheck(o, &UpdateInfo_Type)
+
+cr_UpdateInfo *UpdateInfo_FromPyObject(PyObject *o);
+
+#endif
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <Python.h>
+#include <datetime.h> // from python
+#include <assert.h>
+#include <stddef.h>
+#include <time.h>
+
+#include "updaterecord-py.h"
+#include "updatereference-py.h"
+#include "updatecollection-py.h"
+#include "exception-py.h"
+#include "typeconversion.h"
+#include "contentstat-py.h"
+
+typedef struct {
+ PyObject_HEAD
+ cr_UpdateRecord *record;
+} _UpdateRecordObject;
+
+PyObject *
+Object_FromUpdateRecord(cr_UpdateRecord *rec)
+{
+ PyObject *py_rec;
+
+ if (!rec) {
+ PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateRecord pointer not NULL.");
+ return NULL;
+ }
+
+    py_rec = PyObject_CallObject((PyObject *) &UpdateRecord_Type, NULL);
+    if (!py_rec)
+        return NULL;
+    cr_updaterecord_free(((_UpdateRecordObject *)py_rec)->record);
+    ((_UpdateRecordObject *)py_rec)->record = rec;
+
+ return py_rec;
+}
+
+cr_UpdateRecord *
+UpdateRecord_FromPyObject(PyObject *o)
+{
+ if (!UpdateRecordObject_Check(o)) {
+ PyErr_SetString(PyExc_TypeError, "Expected a UpdateRecord object.");
+ return NULL;
+ }
+ return ((_UpdateRecordObject *)o)->record;
+}
+
+static int
+check_UpdateRecordStatus(const _UpdateRecordObject *self)
+{
+ assert(self != NULL);
+ assert(UpdateRecordObject_Check(self));
+ if (self->record == NULL) {
+ PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateRecord object.");
+ return -1;
+ }
+ return 0;
+}
+
+/* Function on the type */
+
+static PyObject *
+updaterecord_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ _UpdateRecordObject *self = (_UpdateRecordObject *)type->tp_alloc(type, 0);
+ if (self) {
+ self->record = NULL;
+ }
+ return (PyObject *)self;
+}
+
+PyDoc_STRVAR(updaterecord_init__doc__,
+".. method:: __init__()\n\n");
+
+static int
+updaterecord_init(_UpdateRecordObject *self,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ /* Free all previous resources when reinitialization */
+ if (self->record)
+ cr_updaterecord_free(self->record);
+
+ /* Init */
+ self->record = cr_updaterecord_new();
+ if (self->record == NULL) {
+ PyErr_SetString(CrErr_Exception, "UpdateRecord initialization failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+updaterecord_dealloc(_UpdateRecordObject *self)
+{
+ if (self->record)
+ cr_updaterecord_free(self->record);
+ Py_TYPE(self)->tp_free(self);
+}
+
+static PyObject *
+updaterecord_repr(G_GNUC_UNUSED _UpdateRecordObject *self)
+{
+ return PyUnicode_FromFormat("<createrepo_c.UpdateRecord object>");
+}
+
+/* UpdateRecord methods */
+
+PyDoc_STRVAR(append_reference__doc__,
+"append_reference(reference) -> None\n\n"
+"Append UpdateReference");
+
+static PyObject *
+append_reference(_UpdateRecordObject *self, PyObject *args)
+{
+ PyObject *pkg;
+ cr_UpdateReference *orig, *new;
+
+ if (!PyArg_ParseTuple(args, "O!:append_reference",
+ &UpdateReference_Type, &pkg))
+ return NULL;
+ if (check_UpdateRecordStatus(self))
+ return NULL;
+
+ orig = UpdateReference_FromPyObject(pkg);
+ new = cr_updatereference_copy(orig);
+ cr_updaterecord_append_reference(self->record, new);
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(append_collection__doc__,
+"append_collection(collection) -> None\n\n"
+"Append UpdateCollection");
+
+static PyObject *
+append_collection(_UpdateRecordObject *self, PyObject *args)
+{
+ PyObject *pkg;
+ cr_UpdateCollection *orig, *new;
+
+ if (!PyArg_ParseTuple(args, "O!:append_collection",
+ &UpdateCollection_Type, &pkg))
+ return NULL;
+ if (check_UpdateRecordStatus(self))
+ return NULL;
+
+ orig = UpdateCollection_FromPyObject(pkg);
+ new = cr_updatecollection_copy(orig);
+ cr_updaterecord_append_collection(self->record, new);
+ Py_RETURN_NONE;
+}
+
+
+PyDoc_STRVAR(copy__doc__,
+"copy() -> UpdateRecord\n\n"
+"Return copy of the UpdateRecord object");
+
+static PyObject *
+copy_updaterecord(_UpdateRecordObject *self, G_GNUC_UNUSED void *nothing)
+{
+ if (check_UpdateRecordStatus(self))
+ return NULL;
+ return Object_FromUpdateRecord(cr_updaterecord_copy(self->record));
+}
+
+static struct PyMethodDef updaterecord_methods[] = {
+ {"append_reference", (PyCFunction)append_reference, METH_VARARGS,
+ append_reference__doc__},
+ {"append_collection", (PyCFunction)append_collection, METH_VARARGS,
+ append_collection__doc__},
+ {"copy", (PyCFunction)copy_updaterecord, METH_NOARGS,
+ copy__doc__},
+ {NULL} /* sentinel */
+};
+
+/* Convertors for getsetters */
+
+/** Convert C object to PyObject.
+ * @param C object
+ * @return PyObject representation
+ */
+typedef PyObject *(*ConversionFromFunc)(void *);
+
+/** Check whether a single list element has a valid format.
+ * @param a single list element
+ * @return 0 if ok, 1 otherwise
+ */
+typedef int (*ConversionToCheckFunc)(PyObject *);
+
+/** Convert PyObject to C representation.
+ * @param PyObject
+ * @return C representation
+ */
+typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *);
+
+PyObject *
+PyObject_FromUpdateReference(cr_UpdateReference *ref)
+{
+ return Object_FromUpdateReference(cr_updatereference_copy(ref));
+}
+
+PyObject *
+PyObject_FromUpdateCollection(cr_UpdateCollection *col)
+{
+ return Object_FromUpdateCollection(cr_updatecollection_copy(col));
+}
+
+typedef struct {
+    size_t offset;          /*!< Offset of the list in cr_UpdateRecord */
+ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */
+ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */
+ ConversionToFunc t; /*!< Conversion func to C object from PyObject */
+} ListConvertor;
+
+/** List of convertors for converting lists in cr_UpdateRecord. */
+static ListConvertor list_convertors[] = {
+ { offsetof(cr_UpdateRecord, references),
+ (ConversionFromFunc) PyObject_FromUpdateReference,
+ (ConversionToCheckFunc) NULL,
+ (ConversionToFunc) NULL },
+ { offsetof(cr_UpdateRecord, collections),
+ (ConversionFromFunc) PyObject_FromUpdateCollection,
+ (ConversionToCheckFunc) NULL,
+ (ConversionToFunc) NULL },
+};
+
+/* getsetters */
+
+#define OFFSET(member) (void *) offsetof(cr_UpdateRecord, member)
+
+static PyObject *
+get_str(_UpdateRecordObject *self, void *member_offset)
+{
+ if (check_UpdateRecordStatus(self))
+ return NULL;
+ cr_UpdateRecord *rec = self->record;
+ char *str = *((char **) ((size_t) rec + (size_t) member_offset));
+ if (str == NULL)
+ Py_RETURN_NONE;
+ return PyUnicode_FromString(str);
+}
+
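+/* Parse the stored date string into a datetime.datetime. Both formats that
+ * appear in updateinfo.xml are tried: "YYYY-MM-DD HH:MM:SS" first, then the
+ * date-only "YYYY-MM-DD". */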
+static PyObject *
+get_datetime(_UpdateRecordObject *self, void *member_offset)
+{
+ PyDateTime_IMPORT;
+
+ if (check_UpdateRecordStatus(self))
+ return NULL;
+ cr_UpdateRecord *rec = self->record;
+ char *str = *((char **) ((size_t) rec + (size_t) member_offset));
+ if (str == NULL)
+ Py_RETURN_NONE;
+
+    struct tm dt;
+    memset(&dt, 0, sizeof(dt));
+    char *res = strptime(str, "%Y-%m-%d %H:%M:%S", &dt);
+    if (res == NULL) {
+        /* Fall back to the date-only format */
+        memset(&dt, 0, sizeof(dt));
+        res = strptime(str, "%Y-%m-%d", &dt);
+        if (res == NULL) {
+            PyErr_SetString(CrErr_Exception, "Invalid date");
+            return NULL;
+        }
+    }
+    return PyDateTime_FromDateAndTime(dt.tm_year + 1900,
+                                      dt.tm_mon + 1, dt.tm_mday,
+                                      dt.tm_hour, dt.tm_min, dt.tm_sec, 0);
+}
+
+static PyObject *
+get_list(_UpdateRecordObject *self, void *conv)
+{
+ ListConvertor *convertor = conv;
+ PyObject *list;
+    cr_UpdateRecord *rec = self->record;
+
+    if (check_UpdateRecordStatus(self))
+        return NULL;
+
+    GSList *glist = *((GSList **) ((size_t) rec + (size_t) convertor->offset));
+
+ if ((list = PyList_New(0)) == NULL)
+ return NULL;
+
+ for (GSList *elem = glist; elem; elem = g_slist_next(elem)) {
+ PyObject *obj = convertor->f(elem->data);
+ if (!obj) continue;
+ PyList_Append(list, obj);
+ Py_DECREF(obj);
+ }
+
+ return list;
+}
+
+static int
+set_str(_UpdateRecordObject *self, PyObject *value, void *member_offset)
+{
+ if (check_UpdateRecordStatus(self))
+ return -1;
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
+ return -1;
+ }
+ cr_UpdateRecord *rec = self->record;
+ char *str = cr_safe_string_chunk_insert(rec->chunk,
+ PyObject_ToStrOrNull(value));
+ *((char **) ((size_t) rec + (size_t) member_offset)) = str;
+ return 0;
+}
+
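+/* Serialize a datetime.datetime into the "YYYY-MM-DD HH:MM:SS" string form
+ * in which dates are kept inside cr_UpdateRecord. */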
+static int
+set_datetime(_UpdateRecordObject *self, PyObject *value, void *member_offset)
+{
+ PyDateTime_IMPORT;
+
+ if (check_UpdateRecordStatus(self))
+ return -1;
+ if (!PyDateTime_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "DateTime or None expected!");
+ return -1;
+ }
+ cr_UpdateRecord *rec = self->record;
+
+ /* Length is 20: yyyy-mm-dd HH:MM:SS */
+ char *date = malloc(20 * sizeof(char));
+ snprintf(date, 20, "%04d-%02d-%02d %02d:%02d:%02d",
+ PyDateTime_GET_YEAR(value), PyDateTime_GET_MONTH(value),
+ PyDateTime_GET_DAY(value), PyDateTime_DATE_GET_HOUR(value),
+ PyDateTime_DATE_GET_MINUTE(value), PyDateTime_DATE_GET_SECOND(value));
+
+ char *str = cr_safe_string_chunk_insert(rec->chunk, date);
+ free(date);
+ *((char **) ((size_t) rec + (size_t) member_offset)) = str;
+ return 0;
+}
+
+static PyGetSetDef updaterecord_getsetters[] = {
+ {"fromstr", (getter)get_str, (setter)set_str,
+ "Who issued this update", OFFSET(from)},
+ {"status", (getter)get_str, (setter)set_str,
+ "Status of the update", OFFSET(status)},
+ {"type", (getter)get_str, (setter)set_str,
+ "Update type", OFFSET(type)},
+ {"version", (getter)get_str, (setter)set_str,
+ "Version of update", OFFSET(version)},
+ {"id", (getter)get_str, (setter)set_str,
+ "Update id", OFFSET(id)},
+ {"title", (getter)get_str, (setter)set_str,
+ "Update title", OFFSET(title)},
+ {"issued_date", (getter)get_datetime, (setter)set_datetime,
+ "Date when the update was issued", OFFSET(issued_date)},
+ {"updated_date", (getter)get_datetime, (setter)set_datetime,
+ "Date when the update was updated", OFFSET(updated_date)},
+ {"rights", (getter)get_str, (setter)set_str,
+ "Copyrights", OFFSET(rights)},
+ {"release", (getter)get_str, (setter)set_str,
+ "Update release", OFFSET(release)},
+ {"pushcount", (getter)get_str, (setter)set_str,
+ "Pushcount", OFFSET(pushcount)},
+ {"severity", (getter)get_str, (setter)set_str,
+ "Severity", OFFSET(severity)},
+ {"summary", (getter)get_str, (setter)set_str,
+ "Short summary", OFFSET(summary)},
+ {"description", (getter)get_str, (setter)set_str,
+ "Description", OFFSET(description)},
+ {"solution", (getter)get_str, (setter)set_str,
+ "Solution", OFFSET(solution)},
+ {"references", (getter)get_list, (setter)NULL,
+ "List of UpdateReferences", &(list_convertors[0])},
+ {"collections", (getter)get_list, (setter)NULL,
+ "List of UpdateCollections", &(list_convertors[1])},
+ {NULL, NULL, NULL, NULL, NULL} /* sentinel */
+};
+
+/* Object */
+
+PyTypeObject UpdateRecord_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "createrepo_c.UpdateRecord", /* tp_name */
+ sizeof(_UpdateRecordObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor) updaterecord_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ (reprfunc) updaterecord_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
+ updaterecord_init__doc__, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ PyObject_SelfIter, /* tp_iter */
+ 0, /* tp_iternext */
+ updaterecord_methods, /* tp_methods */
+ 0, /* tp_members */
+ updaterecord_getsetters, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc) updaterecord_init, /* tp_init */
+ 0, /* tp_alloc */
+ updaterecord_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+};
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef CR_UPDATERECORD_PY_H
+#define CR_UPDATERECORD_PY_H
+
+#include "src/createrepo_c.h"
+
+extern PyTypeObject UpdateRecord_Type;
+
+#define UpdateRecordObject_Check(o) PyObject_TypeCheck(o, &UpdateRecord_Type)
+
+PyObject *Object_FromUpdateRecord(cr_UpdateRecord *rec);
+cr_UpdateRecord *UpdateRecord_FromPyObject(PyObject *o);
+
+#endif
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <Python.h>
+#include <assert.h>
+#include <stddef.h>
+
+#include "updatereference-py.h"
+#include "exception-py.h"
+#include "typeconversion.h"
+#include "contentstat-py.h"
+
+typedef struct {
+ PyObject_HEAD
+ cr_UpdateReference *reference;
+} _UpdateReferenceObject;
+
+PyObject *
+Object_FromUpdateReference(cr_UpdateReference *ref)
+{
+ PyObject *py_rec;
+
+ if (!ref) {
+ PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateReference pointer not NULL.");
+ return NULL;
+ }
+
+    py_rec = PyObject_CallObject((PyObject *) &UpdateReference_Type, NULL);
+    if (!py_rec)
+        return NULL;
+    cr_updatereference_free(((_UpdateReferenceObject *)py_rec)->reference);
+    ((_UpdateReferenceObject *)py_rec)->reference = ref;
+
+ return py_rec;
+}
+
+cr_UpdateReference *
+UpdateReference_FromPyObject(PyObject *o)
+{
+ if (!UpdateReferenceObject_Check(o)) {
+ PyErr_SetString(PyExc_TypeError, "Expected a UpdateReference object.");
+ return NULL;
+ }
+ return ((_UpdateReferenceObject *)o)->reference;
+}
+
+static int
+check_UpdateReferenceStatus(const _UpdateReferenceObject *self)
+{
+ assert(self != NULL);
+ assert(UpdateReferenceObject_Check(self));
+ if (self->reference == NULL) {
+ PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateReference object.");
+ return -1;
+ }
+ return 0;
+}
+
+/* Function on the type */
+
+static PyObject *
+updatereference_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ _UpdateReferenceObject *self = (_UpdateReferenceObject *)type->tp_alloc(type, 0);
+ if (self) {
+ self->reference = NULL;
+ }
+ return (PyObject *)self;
+}
+
+PyDoc_STRVAR(updatereference_init__doc__,
+".. method:: __init__()\n\n");
+
+static int
+updatereference_init(_UpdateReferenceObject *self,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
+{
+ /* Free all previous resources when reinitialization */
+ if (self->reference)
+ cr_updatereference_free(self->reference);
+
+ /* Init */
+ self->reference = cr_updatereference_new();
+ if (self->reference == NULL) {
+ PyErr_SetString(CrErr_Exception, "UpdateReference initialization failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+updatereference_dealloc(_UpdateReferenceObject *self)
+{
+ if (self->reference)
+ cr_updatereference_free(self->reference);
+ Py_TYPE(self)->tp_free(self);
+}
+
+static PyObject *
+updatereference_repr(_UpdateReferenceObject *self)
+{
+ if (self->reference->type)
+ return PyUnicode_FromFormat("<createrepo_c.UpdateReference %s object>",
+ self->reference->type);
+ else
+ return PyUnicode_FromFormat("<createrepo_c.UpdateReference object>");
+}
+
+/* UpdateReference methods */
+
+PyDoc_STRVAR(copy__doc__,
+"copy() -> UpdateReference\n\n"
+"Return copy of the UpdateReference object");
+
+static PyObject *
+copy_updatereference(_UpdateReferenceObject *self, G_GNUC_UNUSED void *nothing)
+{
+ if (check_UpdateReferenceStatus(self))
+ return NULL;
+ return Object_FromUpdateReference(cr_updatereference_copy(self->reference));
+}
+
+static struct PyMethodDef updatereference_methods[] = {
+ {"copy", (PyCFunction)copy_updatereference, METH_NOARGS,
+ copy__doc__},
+ {NULL} /* sentinel */
+};
+
+/* getsetters */
+
+#define OFFSET(member) (void *) offsetof(cr_UpdateReference, member)
+
+static PyObject *
+get_str(_UpdateReferenceObject *self, void *member_offset)
+{
+ if (check_UpdateReferenceStatus(self))
+ return NULL;
+ cr_UpdateReference *ref = self->reference;
+ char *str = *((char **) ((size_t) ref + (size_t) member_offset));
+ if (str == NULL)
+ Py_RETURN_NONE;
+ return PyUnicode_FromString(str);
+}
+
+static int
+set_str(_UpdateReferenceObject *self, PyObject *value, void *member_offset)
+{
+ if (check_UpdateReferenceStatus(self))
+ return -1;
+ if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!");
+ return -1;
+ }
+
+ cr_UpdateReference *ref = self->reference;
+ char *str = cr_safe_string_chunk_insert(ref->chunk,
+ PyObject_ToStrOrNull(value));
+
+ *((char **) ((size_t) ref + (size_t) member_offset)) = str;
+ return 0;
+}
+
+static PyGetSetDef updatereference_getsetters[] = {
+ {"href", (getter)get_str, (setter)set_str,
+ "Reference URL",OFFSET(href)},
+ {"id", (getter)get_str, (setter)set_str,
+ "ID", OFFSET(id)},
+ {"type", (getter)get_str, (setter)set_str,
+ "Type", OFFSET(type)},
+ {"title", (getter)get_str, (setter)set_str,
+ "Title", OFFSET(title)},
+ {NULL, NULL, NULL, NULL, NULL} /* sentinel */
+};
+
+/* Object */
+
+PyTypeObject UpdateReference_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "createrepo_c.UpdateReference", /* tp_name */
+ sizeof(_UpdateReferenceObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor) updatereference_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ (reprfunc) updatereference_repr,/* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
+ updatereference_init__doc__, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ PyObject_SelfIter, /* tp_iter */
+ 0, /* tp_iternext */
+ updatereference_methods, /* tp_methods */
+ 0, /* tp_members */
+ updatereference_getsetters, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc) updatereference_init,/* tp_init */
+ 0, /* tp_alloc */
+ updatereference_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+};
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef CR_UPDATEREFERENCE_PY_H
+#define CR_UPDATEREFERENCE_PY_H
+
+#include "src/createrepo_c.h"
+
+extern PyTypeObject UpdateReference_Type;
+
+#define UpdateReferenceObject_Check(o) PyObject_TypeCheck(o, &UpdateReference_Type)
+
+PyObject *Object_FromUpdateReference(cr_UpdateReference *rec);
+cr_UpdateReference *UpdateReference_FromPyObject(PyObject *o);
+
+#endif
#include "typeconversion.h"
#include "package-py.h"
#include "exception-py.h"
+#include "updaterecord-py.h"
PyObject *
-py_xml_dump_primary(PyObject *self, PyObject *args)
+py_xml_dump_primary(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
PyObject *py_pkg, *py_str;
char *xml;
GError *err = NULL;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "O!:py_xml_dump_primary", &Package_Type, &py_pkg))
return NULL;
return NULL;
}
- py_str = PyStringOrNone_FromString(xml);
+ py_str = PyUnicodeOrNone_FromString(xml);
free(xml);
return py_str;
}
PyObject *
-py_xml_dump_filelists(PyObject *self, PyObject *args)
+py_xml_dump_filelists(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
PyObject *py_pkg, *py_str;
char *xml;
GError *err = NULL;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "O!:py_xml_dump_filelists", &Package_Type, &py_pkg))
return NULL;
return NULL;
}
- py_str = PyStringOrNone_FromString(xml);
+ py_str = PyUnicodeOrNone_FromString(xml);
free(xml);
return py_str;
}
PyObject *
-py_xml_dump_other(PyObject *self, PyObject *args)
+py_xml_dump_other(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
PyObject *py_pkg, *py_str;
char *xml;
GError *err = NULL;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "O!:py_xml_dump_other", &Package_Type, &py_pkg))
return NULL;
return NULL;
}
- py_str = PyStringOrNone_FromString(xml);
+ py_str = PyUnicodeOrNone_FromString(xml);
free(xml);
return py_str;
}
PyObject *
-py_xml_dump(PyObject *self, PyObject *args)
+py_xml_dump(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
PyObject *py_pkg, *tuple;
struct cr_XmlStruct xml_res;
GError *err = NULL;
- CR_UNUSED(self);
-
if (!PyArg_ParseTuple(args, "O!:py_xml_dump", &Package_Type, &py_pkg))
return NULL;
return NULL;
}
- PyTuple_SetItem(tuple, 0, PyStringOrNone_FromString(xml_res.primary));
- PyTuple_SetItem(tuple, 1, PyStringOrNone_FromString(xml_res.filelists));
- PyTuple_SetItem(tuple, 2, PyStringOrNone_FromString(xml_res.other));
+ PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(xml_res.primary));
+ PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(xml_res.filelists));
+ PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(xml_res.other));
free(xml_res.primary);
free(xml_res.filelists);
return tuple;
}
+
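+/* Python binding for cr_xml_dump_updaterecord(): serialize a single
+ * UpdateRecord into its XML chunk. */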
+PyObject *
+py_xml_dump_updaterecord(G_GNUC_UNUSED PyObject *self, PyObject *args)
+{
+ PyObject *py_rec, *py_str;
+ char *xml = NULL;
+ GError *err = NULL;
+
+ if (!PyArg_ParseTuple(args, "O!:py_xml_dump_updaterecord",
+ &UpdateRecord_Type, &py_rec))
+ return NULL;
+
+ xml = cr_xml_dump_updaterecord(UpdateRecord_FromPyObject(py_rec), &err);
+ if (err) {
+ nice_exception(&err, NULL);
+ free(xml);
+ return NULL;
+ }
+
+ py_str = PyUnicodeOrNone_FromString(xml);
+ free(xml);
+ return py_str;
+}
PyObject *py_xml_dump(PyObject *self, PyObject *args);
+PyDoc_STRVAR(xml_dump_updaterecord__doc__,
+"xml_dump_updaterecord(pkg) -> str\n\n"
+"Generate xml chunk from UpdateRecord");
+
+PyObject *py_xml_dump_updaterecord(PyObject *self, PyObject *args);
+
#endif
/* Function on the type */
static PyObject *
-xmlfile_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+xmlfile_new(PyTypeObject *type,
+ G_GNUC_UNUSED PyObject *args,
+ G_GNUC_UNUSED PyObject *kwds)
{
- CR_UNUSED(args);
- CR_UNUSED(kwds);
_XmlFileObject *self = (_XmlFileObject *)type->tp_alloc(type, 0);
if (self) {
self->xmlfile = NULL;
" :arg contentstat: ContentStat object to gather content statistics or None");
static int
-xmlfile_init(_XmlFileObject *self, PyObject *args, PyObject *kwds)
+xmlfile_init(_XmlFileObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds)
{
char *path;
int type, comtype;
PyObject *py_stat, *ret;
cr_ContentStat *stat;
- CR_UNUSED(kwds);
-
if (!PyArg_ParseTuple(args, "siiO|:xmlfile_init",
&path, &type, &comtype, &py_stat))
return -1;
type = "Unknown";
}
- return PyString_FromFormat("<createrepo_c.XmlFile %s object>", type);
+ return PyUnicode_FromFormat("<createrepo_c.XmlFile %s object>", type);
}
/* XmlFile methods */
"Close the XML file");
static PyObject *
-xmlfile_close(_XmlFileObject *self, void *nothing)
+xmlfile_close(_XmlFileObject *self, G_GNUC_UNUSED void *nothing)
{
GError *err = NULL;
- CR_UNUSED(nothing);
-
if (self->xmlfile) {
cr_xmlfile_close(self->xmlfile, &err);
self->xmlfile = NULL;
};
PyTypeObject XmlFile_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
+ PyVarObject_HEAD_INIT(NULL, 0)
"createrepo_c.XmlFile", /* tp_name */
sizeof(_XmlFileObject), /* tp_basicsize */
0, /* tp_itemsize */
#include "typeconversion.h"
#include "package-py.h"
#include "repomd-py.h"
+#include "updateinfo-py.h"
#include "exception-py.h"
typedef struct {
}
PyObject *
-py_xml_parse_primary(PyObject *self, PyObject *args)
+py_xml_parse_primary(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
- CR_UNUSED(self);
-
char *filename;
int do_files;
PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb;
}
PyObject *
-py_xml_parse_filelists(PyObject *self, PyObject *args)
+py_xml_parse_filelists(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
- CR_UNUSED(self);
-
char *filename;
PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb;
CbData cbdata;
}
PyObject *
-py_xml_parse_other(PyObject *self, PyObject *args)
+py_xml_parse_other(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
- CR_UNUSED(self);
-
char *filename;
PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb;
CbData cbdata;
}
PyObject *
-py_xml_parse_repomd(PyObject *self, PyObject *args)
+py_xml_parse_repomd(G_GNUC_UNUSED PyObject *self, PyObject *args)
{
- CR_UNUSED(self);
-
char *filename;
PyObject *py_repomd, *py_warningcb;
CbData cbdata;
Py_RETURN_NONE;
}
+
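+/* Python binding for cr_xml_parse_updateinfo(): fill an existing UpdateInfo
+ * object from an updateinfo.xml file, forwarding parser warnings to the
+ * optional Python warningcb. */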
+PyObject *
+py_xml_parse_updateinfo(G_GNUC_UNUSED PyObject *self, PyObject *args)
+{
+ char *filename;
+ PyObject *py_updateinfo, *py_warningcb;
+ CbData cbdata;
+ cr_UpdateInfo *updateinfo;
+ GError *tmp_err = NULL;
+
+ if (!PyArg_ParseTuple(args, "sO!O:py_xml_parse_updateinfo",
+ &filename,
+ &UpdateInfo_Type,
+ &py_updateinfo,
+ &py_warningcb)) {
+ return NULL;
+ }
+
+ if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) {
+ PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None");
+ return NULL;
+ }
+
+ Py_XINCREF(py_updateinfo);
+ Py_XINCREF(py_warningcb);
+
+ cr_XmlParserWarningCb ptr_c_warningcb = NULL;
+
+ if (py_warningcb != Py_None)
+ ptr_c_warningcb = c_warningcb;
+
+ cbdata.py_newpkgcb = NULL;
+ cbdata.py_pkgcb = NULL;
+ cbdata.py_warningcb = py_warningcb;
+ cbdata.py_pkg = NULL;
+
+ updateinfo = UpdateInfo_FromPyObject(py_updateinfo);
+
+ cr_xml_parse_updateinfo(filename,
+ updateinfo,
+ ptr_c_warningcb,
+ &cbdata,
+ &tmp_err);
+
+ Py_XDECREF(py_updateinfo);
+ Py_XDECREF(py_warningcb);
+
+ if (tmp_err) {
+ nice_exception(&tmp_err, NULL);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
PyObject *py_xml_parse_repomd(PyObject *self, PyObject *args);
+PyDoc_STRVAR(xml_parse_updateinfo__doc__,
+"xml_parse_updateinfo(filename, updateinfo_object, warningcb) -> None\n\n"
+"Parse updateinfo.xml");
+
+PyObject *py_xml_parse_updateinfo(PyObject *self, PyObject *args);
+
#endif
#include "repomd_internal.h"
#include "compression_wrapper.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define LOCATION_HREF_PREFIX "repodata/"
#define DEFAULT_DATABASE_VERSION 10
#define BUFFER_SIZE 8192
cr_RepomdRecord *md = g_malloc0(sizeof(*md));
md->chunk = g_string_chunk_new(128);
md->type = cr_safe_string_chunk_insert(md->chunk, type);
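+    /* Use -1, not 0, as the "size not yet set" sentinel: 0 is a valid open
+     * size (an empty file), so it cannot mark a missing value. */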
+ md->size_open = G_GINT64_CONSTANT(-1);
+
if (path) {
gchar *filename = cr_get_filename(path);
gchar *location_href = g_strconcat(LOCATION_HREF_PREFIX, filename, NULL);
md->location_href = g_string_chunk_insert(md->chunk, location_href);
g_free(location_href);
}
+
return md;
}
assert(!err || *err == NULL);
if (!g_file_test(filename, G_FILE_TEST_IS_REGULAR)) {
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_NOFILE,
+ g_set_error(err, ERR_DOMAIN, CRE_NOFILE,
"File %s doesn't exists or not a regular file", filename);
return NULL;
}
return NULL;
}
- gint64 size = 0;
+ gint64 size = G_GINT64_CONSTANT(0);
long readed;
unsigned char buffer[BUFFER_SIZE];
result->checksum = cr_checksum_final(checksum, NULL);
result->size = size;
} else {
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_MEMORY,
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
"Cannot allocate memory");
}
assert(!err || *err == NULL);
if (!(md->location_real) || !strlen(md->location_real)) {
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Empty locations in repomd record object.");
return CRE_BADARG;
}
if (!g_file_test(path, G_FILE_TEST_IS_REGULAR)) {
// File doesn't exists
g_warning("%s: File %s doesn't exists", __func__, path);
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_NOFILE,
+ g_set_error(err, ERR_DOMAIN, CRE_NOFILE,
"File %s doesn't exists or not a regular file", path);
return CRE_NOFILE;
}
gchar *chksum;
chksum = cr_checksum_file(path, checksum_t, &tmp_err);
- if (tmp_err) {
+ if (!chksum) {
int code = tmp_err->code;
g_propagate_prefixed_error(err, tmp_err,
"Error while checksum calculation of %s:", path);
// Compute checksum of non compressed content and its size
- if (!md->checksum_open_type || !md->checksum_open || !md->size_open) {
+ if (!md->checksum_open_type || !md->checksum_open || md->size_open == G_GINT64_CONSTANT(-1)) {
cr_CompressionType com_type = cr_detect_compression(path, &tmp_err);
if (tmp_err) {
int code = tmp_err->code;
md->checksum_open_type = g_string_chunk_insert(md->chunk, checksum_str);
md->checksum_open = g_string_chunk_insert(md->chunk, open_stat->checksum);
- if (!md->size_open)
+ if (md->size_open == G_GINT64_CONSTANT(-1))
md->size_open = open_stat->size;
g_free(open_stat->checksum);
g_free(open_stat);
}
md->checksum_open_type = NULL;
md->checksum_open = NULL;
- md->size_open = -1;
+ md->size_open = G_GINT64_CONSTANT(-1);
}
}
}
} else {
g_warning("%s: Stat on file \"%s\" failed", __func__, path);
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_STAT,
- "Stat() on %s failed: %s", path, strerror(errno));
+ g_set_error(err, ERR_DOMAIN, CRE_STAT,
+ "Stat() on %s failed: %s", path, g_strerror(errno));
return CRE_STAT;
}
}
cr_CompressionType record_compression,
GError **err)
{
+ int ret = CRE_OK;
const char *suffix;
gchar *path, *cpath;
gchar *clocation_real, *clocation_href;
- gchar *checksum, *cchecksum;
+ gchar *checksum = NULL;
+ gchar *cchecksum = NULL;
int readed;
char buf[BUFFER_SIZE];
CR_FILE *cw_plain;
CR_FILE *cw_compressed;
- gint64 gf_size = -1, cgf_size = -1;
- gint64 gf_time = -1, cgf_time = -1;
+ gint64 gf_size = G_GINT64_CONSTANT(-1), cgf_size = G_GINT64_CONSTANT(-1);
+ gint64 gf_time = G_GINT64_CONSTANT(-1), cgf_time = G_GINT64_CONSTANT(-1);
struct stat gf_stat, cgf_stat;
const char *checksum_str = cr_checksum_name_str(checksum_type);
GError *tmp_err = NULL;
assert(!err || *err == NULL);
if (!(record->location_real) || !strlen(record->location_real)) {
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Empty locations in repomd record object");
return CRE_BADARG;
}
if (!g_file_test(record->location_real, G_FILE_TEST_IS_REGULAR)) {
// File doesn't exists
g_warning("%s: File %s doesn't exists", __func__, record->location_real);
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_NOFILE,
+ g_set_error(err, ERR_DOMAIN, CRE_NOFILE,
"File %s doesn't exists or not a regular file",
record->location_real);
        return CRE_NOFILE;
CR_CW_NO_COMPRESSION,
&tmp_err);
if (!cw_plain) {
- int code = tmp_err->code;
+ ret = tmp_err->code;
g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", path);
- return code;
+ return ret;
}
cw_compressed = cr_open(cpath,
record_compression,
&tmp_err);
if (!cw_compressed) {
- int code = tmp_err->code;
+ ret = tmp_err->code;
g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", cpath);
- return code;
+ return ret;
}
while ((readed = cr_read(cw_plain, buf, BUFFER_SIZE, &tmp_err)) > 0) {
cr_close(cw_plain, NULL);
if (tmp_err) {
- int code = tmp_err->code;
+ ret = tmp_err->code;
cr_close(cw_compressed, NULL);
g_debug("%s: Error while repomd record compression: %s", __func__,
tmp_err->message);
g_propagate_prefixed_error(err, tmp_err,
"Error while compression %s -> %s:", path, cpath);
- return code;
+ return ret;
}
cr_close(cw_compressed, &tmp_err);
if (tmp_err) {
- int code = tmp_err->code;
+ ret = tmp_err->code;
g_propagate_prefixed_error(err, tmp_err,
"Error while closing %s: ", path);
- return code;
+ return ret;
}
-
// Compute checksums
checksum = cr_checksum_file(path, checksum_type, &tmp_err);
- if (tmp_err) {
- int code = tmp_err->code;
+ if (!checksum) {
+ ret = tmp_err->code;
g_propagate_prefixed_error(err, tmp_err,
"Error while checksum calculation:");
- return code;
+ goto end;
}
cchecksum = cr_checksum_file(cpath, checksum_type, &tmp_err);
- if (tmp_err) {
- int code = tmp_err->code;
+ if (!cchecksum) {
+ ret = tmp_err->code;
g_propagate_prefixed_error(err, tmp_err,
"Error while checksum calculation:");
- return code;
+ goto end;
}
if (stat(path, &gf_stat)) {
g_debug("%s: Error while stat() on %s", __func__, path);
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_IO,
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
"Cannot stat %s", path);
- return CRE_IO;
- } else {
- gf_size = gf_stat.st_size;
- gf_time = gf_stat.st_mtime;
+ ret = CRE_IO;
+ goto end;
}
+ gf_size = gf_stat.st_size;
+ gf_time = gf_stat.st_mtime;
+
if (stat(cpath, &cgf_stat)) {
g_debug("%s: Error while stat() on %s", __func__, cpath);
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_IO,
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
"Cannot stat %s", cpath);
- return CRE_IO;
- } else {
- cgf_size = cgf_stat.st_size;
- cgf_time = cgf_stat.st_mtime;
+ ret = CRE_IO;
+ goto end;
}
+ cgf_size = cgf_stat.st_size;
+ cgf_time = cgf_stat.st_mtime;
+
// Results
record->checksum_open_type = NULL;
record->timestamp = gf_time;
record->size = gf_size;
- record->size_open = -1;
+ record->size_open = G_GINT64_CONSTANT(-1);
crecord->checksum = g_string_chunk_insert(crecord->chunk, cchecksum);
crecord->checksum_type = g_string_chunk_insert(crecord->chunk, checksum_str);
crecord->size = cgf_size;
crecord->size_open = gf_size;
+end:
g_free(checksum);
g_free(cchecksum);
- return CRE_OK;
+ return ret;
}
int
if (!(md->location_real) || !strlen(md->location_real)) {
g_debug("Empty locations in repomd record object");
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Empty locations in repomd record object");
return CRE_BADARG;
}
if (!md->checksum) {
g_debug("Record doesn't contain checksum");
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_BADARG,
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
"Record doesn't contain checksum");
return CRE_BADARG;
}
g_critical("%s: Cannot delete old %s",
__func__,
new_location_real);
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_IO,
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
"File with name %s already exists and cannot be deleted",
new_location_real);
g_free(new_location_real);
__func__,
md->location_real,
new_location_real);
- g_set_error(err, CR_REPOMD_RECORD_ERROR, CRE_IO,
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
"Cannot rename %s to %s", md->location_real,
new_location_real);
g_free(new_location_real);
return repomd;
}
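+/* Make a deep copy of a cr_Repomd object. Strings are duplicated into the
+ * new object's chunk; each tag/record list is rebuilt via the add/set
+ * helpers and then reversed to preserve the original ordering. */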
+cr_Repomd *
+cr_repomd_copy(cr_Repomd *orig)
+{
+ cr_Repomd *new = cr_repomd_new();
+
+ new->revision = cr_safe_string_chunk_insert(new->chunk, orig->revision);
+ new->repoid = cr_safe_string_chunk_insert(new->chunk, orig->repoid);
+ new->repoid_type = cr_safe_string_chunk_insert(new->chunk, orig->repoid_type);
+ new->contenthash = cr_safe_string_chunk_insert(new->chunk, orig->contenthash);
+ new->contenthash_type = cr_safe_string_chunk_insert(new->chunk, orig->contenthash_type);
+
+ for (GSList *elem = orig->repo_tags; elem; elem = g_slist_next(elem)) {
+ gchar *str = elem->data;
+ cr_repomd_add_repo_tag(new, str);
+ }
+ new->repo_tags = g_slist_reverse(new->repo_tags);
+
+ for (GSList *elem = orig->content_tags; elem; elem = g_slist_next(elem)) {
+ gchar *str = elem->data;
+ cr_repomd_add_content_tag(new, str);
+ }
+ new->content_tags = g_slist_reverse(new->content_tags);
+
+ for (GSList *elem = orig->distro_tags; elem; elem = g_slist_next(elem)) {
+ cr_DistroTag *tag = elem->data;
+ cr_repomd_add_distro_tag(new, tag->cpeid, tag->val);
+ }
+ new->distro_tags = g_slist_reverse(new->distro_tags);
+
+ for (GSList *elem = orig->records; elem; elem = g_slist_next(elem)) {
+ cr_RepomdRecord *rec = elem->data;
+ rec = cr_repomd_record_copy(rec);
+ cr_repomd_set_record(new, rec);
+ }
+ new->records = g_slist_reverse(new->records);
+
+ return new;
+}
+
void
cr_repomd_free(cr_Repomd *repomd)
{
repomd->repoid_type = cr_safe_string_chunk_insert(repomd->chunk, type);
}
+void
+cr_repomd_set_contenthash(cr_Repomd *repomd, const char *hash, const char *type)
+{
+ if (!repomd) return;
+ repomd->contenthash = cr_safe_string_chunk_insert(repomd->chunk, hash);
+ repomd->contenthash_type = cr_safe_string_chunk_insert(repomd->chunk, type);
+}
+
void
cr_repomd_add_distro_tag(cr_Repomd *repomd,
const char *cpeid,
void
cr_repomd_detach_record(cr_Repomd *repomd, cr_RepomdRecord *rec)
{
- if (!repomd || !rec);
+ if (!repomd || !rec) return;
repomd->records = g_slist_remove(repomd->records, rec);
}
+void
+cr_repomd_remove_record(cr_Repomd *repomd, const char *type)
+{
+ cr_RepomdRecord *rec = cr_repomd_get_record(repomd, type);
+ if (!rec) return;
+ cr_repomd_detach_record(repomd, rec);
+ cr_repomd_record_free(rec);
+}
+
cr_RepomdRecord *
cr_repomd_get_record(cr_Repomd *repomd, const char *type)
{
extern "C" {
#endif
+#include <glib.h>
#include "checksum.h"
#include "compression_wrapper.h"
#include "package.h"
*/
typedef struct {
gchar *revision; /*!< Revision */
- gchar *repoid; /*!< RepoId */
- gchar *repoid_type; /*!< RepoId type ("sha256", ...) */
+ gchar *repoid; /*!< OBSOLETE, replaced by contenthash */
+ gchar *repoid_type; /*!< OBSOLETE, replaced by contenthash_type */
+ gchar *contenthash; /*!< Content hash */
+ gchar *contenthash_type; /*!< Content hash type ("sha256", ...) */
GSList *repo_tags; /*!< List of strings */
GSList *content_tags; /*!< List of strings */
GSList *distro_tags; /*!< List of cr_DistroTag* */
*/
cr_Repomd *cr_repomd_new();
+/** Create a copy of a cr_Repomd object
+ * @param repomd cr_Repomd object
+ * @return Copy of the input cr_Repomd object
+ */
+cr_Repomd *cr_repomd_copy(cr_Repomd *repomd);
+
/** Set cr_Repomd record into cr_Repomd object.
* @param repomd cr_Repomd object
* @param record cr_RepomdRecord object
*/
void cr_repomd_set_revision(cr_Repomd *repomd, const char *revision);
-/** Set a repoid
+/** Set a repoid - OBSOLETE, use cr_repomd_set_contenthash instead
* @param repomd cr_Repomd object
- * @param type Type of hash function used to calculate repoid
* @param repoid RepoId
+ * @param type Type of hash function used to calculate repoid
*/
void cr_repomd_set_repoid(cr_Repomd *repomd,
const char *repoid,
const char *type);
+/** Set a contenthash
+ * @param repomd cr_Repomd object
+ * @param hash Content hash
+ * @param type Type of hash function
+ */
+void cr_repomd_set_contenthash(cr_Repomd *repomd,
+ const char *hash,
+ const char *type);
+
/** Add distro tag.
* @param repomd cr_Repomd object
* @param cpeid cpeid string (could be NULL)
/** Get repomd record from the repomd object.
* @param repomd cr_Repomd object
- * @param type Type of the record
+ * @param type Type of the record ("primary", "primary_db", ..)
* @return Pointer to a record of desired type or NULL
*/
cr_RepomdRecord *cr_repomd_get_record(cr_Repomd *repomd, const char *type);
*/
void cr_repomd_detach_record(cr_Repomd *repomd, cr_RepomdRecord *rec);
+/** Remove the first record of the specified type
+ * @param repomd cr_Repomd object
+ * @param type Type of the record ("primary", "primary_db", ...)
+ */
+void cr_repomd_remove_record(cr_Repomd *repomd, const char *type);
+
/** Records are stored in order they were added to the repomd.
* Because sometimes deterministic output is desirable this function
* exists.
#include <assert.h>
#include <string.h>
#include <stdlib.h>
+#include <errno.h>
+#include <libxml/encoding.h>
#include "misc.h"
#include "sqlite.h"
#include "error.h"
+#include "xml_dump.h"
-#define ENCODED_PACKAGE_FILE_FILES 2048
-#define ENCODED_PACKAGE_FILE_TYPES 60
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+#define ENCODED_PACKAGE_FILE_FILES 2048
+#define ENCODED_PACKAGE_FILE_TYPES 60
struct _DbPrimaryStatements {
sqlite3 *db;
sqlite3_stmt *conflicts_handle;
sqlite3_stmt *obsoletes_handle;
sqlite3_stmt *requires_handle;
+ sqlite3_stmt *suggests_handle;
+ sqlite3_stmt *enhances_handle;
+ sqlite3_stmt *recommends_handle;
+ sqlite3_stmt *supplements_handle;
sqlite3_stmt *files_handle;
};
sqlite3_stmt *changelog_handle;
};
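+/* A thin wrapper over sqlite3_bind_text() that sanitizes the bound string
+ * first: values that are not valid UTF-8, or that contain control
+ * characters, are treated as latin-1 and converted to UTF-8 into a
+ * temporary buffer. Since that buffer is freed right away, the destructor
+ * is forced to SQLITE_TRANSIENT so SQLite makes its own copy. */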
+static inline int cr_sqlite3_bind_text(sqlite3_stmt *stmt, int i,
+ const char *orig_content, int len,
+ void(*destructor)(void *))
+{
+ int ret;
+ int free_content = 0;
+ unsigned char *content;
+
+ if (!orig_content) {
+ content = (unsigned char *) orig_content;
+ } else if (xmlCheckUTF8((const unsigned char *) orig_content)
+ && !cr_hascontrollchars((const unsigned char *) orig_content)) {
+ content = (unsigned char *) orig_content;
+ } else {
+ destructor = SQLITE_TRANSIENT;
+ size_t llen = strlen((const char *) orig_content);
+ content = malloc(sizeof(unsigned char)*llen*2 + 1);
+ cr_latin1_to_utf8((const unsigned char *) orig_content, content);
+ free_content = 1;
+ }
+
+ ret = sqlite3_bind_text(stmt, i, (char *) content, len, destructor);
+
+ if (free_content)
+ free(content);
+
+ return ret;
+}
+
/*
* Base DB operation
* - Open db
rc = sqlite3_open(path, &db);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not open SQL database: %s", sqlite3_errmsg(db));
sqlite3_close(db);
db = NULL;
sql = "CREATE TABLE db_info (dbversion INTEGER, checksum TEXT)";
rc = sqlite3_exec(db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create db_info table: %s",
sqlite3_errmsg (db));
}
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create packages table: %s",
sqlite3_errmsg (db));
return;
" pkgKey INTEGER)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create files table: %s",
sqlite3_errmsg (db));
return;
" release TEXT,"
" pkgKey INTEGER %s)";
- const char *deps[] = { "requires", "provides", "conflicts", "obsoletes", NULL };
+ const char *deps[] = { "requires", "provides", "conflicts", "obsoletes",
+ "suggests", "enhances", "recommends", "supplements",
+ NULL };
int i;
for (i = 0; deps[i]; i++) {
g_free (query);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create %s table: %s",
deps[i], sqlite3_errmsg (db));
return;
" DELETE FROM provides WHERE pkgKey = old.pkgKey;"
" DELETE FROM conflicts WHERE pkgKey = old.pkgKey;"
" DELETE FROM obsoletes WHERE pkgKey = old.pkgKey;"
+ " DELETE FROM suggests WHERE pkgKey = old.pkgKey;"
+ " DELETE FROM enhances WHERE pkgKey = old.pkgKey;"
+ " DELETE FROM recommends WHERE pkgKey = old.pkgKey;"
+ " DELETE FROM supplements WHERE pkgKey = old.pkgKey;"
" END;";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create removals trigger: %s",
sqlite3_errmsg (db));
return;
" pkgId TEXT)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create packages table: %s",
sqlite3_errmsg (db));
return;
" filetypes TEXT)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create filelist table: %s",
sqlite3_errmsg (db));
return;
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create remove_filelist trigger: %s",
sqlite3_errmsg (db));
return;
" pkgId TEXT)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create packages table: %s",
sqlite3_errmsg (db));
return;
" changelog TEXT)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create changelog table: %s",
sqlite3_errmsg (db));
return;
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create remove_changelogs trigger: %s",
sqlite3_errmsg (db));
return;
static void
-db_tweak(sqlite3 *db, GError **err)
+db_tweak(sqlite3 *db, G_GNUC_UNUSED GError **err)
{
- CR_UNUSED(err);
-
assert(!err || *err == NULL);
// Do not wait for disk writes to be fully
sql = "CREATE INDEX IF NOT EXISTS packagename ON packages (name)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create packagename index: %s",
sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS packageId ON packages (pkgId)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create packageId index: %s",
sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS filenames ON files (name)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create filenames index: %s",
sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS pkgfiles ON files (pkgKey)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create index on files table: %s",
sqlite3_errmsg (db));
return;
"provides",
"conflicts",
"obsoletes",
+ "suggests",
+ "enhances",
+ "recommends",
+ "supplements",
NULL
};
g_free (query);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create index on %s table: %s",
deps[i], sqlite3_errmsg (db));
return;
rc = sqlite3_exec (db, query, NULL, NULL, NULL);
g_free(query);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create %sname index: %s",
deps[i], sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS keyfile ON filelist (pkgKey)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create keyfile index: %s",
sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS pkgId ON packages (pkgId)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create pkgId index: %s",
sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS dirnames ON filelist (dirname)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create dirnames index: %s",
sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS keychange ON changelog (pkgKey)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create keychange index: %s",
sqlite3_errmsg (db));
return;
sql = "CREATE INDEX IF NOT EXISTS pkgId ON packages (pkgId)";
rc = sqlite3_exec (db, sql, NULL, NULL, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not create pkgId index: %s",
sqlite3_errmsg (db));
return;
/* Prepare insert statement */
rc = sqlite3_prepare_v2(sqlitedb->db, query, -1, &handle, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Cannot prepare db_info update: %s",
sqlite3_errmsg(sqlitedb->db));
g_critical("%s: Cannot prepare db_info update statement: %s",
/* Perform insert */
sqlite3_bind_int(handle, 1, CR_DB_CACHE_DBVERSION);
- sqlite3_bind_text(handle, 2, checksum, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text(handle, 2, checksum, -1, SQLITE_STATIC);
rc = sqlite3_step(handle);
if (rc != SQLITE_DONE) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Cannot update dbinfo table: %s",
sqlite3_errmsg (sqlitedb->db));
g_critical("%s: Cannot update dbinfo table: %s",
rc = sqlite3_finalize(handle);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Cannot update dbinfo table: %s",
sqlite3_errmsg (sqlitedb->db));
g_critical("%s: Cannot update dbinfo table: %s",
rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Cannot prepare packages insertion: %s",
sqlite3_errmsg (db));
sqlite3_finalize (handle);
return str;
}
-
static void
db_package_write (sqlite3 *db,
sqlite3_stmt *handle,
assert(!err || *err == NULL);
- sqlite3_bind_text (handle, 1, p->pkgId, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 2, p->name, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 3, p->arch, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 4, p->version, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 5, p->epoch, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 6, p->release, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 7, p->summary, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 8, p->description, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 9, force_null(p->url), -1, SQLITE_STATIC); // {null}
+ cr_sqlite3_bind_text (handle, 1, p->pkgId, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 2, p->name, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 3, p->arch, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 4, p->version, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 5, p->epoch, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 6, p->release, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 7, p->summary, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 8, p->description, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 9, force_null(p->url), -1, SQLITE_STATIC); // {null}
sqlite3_bind_int (handle, 10, p->time_file);
sqlite3_bind_int (handle, 11, p->time_build);
- sqlite3_bind_text (handle, 12, p->rpm_license, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 13, prevent_null(p->rpm_vendor), -1, SQLITE_STATIC); // ""
- sqlite3_bind_text (handle, 14, p->rpm_group, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 15, p->rpm_buildhost, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 16, prevent_null(p->rpm_sourcerpm), -1, SQLITE_STATIC); // ""
+ cr_sqlite3_bind_text (handle, 12, p->rpm_license, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 13, prevent_null(p->rpm_vendor), -1, SQLITE_STATIC); // ""
+ cr_sqlite3_bind_text (handle, 14, p->rpm_group, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 15, p->rpm_buildhost, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 16, prevent_null(p->rpm_sourcerpm), -1, SQLITE_STATIC); // ""
sqlite3_bind_int (handle, 17, p->rpm_header_start);
sqlite3_bind_int (handle, 18, p->rpm_header_end);
- sqlite3_bind_text (handle, 19, force_null(p->rpm_packager), -1, SQLITE_STATIC); // {null}
+ cr_sqlite3_bind_text (handle, 19, force_null(p->rpm_packager), -1, SQLITE_STATIC); // {null}
sqlite3_bind_int64(handle, 20, p->size_package);
sqlite3_bind_int64(handle, 21, p->size_installed);
sqlite3_bind_int64(handle, 22, p->size_archive);
- sqlite3_bind_text (handle, 23, p->location_href, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 24, force_null(p->location_base), -1, SQLITE_STATIC); // {null}
- sqlite3_bind_text (handle, 25, p->checksum_type, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 23, p->location_href, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 24, force_null(p->location_base), -1, SQLITE_STATIC); // {null}
+ cr_sqlite3_bind_text (handle, 25, p->checksum_type, -1, SQLITE_STATIC);
rc = sqlite3_step (handle);
sqlite3_reset (handle);
} else {
g_critical ("Error adding package to db: %s",
sqlite3_errmsg(db));
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Error adding package to db: %s",
sqlite3_errmsg(db));
}
g_free (query);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Cannot prepare dependency insertion: %s",
sqlite3_errmsg (db));
sqlite3_finalize (handle);
assert(!err || *err == NULL);
- sqlite3_bind_text (handle, 1, dep->name, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 2, dep->flags, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 3, dep->epoch, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 4, dep->version, -1, SQLITE_STATIC);
- sqlite3_bind_text (handle, 5, dep->release, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 1, dep->name, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 2, dep->flags, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 3, dep->epoch, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 4, dep->version, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 5, dep->release, -1, SQLITE_STATIC);
sqlite3_bind_int (handle, 6, pkgKey);
if (isRequirement) {
if (dep->pre)
- sqlite3_bind_text (handle, 7, "TRUE", -1, SQLITE_TRANSIENT);
+ cr_sqlite3_bind_text (handle, 7, "TRUE", -1, SQLITE_TRANSIENT);
else
- sqlite3_bind_text (handle, 7, "FALSE", -1, SQLITE_TRANSIENT);
+ cr_sqlite3_bind_text (handle, 7, "FALSE", -1, SQLITE_TRANSIENT);
}
rc = sqlite3_step (handle);
if (rc != SQLITE_DONE) {
g_critical ("Error adding package dependency to db: %s",
sqlite3_errmsg (db));
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Error adding package dependency to db: %s",
sqlite3_errmsg(db));
}
rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not prepare file insertion: %s",
sqlite3_errmsg (db));
sqlite3_finalize (handle);
file_type = "file";
}
- sqlite3_bind_text (handle, 1, fullpath, -1, SQLITE_TRANSIENT);
+ cr_sqlite3_bind_text (handle, 1, fullpath, -1, SQLITE_TRANSIENT);
g_free(fullpath);
- sqlite3_bind_text (handle, 2, file_type, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 2, file_type, -1, SQLITE_STATIC);
sqlite3_bind_int (handle, 3, pkgKey);
rc = sqlite3_step (handle);
if (rc != SQLITE_DONE) {
g_critical ("Error adding package file to db: %s",
sqlite3_errmsg (db));
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Error adding package file to db: %s",
sqlite3_errmsg(db));
}
rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not prepare filelist insertion: %s",
sqlite3_errmsg (db));
sqlite3_finalize (handle);
}
sqlite3_bind_int (handle, 1, pkgKey);
- sqlite3_bind_text(handle, 2, (const char *) key, (int) key_len, SQLITE_STATIC);
- sqlite3_bind_text(handle, 3, file->files->str, -1, SQLITE_STATIC);
- sqlite3_bind_text(handle, 4, file->types->str, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text(handle, 2, (const char *) key, (int) key_len, SQLITE_STATIC);
+ cr_sqlite3_bind_text(handle, 3, file->files->str, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text(handle, 4, file->types->str, -1, SQLITE_STATIC);
rc = sqlite3_step (handle);
sqlite3_reset (handle);
if (rc != SQLITE_DONE) {
g_critical ("Error adding file records to db: %s",
sqlite3_errmsg (db));
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Error adding file records to db : %s",
sqlite3_errmsg(db));
}
rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not prepare changelog insertion: %s",
sqlite3_errmsg (db));
sqlite3_finalize (handle);
query = "INSERT INTO packages (pkgId) VALUES (?)";
rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL);
if (rc != SQLITE_OK) {
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Can not prepare package ids insertion: %s",
sqlite3_errmsg (db));
sqlite3_finalize (handle);
assert(!err || *err == NULL);
- sqlite3_bind_text (handle, 1, pkg->pkgId, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 1, pkg->pkgId, -1, SQLITE_STATIC);
rc = sqlite3_step (handle);
sqlite3_reset (handle);
} else {
g_critical("Error adding package to db: %s",
sqlite3_errmsg(db));
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Error adding package to db: %s",
sqlite3_errmsg(db));
}
// Primary.sqlite interface
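+/* Finalize all prepared statements and free the statements object.
+ * Safe to call on NULL or on a partially prepared object, which is
+ * exactly what the error path of the prepare function relies on. */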
+void
+cr_db_destroy_primary_statements(cr_DbPrimaryStatements stmts)
+{
+ if (!stmts)
+ return;
+
+ if (stmts->pkg_handle)
+ sqlite3_finalize(stmts->pkg_handle);
+ if (stmts->provides_handle)
+ sqlite3_finalize(stmts->provides_handle);
+ if (stmts->conflicts_handle)
+ sqlite3_finalize(stmts->conflicts_handle);
+ if (stmts->obsoletes_handle)
+ sqlite3_finalize(stmts->obsoletes_handle);
+ if (stmts->requires_handle)
+ sqlite3_finalize(stmts->requires_handle);
+ if (stmts->suggests_handle)
+ sqlite3_finalize(stmts->suggests_handle);
+ if (stmts->enhances_handle)
+ sqlite3_finalize(stmts->enhances_handle);
+ if (stmts->recommends_handle)
+ sqlite3_finalize(stmts->recommends_handle);
+ if (stmts->supplements_handle)
+ sqlite3_finalize(stmts->supplements_handle);
+ if (stmts->files_handle)
+ sqlite3_finalize(stmts->files_handle);
+ free(stmts);
+}
+
+
cr_DbPrimaryStatements
cr_db_prepare_primary_statements(sqlite3 *db, GError **err)
{
+ assert(!err || *err == NULL);
+
GError *tmp_err = NULL;
cr_DbPrimaryStatements ret = malloc(sizeof(*ret));
- assert(!err || *err == NULL);
-
- ret->db = db;
- ret->pkg_handle = NULL;
- ret->provides_handle = NULL;
- ret->conflicts_handle = NULL;
- ret->obsoletes_handle = NULL;
- ret->requires_handle = NULL;
- ret->files_handle = NULL;
+ ret->db = db;
+ ret->pkg_handle = NULL;
+ ret->provides_handle = NULL;
+ ret->conflicts_handle = NULL;
+ ret->obsoletes_handle = NULL;
+ ret->requires_handle = NULL;
+ ret->suggests_handle = NULL;
+ ret->enhances_handle = NULL;
+ ret->recommends_handle = NULL;
+ ret->supplements_handle = NULL;
+ ret->files_handle = NULL;
ret->pkg_handle = db_package_prepare(db, &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
ret->provides_handle = db_dependency_prepare(db, "provides", &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
ret->conflicts_handle = db_dependency_prepare(db, "conflicts", &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
ret->obsoletes_handle = db_dependency_prepare(db, "obsoletes", &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
ret->requires_handle = db_dependency_prepare(db, "requires", &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
- ret->files_handle = db_file_prepare(db, &tmp_err);
+ ret->suggests_handle = db_dependency_prepare(db, "suggests", &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
- return ret;
-}
+ ret->enhances_handle = db_dependency_prepare(db, "enhances", &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ goto error;
+ }
+ ret->recommends_handle = db_dependency_prepare(db, "recommends", &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ goto error;
+ }
-void
-cr_db_destroy_primary_statements(cr_DbPrimaryStatements stmts)
-{
- if (!stmts)
- return;
+ ret->supplements_handle = db_dependency_prepare(db, "supplements", &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ goto error;
+ }
- if (stmts->pkg_handle)
- sqlite3_finalize(stmts->pkg_handle);
- if (stmts->provides_handle)
- sqlite3_finalize(stmts->provides_handle);
- if (stmts->conflicts_handle)
- sqlite3_finalize(stmts->conflicts_handle);
- if (stmts->obsoletes_handle)
- sqlite3_finalize(stmts->obsoletes_handle);
- if (stmts->requires_handle)
- sqlite3_finalize(stmts->requires_handle);
- if (stmts->files_handle)
- sqlite3_finalize(stmts->files_handle);
- free(stmts);
+ ret->files_handle = db_file_prepare(db, &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ goto error;
+ }
+
+ return ret;
+
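+ // Preparing one of the statements failed: finalize everything
+ // prepared so far and signal the failure by returning NULL.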
+error:
+ cr_db_destroy_primary_statements(ret);
+ return NULL;
}
}
}
+ for (iter = pkg->suggests; iter; iter = iter->next) {
+ db_dependency_write(stmts->db,
+ stmts->suggests_handle,
+ pkg->pkgKey,
+ (cr_Dependency *) iter->data,
+ TRUE,
+ &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ return;
+ }
+ }
+
+ for (iter = pkg->enhances; iter; iter = iter->next) {
+ db_dependency_write(stmts->db,
+ stmts->enhances_handle,
+ pkg->pkgKey,
+ (cr_Dependency *) iter->data,
+ TRUE,
+ &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ return;
+ }
+ }
+
+ for (iter = pkg->recommends; iter; iter = iter->next) {
+ db_dependency_write(stmts->db,
+ stmts->recommends_handle,
+ pkg->pkgKey,
+ (cr_Dependency *) iter->data,
+ TRUE,
+ &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ return;
+ }
+ }
+
+ for (iter = pkg->supplements; iter; iter = iter->next) {
+ db_dependency_write(stmts->db,
+ stmts->supplements_handle,
+ pkg->pkgKey,
+ (cr_Dependency *) iter->data,
+ TRUE,
+ &tmp_err);
+ if (tmp_err) {
+ g_propagate_error(err, tmp_err);
+ return;
+ }
+ }
+
for (iter = pkg->files; iter; iter = iter->next) {
db_file_write(stmts->db, stmts->files_handle, pkg->pkgKey,
(cr_PackageFile *) iter->data, &tmp_err);
// filelists.sqlite interface
+void
+cr_db_destroy_filelists_statements(cr_DbFilelistsStatements stmts)
+{
+ if (!stmts)
+ return;
+
+ if (stmts->package_id_handle)
+ sqlite3_finalize(stmts->package_id_handle);
+ if (stmts->filelists_handle)
+ sqlite3_finalize(stmts->filelists_handle);
+ free(stmts);
+}
+
+
cr_DbFilelistsStatements
cr_db_prepare_filelists_statements(sqlite3 *db, GError **err)
{
ret->package_id_handle = db_package_ids_prepare(db, &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
ret->filelists_handle = db_filelists_prepare(db, &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
return ret;
-}
-
-void
-cr_db_destroy_filelists_statements(cr_DbFilelistsStatements stmts)
-{
- if (!stmts)
- return;
-
- if (stmts->package_id_handle)
- sqlite3_finalize(stmts->package_id_handle);
- if (stmts->filelists_handle)
- sqlite3_finalize(stmts->filelists_handle);
- free(stmts);
+error:
+ cr_db_destroy_filelists_statements(ret);
+ return NULL;
}
// other.sqlite interface
+void
+cr_db_destroy_other_statements(cr_DbOtherStatements stmts)
+{
+ if (!stmts)
+ return;
+
+ if (stmts->package_id_handle)
+ sqlite3_finalize(stmts->package_id_handle);
+ if (stmts->changelog_handle)
+ sqlite3_finalize(stmts->changelog_handle);
+ free(stmts);
+}
+
+
cr_DbOtherStatements
cr_db_prepare_other_statements(sqlite3 *db, GError **err)
{
ret->package_id_handle = db_package_ids_prepare(db, &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
ret->changelog_handle = db_changelog_prepare(db, &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
- return ret;
+ goto error;
}
return ret;
-}
-
-
-void
-cr_db_destroy_other_statements(cr_DbOtherStatements stmts)
-{
- if (!stmts)
- return;
- if (stmts->package_id_handle)
- sqlite3_finalize(stmts->package_id_handle);
- if (stmts->changelog_handle)
- sqlite3_finalize(stmts->changelog_handle);
- free(stmts);
+error:
+ cr_db_destroy_other_statements(ret);
+ return NULL;
}
entry = (cr_ChangelogEntry *) iter->data;
sqlite3_bind_int (handle, 1, pkg->pkgKey);
- sqlite3_bind_text (handle, 2, entry->author, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 2, entry->author, -1, SQLITE_STATIC);
sqlite3_bind_int (handle, 3, entry->date);
- sqlite3_bind_text (handle, 4, entry->changelog, -1, SQLITE_STATIC);
+ cr_sqlite3_bind_text (handle, 4, entry->changelog, -1, SQLITE_STATIC);
rc = sqlite3_step (handle);
sqlite3_reset (handle);
if (rc != SQLITE_DONE) {
g_critical ("Error adding changelog to db: %s",
sqlite3_errmsg (stmts->db));
- g_set_error(err, CR_DB_ERROR, CRE_DB,
+ g_set_error(err, ERR_DOMAIN, CRE_DB,
"Error adding changelog to db : %s",
sqlite3_errmsg(stmts->db));
return;
assert(!err || *err == NULL);
if (path[0] == '\0') {
- g_set_error(err, CR_DB_ERROR, CRE_BADARG, "Bad path: \"%s\"", path);
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad path: \"%s\"", path);
return NULL;
}
exists = g_file_test(path, G_FILE_TEST_IS_REGULAR);
+ if (exists) {
+ struct stat stat_buf;
+ if (stat(path, &stat_buf) == -1) {
+ g_set_error(err, ERR_DOMAIN, CRE_IO,
+ "Cannot stat %s: %s", path, g_strerror(errno));
+ return NULL;
+ }
+
+ if (stat_buf.st_size == 0)
+ // File exists, but is just a placeholder created by g_mkstemp()
+ // because --local-sqlite option was used
+ exists = FALSE;
+ }
sqlite3_enable_shared_cache(1);
default:
g_critical("%s: Bad db_type", __func__);
assert(0);
- g_set_error(err, CR_DB_ERROR, CRE_ASSERT, "Bad db type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type");
return NULL;
}
default:
g_critical("%s: Bad db_type", __func__);
assert(0);
- g_set_error(err, CR_DB_ERROR, CRE_ASSERT, "Bad db type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type");
return NULL;
}
- if (tmp_err) {
+ if (!statements) {
g_propagate_error(err, tmp_err);
sqlite3_close(db);
return NULL;
default:
g_critical("%s: Bad db_type", __func__);
assert(0);
- g_set_error(err, CR_DB_ERROR, CRE_ASSERT, "Bad db type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type");
return NULL;
}
default:
g_critical("%s: Bad db type", __func__);
assert(0);
- g_set_error(err, CR_DB_ERROR, CRE_ASSERT, "Bad db type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type");
return CRE_ASSERT;
}
default:
g_critical("%s: Bad db type", __func__);
assert(0);
- g_set_error(err, CR_DB_ERROR, CRE_ASSERT, "Bad db type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type");
return CRE_ASSERT;
}
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <string.h>
+#include "error.h"
+#include "cleanup.h"
+#include "version.h"
+#include "compression_wrapper.h"
+#include "misc.h"
+#include "createrepo_shared.h"
+#include "locate_metadata.h"
+#include "load_metadata.h"
+#include "package.h"
+#include "repomd.h"
+#include "sqlite.h"
+#include "xml_file.h"
+#include "modifyrepo_shared.h"
+#include "threads.h"
+#include "xml_dump.h"
+
+
+#define DEFAULT_CHECKSUM CR_CHECKSUM_SHA256
+
+/**
+ * Command line options
+ */
+typedef struct {
+
+ /* Items filled by cmd option parser */
+
+ gboolean version; /*!< print program version */
+ gboolean quiet; /*!< quiet mode */
+ gboolean verbose; /*!< verbose mode */
+ gboolean force; /*!< overwrite existing DBs */
+ gboolean keep_old; /*!< keep old DBs around */
+ gboolean xz_compression; /*!< use xz for DBs compression */
+ gchar *compress_type; /*!< which compression type to use */
+ gboolean local_sqlite; /*!< generate sqlite DBs locally (in a
+ directory for temporary files), for
+ situations when sqlite has trouble
+ generating DBs on NFS mounts */
+ gchar *chcksum_type; /*!< type of checksum in repomd.xml */
+
+ /* Items filled by check_sqliterepo_arguments() */
+
+ cr_CompressionType compression_type; /*!< compression type */
+ cr_ChecksumType checksum_type; /*!< checksum type */
+
+} SqliterepoCmdOptions;
+
+static SqliterepoCmdOptions *
+sqliterepocmdoptions_new(void)
+{
+ SqliterepoCmdOptions *options;
+
+ options = g_new(SqliterepoCmdOptions, 1);
+ options->version = FALSE;
+ options->quiet = FALSE;
+ options->verbose = FALSE;
+ options->force = FALSE;
+ options->keep_old = FALSE;
+ options->xz_compression = FALSE;
+ options->compress_type = NULL;
+ options->chcksum_type = NULL;
+ options->local_sqlite = FALSE;
+ options->compression_type = CR_CW_BZ2_COMPRESSION;
+ options->checksum_type = CR_CHECKSUM_UNKNOWN;
+
+ return options;
+}
+
+static void
+sqliterepocmdoptions_free(SqliterepoCmdOptions *options)
+{
+ g_free(options->compress_type);
+ g_free(options);
+}
+
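+/* Scope-exit cleanup: the attribute below makes the compiler call
+ * sqliterepocmdoptions_free() automatically when an annotated variable
+ * goes out of scope (GCC/Clang cleanup attribute). */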
+CR_DEFINE_CLEANUP_FUNCTION0(SqliterepoCmdOptions*, cr_local_sqliterepocmdoptions_free, sqliterepocmdoptions_free)
+#define _cleanup_sqliterepocmdoptions_free_ __attribute__ ((cleanup(cr_local_sqliterepocmdoptions_free)))
+
+/**
+ * Parse commandline arguments for sqliterepo utility
+ */
+static gboolean
+parse_sqliterepo_arguments(int *argc,
+ char ***argv,
+ SqliterepoCmdOptions *options,
+ GError **err)
+{
+ const GOptionEntry cmd_entries[] = {
+
+ { "version", 'V', 0, G_OPTION_ARG_NONE, &(options->version),
+ "Show program's version number and exit.", NULL},
+ { "quiet", 'q', 0, G_OPTION_ARG_NONE, &(options->quiet),
+ "Run quietly.", NULL },
+ { "verbose", 'v', 0, G_OPTION_ARG_NONE, &(options->verbose),
+ "Run verbosely.", NULL },
+ { "force", 'f', 0, G_OPTION_ARG_NONE, &(options->force),
+ "Overwirte existing DBs.", NULL },
+ { "keep-old", '\0', 0, G_OPTION_ARG_NONE, &(options->keep_old),
+ "Do not remove old DBs. Use only with combination with --force.", NULL },
+ { "xz", '\0', 0, G_OPTION_ARG_NONE, &(options->xz_compression),
+ "Use xz for repodata compression.", NULL },
+ { "compress-type", '\0', 0, G_OPTION_ARG_STRING, &(options->compress_type),
+ "Which compression type to use.", "<compress_type>" },
+ { "checksum", '\0', 0, G_OPTION_ARG_STRING, &(options->chcksum_type),
+ "Which checksum type to use in repomd.xml for sqlite DBs.", "<checksum_type>" },
+ { "local-sqlite", '\0', 0, G_OPTION_ARG_NONE, &(options->local_sqlite),
+ "Gen sqlite DBs locally (into a directory for temporary files). "
+ "Sometimes, sqlite has a trouble to gen DBs on a NFS mount, "
+ "use this option in such cases. "
+ "This option could lead to a higher memory consumption "
+ "if TMPDIR is set to /tmp or not set at all, because then the /tmp is "
+ "used and /tmp dir is often a ramdisk.", NULL },
+ { NULL },
+ };
+
+ // Parse cmd arguments
+ GOptionContext *context;
+ context = g_option_context_new("<repo_directory>");
+ g_option_context_set_summary(context, "Generate sqlite DBs from XML repodata.");
+ g_option_context_add_main_entries(context, cmd_entries, NULL);
+ gboolean ret = g_option_context_parse(context, argc, argv, err);
+ g_option_context_free(context);
+ return ret;
+}
+
+/**
+ * Check parsed arguments and fill some other attributes
+ * of option struct accordingly.
+ */
+static gboolean
+check_arguments(SqliterepoCmdOptions *options, GError **err)
+{
+ // --compress-type
+ if (options->compress_type) {
+ options->compression_type = cr_compression_type(options->compress_type);
+ if (options->compression_type == CR_CW_UNKNOWN_COMPRESSION) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR,
+ "Unknown compression type \"%s\"", options->compress_type);
+ return FALSE;
+ }
+ }
+
+ // --checksum
+ if (options->chcksum_type) {
+ cr_ChecksumType type;
+ type = cr_checksum_type(options->chcksum_type);
+ if (type == CR_CHECKSUM_UNKNOWN) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "Unknown/Unsupported checksum type \"%s\"",
+ options->chcksum_type);
+ return FALSE;
+ }
+ options->checksum_type = type;
+ }
+
+ // --xz
+ if (options->xz_compression)
+ options->compression_type = CR_CW_XZ_COMPRESSION;
+
+ return TRUE;
+}
+
+// Common
+
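+/* XML parser warning callback - log the warning and continue parsing. */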
+static int
+warningcb(G_GNUC_UNUSED cr_XmlParserWarningType type,
+ char *msg,
+ void *cbdata,
+ G_GNUC_UNUSED GError **err)
+{
+ g_warning("XML parser warning (%s): %s\n", (gchar *) cbdata, msg);
+ return CR_CB_RET_OK;
+}
+
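+/* XML parser package callback - insert the package into the sqlite DB
+ * passed via cbdata. The callback takes ownership of the package and
+ * frees it. */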
+static int
+pkgcb(cr_Package *pkg,
+ void *cbdata,
+ GError **err)
+{
+ int rc;
+ rc = cr_db_add_pkg((cr_SqliteDb *) cbdata, pkg, err);
+ cr_package_free(pkg);
+ if (rc != CRE_OK)
+ return CR_CB_RET_ERR;
+ return CR_CB_RET_OK;
+}
+
+// Primary sqlite db
+
+static gboolean
+primary_to_sqlite(const gchar *pri_xml_path,
+ cr_SqliteDb *pri_db,
+ GError **err)
+{
+ int rc;
+ rc = cr_xml_parse_primary(pri_xml_path,
+ NULL,
+ NULL,
+ pkgcb,
+ (void *) pri_db,
+ warningcb,
+ (void *) pri_xml_path,
+ TRUE,
+ err);
+ if (rc != CRE_OK)
+ return FALSE;
+ return TRUE;
+}
+
+// Filelists
+
+static gboolean
+filelists_to_sqlite(const gchar *fil_xml_path,
+ cr_SqliteDb *fil_db,
+ GError **err)
+{
+ int rc;
+ rc = cr_xml_parse_filelists(fil_xml_path,
+ NULL,
+ NULL,
+ pkgcb,
+ (void *) fil_db,
+ warningcb,
+ (void *) fil_xml_path,
+ err);
+ if (rc != CRE_OK)
+ return FALSE;
+ return TRUE;
+}
+
+// Other
+
+static gboolean
+other_to_sqlite(const gchar *oth_xml_path,
+ cr_SqliteDb *oth_db,
+ GError **err)
+{
+ int rc;
+ rc = cr_xml_parse_other(oth_xml_path,
+ NULL,
+ NULL,
+ pkgcb,
+ (void *) oth_db,
+ warningcb,
+ (void *) oth_xml_path,
+ err);
+ if (rc != CRE_OK)
+ return FALSE;
+ return TRUE;
+}
+
+// Main
+
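+/* Convert the available XML metadata into the sqlite DBs. Any of the
+ * three path/DB pairs may be NULL, in which case it is skipped. */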
+static gboolean
+xml_to_sqlite(const gchar *pri_xml_path,
+ const gchar *fil_xml_path,
+ const gchar *oth_xml_path,
+ cr_SqliteDb *pri_db,
+ cr_SqliteDb *fil_db,
+ cr_SqliteDb *oth_db,
+ GError **err)
+{
+ gboolean ret;
+
+ if (pri_xml_path && pri_db) {
+ ret = primary_to_sqlite(pri_xml_path,
+ pri_db,
+ err);
+ if (!ret)
+ return FALSE;
+ g_debug("Primary sqlite done");
+ }
+
+ if (fil_xml_path && fil_db) {
+ ret = filelists_to_sqlite(fil_xml_path,
+ fil_db,
+ err);
+ if (!ret)
+ return FALSE;
+ g_debug("Filelists sqlite done");
+ }
+
+ if (oth_xml_path && oth_db) {
+ ret = other_to_sqlite(oth_xml_path,
+ oth_db,
+ err);
+ if (!ret)
+ return FALSE;
+ g_debug("Other sqlite done");
+ }
+
+ return TRUE;
+}
+
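+/* Store the checksum of each XML file (taken from its repomd record)
+ * into the db_info table of the corresponding sqlite DB. */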
+static gboolean
+sqlite_dbinfo_update(cr_Repomd *repomd,
+ cr_SqliteDb *pri_db,
+ cr_SqliteDb *fil_db,
+ cr_SqliteDb *oth_db,
+ GError **err)
+{
+ cr_RepomdRecord *rec = NULL;
+
+ // Get each XML file's checksum from its repomd record
+ // and insert it into the corresponding sqlite db_info table
+ if (pri_db) {
+ rec = cr_repomd_get_record(repomd, "primary");
+ if (rec && rec->checksum)
+ if (cr_db_dbinfo_update(pri_db, rec->checksum, err) != CRE_OK)
+ return FALSE;
+ }
+
+ if (fil_db) {
+ rec = cr_repomd_get_record(repomd, "filelists");
+ if (rec && rec->checksum)
+ if (cr_db_dbinfo_update(fil_db, rec->checksum, err) != CRE_OK)
+ return FALSE;
+ }
+
+ if (oth_db) {
+ rec = cr_repomd_get_record(repomd, "other");
+ if (rec && rec->checksum)
+ if (cr_db_dbinfo_update(oth_db, rec->checksum, err) != CRE_OK)
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
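+/* Compress the three sqlite DBs in parallel, remove the uncompressed
+ * files, and hand back freshly created "primary_db"/"filelists_db"/
+ * "other_db" repomd records via the in_*_rec output parameters. */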
+static gboolean
+compress_sqlite_dbs(const gchar *tmp_out_repo,
+ const gchar *pri_db_filename,
+ cr_RepomdRecord **in_pri_db_rec,
+ const gchar *fil_db_filename,
+ cr_RepomdRecord **in_fil_db_rec,
+ const gchar *oth_db_filename,
+ cr_RepomdRecord **in_oth_db_rec,
+ cr_CompressionType compression_type,
+ cr_ChecksumType checksum_type,
+ GError **err)
+{
+ cr_CompressionTask *pri_db_task;
+ cr_CompressionTask *fil_db_task;
+ cr_CompressionTask *oth_db_task;
+ const char *sqlite_compression_suffix;
+ cr_RepomdRecord *pri_db_rec = NULL;
+ cr_RepomdRecord *fil_db_rec = NULL;
+ cr_RepomdRecord *oth_db_rec = NULL;
+
+ // Prepare thread pool for compression tasks
+ GThreadPool *compress_pool = g_thread_pool_new(cr_compressing_thread,
+ NULL, 3, FALSE, NULL);
+
+ // Prepare output filenames
+ sqlite_compression_suffix = cr_compression_suffix(compression_type);
+ gchar *pri_db_name = g_strconcat(tmp_out_repo, "/primary.sqlite",
+ sqlite_compression_suffix, NULL);
+ gchar *fil_db_name = g_strconcat(tmp_out_repo, "/filelists.sqlite",
+ sqlite_compression_suffix, NULL);
+ gchar *oth_db_name = g_strconcat(tmp_out_repo, "/other.sqlite",
+ sqlite_compression_suffix, NULL);
+
+ // Prepare compression tasks
+ pri_db_task = cr_compressiontask_new(pri_db_filename,
+ pri_db_name,
+ compression_type,
+ checksum_type,
+ 1, NULL);
+ g_thread_pool_push(compress_pool, pri_db_task, NULL);
+
+ fil_db_task = cr_compressiontask_new(fil_db_filename,
+ fil_db_name,
+ compression_type,
+ checksum_type,
+ 1, NULL);
+ g_thread_pool_push(compress_pool, fil_db_task, NULL);
+
+ oth_db_task = cr_compressiontask_new(oth_db_filename,
+ oth_db_name,
+ compression_type,
+ checksum_type,
+ 1, NULL);
+ g_thread_pool_push(compress_pool, oth_db_task, NULL);
+
+ // Wait till all tasks are complete and free the thread pool
+ g_thread_pool_free(compress_pool, FALSE, TRUE);
+
+ // Remove uncompressed DBs
+ cr_rm(pri_db_filename, CR_RM_FORCE, NULL, NULL);
+ cr_rm(fil_db_filename, CR_RM_FORCE, NULL, NULL);
+ cr_rm(oth_db_filename, CR_RM_FORCE, NULL, NULL);
+
+ // Prepare repomd records
+ pri_db_rec = cr_repomd_record_new("primary_db", pri_db_name);
+ fil_db_rec = cr_repomd_record_new("filelists_db", fil_db_name);
+ oth_db_rec = cr_repomd_record_new("other_db", oth_db_name);
+
+ *in_pri_db_rec = pri_db_rec;
+ *in_fil_db_rec = fil_db_rec;
+ *in_oth_db_rec = oth_db_rec;
+
+ // Free paths to compressed files
+ g_free(pri_db_name);
+ g_free(fil_db_name);
+ g_free(oth_db_name);
+
+ // Fill repomd records from stats gathered during compression
+ cr_repomd_record_load_contentstat(pri_db_rec, pri_db_task->stat);
+ cr_repomd_record_load_contentstat(fil_db_rec, fil_db_task->stat);
+ cr_repomd_record_load_contentstat(oth_db_rec, oth_db_task->stat);
+
+ // Free the compression tasks
+ cr_compressiontask_free(pri_db_task, NULL);
+ cr_compressiontask_free(fil_db_task, NULL);
+ cr_compressiontask_free(oth_db_task, NULL);
+
+ // Prepare thread pool for repomd record filling tasks
+ GThreadPool *fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread,
+ NULL, 3, FALSE, NULL);
+
+ // Prepare the tasks themselves
+ cr_RepomdRecordFillTask *pri_db_fill_task;
+ cr_RepomdRecordFillTask *fil_db_fill_task;
+ cr_RepomdRecordFillTask *oth_db_fill_task;
+
+ pri_db_fill_task = cr_repomdrecordfilltask_new(pri_db_rec,
+ checksum_type,
+ NULL);
+ g_thread_pool_push(fill_pool, pri_db_fill_task, NULL);
+
+ fil_db_fill_task = cr_repomdrecordfilltask_new(fil_db_rec,
+ checksum_type,
+ NULL);
+ g_thread_pool_push(fill_pool, fil_db_fill_task, NULL);
+
+ oth_db_fill_task = cr_repomdrecordfilltask_new(oth_db_rec,
+ checksum_type,
+ NULL);
+ g_thread_pool_push(fill_pool, oth_db_fill_task, NULL);
+
+ // Wait till the all tasks are finished and free the pool
+ g_thread_pool_free(fill_pool, FALSE, TRUE);
+
+ // Clear the tasks
+ cr_repomdrecordfilltask_free(pri_db_fill_task, NULL);
+ cr_repomdrecordfilltask_free(fil_db_fill_task, NULL);
+ cr_repomdrecordfilltask_free(oth_db_fill_task, NULL);
+
+ return TRUE;
+}
+
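+/* Detect whether the repo uses simple metadata filenames or unique ones
+ * prefixed with a checksum, by checking whether the basename of the
+ * primary record's location href starts with "primary". */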
+static gboolean
+uses_simple_md_filename(cr_Repomd *repomd,
+ gboolean *simple_md,
+ GError **err)
+{
+ cr_RepomdRecord *rec = NULL;
+
+ // Get primary record
+ rec = cr_repomd_get_record(repomd, "primary");
+ if (!rec) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR,
+ "Repomd doen't contain primary.xml");
+ return FALSE;
+ }
+
+ if (!rec->location_href) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR,
+ "Primary repomd record doesn't contain location href");
+ return FALSE;
+ }
+
+ // Check if it's prefixed by checksum or not
+ _cleanup_free_ gchar *basename = NULL;
+
+ basename = g_path_get_basename(rec->location_href);
+
+ if (g_str_has_prefix(basename, "primary"))
+ *simple_md = TRUE;
+ else
+ *simple_md = FALSE;
+
+ return TRUE;
+}
+
+/* Prepare new repomd.xml
+ * Detect if unique or simple md filenames should be used.
+ * Rename the files if necessary (add checksums into prefixes)
+ * Add the records for databases
+ * Write the updated repomd.xml into tmp_out_repo
+ */
+static gboolean
+gen_new_repomd(const gchar *tmp_out_repo,
+ cr_Repomd *in_repomd,
+ cr_RepomdRecord *in_pri_db_rec,
+ cr_RepomdRecord *in_fil_db_rec,
+ cr_RepomdRecord *in_oth_db_rec,
+ GError **err)
+{
+ cr_Repomd *repomd = NULL;
+ cr_RepomdRecord *pri_db_rec = NULL;
+ cr_RepomdRecord *fil_db_rec = NULL;
+ cr_RepomdRecord *oth_db_rec = NULL;
+ gboolean simple_md_filename = FALSE;
+
+ // Create copy of repomd
+ repomd = cr_repomd_copy(in_repomd);
+
+ // Check if a unique md filename should be used or not
+ if (!uses_simple_md_filename(in_repomd, &simple_md_filename, err))
+ return FALSE;
+
+ // Prepend checksum if unique md filename should be used
+ if (!simple_md_filename) {
+ g_debug("Renaming generated DBs to unique filenames..");
+ cr_repomd_record_rename_file(in_pri_db_rec, NULL);
+ cr_repomd_record_rename_file(in_fil_db_rec, NULL);
+ cr_repomd_record_rename_file(in_oth_db_rec, NULL);
+ }
+
+ // Remove existing DBs
+ cr_repomd_remove_record(repomd, "primary_db");
+ cr_repomd_remove_record(repomd, "filelists_db");
+ cr_repomd_remove_record(repomd, "other_db");
+
+ // Create copy of the records
+ //
+ // Note: We do this copy because once a record is set into
+ // a repomd, the repomd takes over ownership of the record,
+ // but we don't want to lose ownership in this case.
+ //
+ // Note: We do this copy intentionally after the rename,
+ // because we want the rename propagated into the
+ // original records (the ones referenced in the caller).
+ pri_db_rec = cr_repomd_record_copy(in_pri_db_rec);
+ fil_db_rec = cr_repomd_record_copy(in_fil_db_rec);
+ oth_db_rec = cr_repomd_record_copy(in_oth_db_rec);
+
+ // Add records to repomd.xml
+ cr_repomd_set_record(repomd, pri_db_rec);
+ cr_repomd_set_record(repomd, fil_db_rec);
+ cr_repomd_set_record(repomd, oth_db_rec);
+
+ // Sort the records
+ cr_repomd_sort_records(repomd);
+
+ // Dump the repomd.xml content
+ _cleanup_free_ gchar *repomd_content = NULL;
+ repomd_content = cr_xml_dump_repomd(repomd, err);
+ if (!repomd_content)
+ return FALSE;
+
+ // Prepare output repomd.xml path
+ _cleanup_free_ gchar *repomd_path = NULL;
+ repomd_path = g_build_filename(tmp_out_repo, "repomd.xml", NULL);
+
+ // Write the repomd.xml
+ _cleanup_file_fclose_ FILE *f_repomd = NULL;
+ if (!(f_repomd = fopen(repomd_path, "w"))) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot open %s: %s", repomd_path, g_strerror(errno));
+ return FALSE;
+ }
+
+ // Write the content
+ fputs(repomd_content, f_repomd);
+
+ // Cleanup
+ cr_repomd_free(repomd);
+
+ return TRUE;
+}
+
+
+/** Intelligently move content of tmp_out_repo to in_repo
+ * (the repomd.xml is moved as a last file)
+ */
+static gboolean
+move_results(const gchar *tmp_out_repo,
+ const gchar *in_repo,
+ GError **err)
+{
+ _cleanup_dir_close_ GDir *dirp = NULL;
+ _cleanup_error_free_ GError *tmp_err = NULL;
+
+ // Open the source directory
+ dirp = g_dir_open(tmp_out_repo, 0, &tmp_err);
+ if (!dirp) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot open dir %s: %s",
+ tmp_out_repo, tmp_err->message);
+ return FALSE;
+ }
+
+ // Iterate over its content
+ const gchar *filename;
+ while ((filename = g_dir_read_name(dirp))) {
+ _cleanup_free_ gchar *src_path = NULL;
+ _cleanup_free_ gchar *dst_path = NULL;
+
+ // Skip repomd.xml
+ if (!g_strcmp0(filename, "repomd.xml"))
+ continue;
+
+ // Get full src path
+ src_path = g_build_filename(tmp_out_repo, filename, NULL);
+
+ // Prepare full dst path
+ dst_path = g_build_filename(in_repo, filename, NULL);
+
+ // Move the file
+ if (g_rename(src_path, dst_path) == -1) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot move: %s to: %s: %s",
+ src_path, dst_path, g_strerror(errno));
+ return FALSE;
+ }
+ }
+
+ // The last step - move of the repomd.xml
+ {
+ _cleanup_free_ gchar *src_path = NULL;
+ _cleanup_free_ gchar *dst_path = NULL;
+ src_path = g_build_filename(tmp_out_repo, "repomd.xml", NULL);
+ dst_path = g_build_filename(in_repo, "repomd.xml", NULL);
+ if (g_rename(src_path, dst_path) == -1) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot move: %s to: %s: %s",
+ src_path, dst_path, g_strerror(errno));
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
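+/* Remove the file referenced by the old repomd record, unless it is the
+ * very same file as the one referenced by the new record (which happens
+ * when simple md filenames are in use and the new DB overwrote the old
+ * one in place). */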
+static gboolean
+remove_old_if_different(const gchar *repo_path,
+ cr_RepomdRecord *old_rec,
+ cr_RepomdRecord *new_rec,
+ GError **err)
+{
+ int rc;
+ _cleanup_free_ gchar *old_fn = NULL;
+ _cleanup_free_ gchar *new_fn = NULL;
+
+ // Input check
+ if (!old_rec)
+ return TRUE;
+
+ // Build filenames
+ old_fn = g_build_filename(repo_path, old_rec->location_href, NULL);
+ new_fn = g_build_filename(repo_path, new_rec->location_href, NULL);
+
+ // Check if the files are the same
+ gboolean identical = FALSE;
+ if (!cr_identical_files(old_fn, new_fn, &identical, err))
+ return FALSE;
+
+ if (identical) {
+ g_debug("Old DB file %s has been overwritten by the new one.", new_fn);
+ return TRUE;
+ }
+
+ // Remove file referenced by the old record
+ g_debug("Removing old DB file %s", old_fn);
+ rc = g_remove(old_fn);
+ if (rc == -1) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot remove %s: %s", old_fn, g_strerror(errno));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
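+/* The core of sqliterepo: locate and parse the repodata at path, generate
+ * the primary/filelists/other sqlite DBs from the XML metadata, compress
+ * them, add their records into a new repomd.xml and move the results into
+ * the repository (repomd.xml last). */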
+static gboolean
+generate_sqlite_from_xml(const gchar *path,
+ cr_CompressionType compression_type,
+ cr_ChecksumType checksum_type,
+ gboolean local_sqlite,
+ gboolean force,
+ gboolean keep_old,
+ GError **err)
+{
+ _cleanup_free_ gchar *in_dir = NULL; // path/to/repo/
+ _cleanup_free_ gchar *in_repo = NULL; // path/to/repo/repodata/
+ _cleanup_free_ gchar *out_dir = NULL; // path/to/out_repo/
+ _cleanup_free_ gchar *out_repo = NULL; // path/to/out_repo/repodata/
+ _cleanup_free_ gchar *tmp_out_repo = NULL; // usually path/to/out_repo/.repodata/
+ _cleanup_free_ gchar *lock_dir = NULL; // path/to/out_repo/.repodata/
+ gboolean ret;
+
+ // Check if input dir exists
+ in_dir = cr_normalize_dir_path(path);
+ if (!g_file_test(in_dir, G_FILE_TEST_IS_DIR)) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Directory %s must exist\n", in_dir);
+ return FALSE;
+ }
+
+ // Set other paths
+ in_repo = g_build_filename(in_dir, "repodata/", NULL);
+ out_dir = g_strdup(in_dir);
+ out_repo = g_strdup(in_repo);
+ lock_dir = g_build_filename(out_dir, ".repodata/", NULL);
+ tmp_out_repo = g_build_filename(out_dir, ".repodata/", NULL);
+
+ // Block signals that terminates the process
+ if (!cr_block_terminating_signals(err))
+ return FALSE;
+
+ // Check if lock exists & Create lock dir
+ if (!cr_lock_repo(out_dir, FALSE, &lock_dir, &tmp_out_repo, err))
+ return FALSE;
+
+ // Setup cleanup handlers
+ if (!cr_set_cleanup_handler(lock_dir, tmp_out_repo, err))
+ return FALSE;
+
+ // Unblock the blocked signals
+ if (!cr_unblock_terminating_signals(err))
+ return FALSE;
+
+ // Locate repodata
+ struct cr_MetadataLocation *md_loc = NULL;
+ _cleanup_free_ gchar *pri_xml_path = NULL;
+ _cleanup_free_ gchar *fil_xml_path = NULL;
+ _cleanup_free_ gchar *oth_xml_path = NULL;
+ _cleanup_free_ gchar *repomd_path = NULL;
+
+ md_loc = cr_locate_metadata(in_dir, TRUE, NULL);
+ if (!md_loc || !md_loc->repomd) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_NOFILE,
+ "repomd.xml doesn't exist");
+ return FALSE;
+ }
+
+ repomd_path = g_build_filename(md_loc->repomd, NULL);
+
+ if (md_loc->pri_xml_href)
+ pri_xml_path = g_build_filename(md_loc->pri_xml_href, NULL);
+ if (md_loc->fil_xml_href)
+ fil_xml_path = g_build_filename(md_loc->fil_xml_href, NULL);
+ if (md_loc->oth_xml_href)
+ oth_xml_path = g_build_filename(md_loc->oth_xml_href, NULL);
+ cr_metadatalocation_free(md_loc);
+
+ // Parse repomd.xml
+ int rc;
+ cr_Repomd *repomd = cr_repomd_new();
+
+ rc = cr_xml_parse_repomd(repomd_path,
+ repomd,
+ warningcb,
+ (void *) repomd_path,
+ err);
+ if (rc != CRE_OK)
+ return FALSE;
+
+ // Check if DBs already exist or not
+ gboolean dbs_already_exist = FALSE;
+
+ if (cr_repomd_get_record(repomd, "primary_db")
+ || cr_repomd_get_record(repomd, "filename_db")
+ || cr_repomd_get_record(repomd, "other_db"))
+ {
+ dbs_already_exist = TRUE;
+ }
+
+ if (dbs_already_exist && !force) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR,
+ "Repository already has sqlitedb present "
+ "in repomd.xml (You may use --force)");
+ return FALSE;
+ }
+
+ // Auto-detect used checksum algorithm if not specified explicitly
+ if (checksum_type == CR_CHECKSUM_UNKNOWN) {
+ cr_RepomdRecord *rec = cr_repomd_get_record(repomd, "primary");
+
+ if (!rec) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR,
+ "repomd.xml is missing primary metadata");
+ return FALSE;
+ }
+
+ if (rec->checksum_type)
+ checksum_type = cr_checksum_type(rec->checksum_type);
+ else if (rec->checksum_open_type)
+ checksum_type = cr_checksum_type(rec->checksum_open_type);
+
+ if (checksum_type == CR_CHECKSUM_UNKNOWN) {
+ g_debug("Cannot auto-detect checksum type, using default %s",
+ cr_checksum_name_str(DEFAULT_CHECKSUM));
+ checksum_type = DEFAULT_CHECKSUM;
+ }
+ }
+
+ // Open sqlite databases
+ _cleanup_free_ gchar *pri_db_filename = NULL;
+ _cleanup_free_ gchar *fil_db_filename = NULL;
+ _cleanup_free_ gchar *oth_db_filename = NULL;
+ cr_SqliteDb *pri_db = NULL;
+ cr_SqliteDb *fil_db = NULL;
+ cr_SqliteDb *oth_db = NULL;
+
+ _cleanup_file_close_ int pri_db_fd = -1;
+ _cleanup_file_close_ int fil_db_fd = -1;
+ _cleanup_file_close_ int oth_db_fd = -1;
+
+ g_message("Preparing sqlite DBs");
+ if (!local_sqlite) {
+ g_debug("Creating databases");
+ pri_db_filename = g_strconcat(tmp_out_repo, "/primary.sqlite", NULL);
+ fil_db_filename = g_strconcat(tmp_out_repo, "/filelists.sqlite", NULL);
+ oth_db_filename = g_strconcat(tmp_out_repo, "/other.sqlite", NULL);
+ } else {
+ g_debug("Creating databases localy");
+ const gchar *tmpdir = g_get_tmp_dir();
+ pri_db_filename = g_build_filename(tmpdir, "primary.XXXXXX.sqlite", NULL);
+ fil_db_filename = g_build_filename(tmpdir, "filelists.XXXXXX.sqlite", NULL);
+ oth_db_filename = g_build_filename(tmpdir, "other.XXXXXXX.sqlite", NULL);
+ pri_db_fd = g_mkstemp(pri_db_filename);
+ g_debug("%s", pri_db_filename);
+ if (pri_db_fd == -1) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot open %s: %s", pri_db_filename, g_strerror(errno));
+ return FALSE;
+ }
+ fil_db_fd = g_mkstemp(fil_db_filename);
+ g_debug("%s", fil_db_filename);
+ if (fil_db_fd == -1) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot open %s: %s", fil_db_filename, g_strerror(errno));
+ return FALSE;
+ }
+ oth_db_fd = g_mkstemp(oth_db_filename);
+ g_debug("%s", oth_db_filename);
+ if (oth_db_fd == -1) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_IO,
+ "Cannot open %s: %s", oth_db_filename, g_strerror(errno));
+ return FALSE;
+ }
+ }
+
+ pri_db = cr_db_open_primary(pri_db_filename, err);
+ if (!pri_db)
+ return FALSE;
+
+ fil_db = cr_db_open_filelists(fil_db_filename, err);
+ assert(fil_db || !err || *err);
+ if (!fil_db)
+ return FALSE;
+
+ oth_db = cr_db_open_other(oth_db_filename, err);
+ assert(oth_db || !err || *err);
+ if (!oth_db)
+ return FALSE;
+
+ // XML to Sqlite
+ ret = xml_to_sqlite(pri_xml_path,
+ fil_xml_path,
+ oth_xml_path,
+ pri_db,
+ fil_db,
+ oth_db,
+ err);
+ if (!ret)
+ return FALSE;
+
+ // Put checksums of XML files into Sqlite
+ ret = sqlite_dbinfo_update(repomd,
+ pri_db,
+ fil_db,
+ oth_db,
+ err);
+ if (!ret)
+ return FALSE;
+
+ // Close dbs
+ cr_db_close(pri_db, NULL);
+ cr_db_close(fil_db, NULL);
+ cr_db_close(oth_db, NULL);
+
+ // Repomd records
+ cr_RepomdRecord *pri_db_rec = NULL;
+ cr_RepomdRecord *fil_db_rec = NULL;
+ cr_RepomdRecord *oth_db_rec = NULL;
+
+ // Compress DB files and fill records
+ ret = compress_sqlite_dbs(tmp_out_repo,
+ pri_db_filename,
+ &pri_db_rec,
+ fil_db_filename,
+ &fil_db_rec,
+ oth_db_filename,
+ &oth_db_rec,
+ compression_type,
+ checksum_type,
+ err);
+ if (!ret)
+ return FALSE;
+
+ // Prepare new repomd.xml
+ ret = gen_new_repomd(tmp_out_repo,
+ repomd,
+ pri_db_rec,
+ fil_db_rec,
+ oth_db_rec,
+ err);
+ if (!ret)
+ return FALSE;
+
+ // Move the results (compressed DBs and repomd.xml) into in_repo
+ ret = move_results(tmp_out_repo,
+ in_repo,
+ err);
+ if (!ret)
+ return FALSE;
+
+ // Remove old DBs
+ if (dbs_already_exist && force && !keep_old) {
+ ret = remove_old_if_different(in_dir,
+ cr_repomd_get_record(repomd, "primary_db"),
+ pri_db_rec, err);
+ if (!ret)
+ return FALSE;
+ ret = remove_old_if_different(in_dir,
+ cr_repomd_get_record(repomd, "filelists_db"),
+ fil_db_rec, err);
+ if (!ret)
+ return FALSE;
+ ret = remove_old_if_different(in_dir,
+ cr_repomd_get_record(repomd, "other_db"),
+ oth_db_rec, err);
+ if (!ret)
+ return FALSE;
+ }
+
+ // Remove tmp_out_repo
+ g_rmdir(tmp_out_repo);
+
+ // Clean up
+ cr_repomd_free(repomd);
+ cr_repomd_record_free(pri_db_rec);
+ cr_repomd_record_free(fil_db_rec);
+ cr_repomd_record_free(oth_db_rec);
+
+ return TRUE;
+}
+
+/**
+ * Main
+ */
+int
+main(int argc, char **argv)
+{
+ gboolean ret = TRUE;
+ _cleanup_sqliterepocmdoptions_free_ SqliterepoCmdOptions *options = NULL;
+ _cleanup_error_free_ GError *tmp_err = NULL;
+
+ // Parse arguments
+ options = sqliterepocmdoptions_new();
+ if (!parse_sqliterepo_arguments(&argc, &argv, options, &tmp_err)) {
+ g_printerr("%s\n", tmp_err->message);
+ exit(EXIT_FAILURE);
+ }
+
+ // Set logging
+ cr_setup_logging(FALSE, options->verbose);
+
+ // Print version if required
+ if (options->version) {
+ printf("Version: %d.%d.%d\n", CR_VERSION_MAJOR,
+ CR_VERSION_MINOR,
+ CR_VERSION_PATCH);
+ exit(EXIT_SUCCESS);
+ }
+
+ // Check arguments
+ if (!check_arguments(options, &tmp_err)) {
+ g_printerr("%s\n", tmp_err->message);
+ exit(EXIT_FAILURE);
+ }
+
+ if (argc != 2) {
+ g_printerr("Must specify exactly one repo directory to work on\n");
+ exit(EXIT_FAILURE);
+ }
+
+ // Emit debug message with version
+ g_debug("Version: %d.%d.%d\n", CR_VERSION_MAJOR,
+ CR_VERSION_MINOR,
+ CR_VERSION_PATCH);
+
+ g_thread_init(NULL); // Initialize threading
+
+ // Gen the databases
+ ret = generate_sqlite_from_xml(argv[1],
+ options->compression_type,
+ options->checksum_type,
+ options->local_sqlite,
+ options->force,
+ options->keep_old,
+ &tmp_err);
+ if (!ret) {
+ g_printerr("%s\n", tmp_err->message);
+ exit(EXIT_FAILURE);
+ }
+
+ exit(EXIT_SUCCESS);
+}
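+
+/* Typical invocation (illustrative sketch; the path is hypothetical):
+ *
+ *   $ sqliterepo_c --force /srv/repos/myrepo
+ *
+ * This reads repodata/repomd.xml under the given directory, generates
+ * the compressed *.sqlite databases and writes an updated repomd.xml.
+ */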
#include "error.h"
#include "misc.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
+
/** Parallel Compression */
cr_CompressionTask *
task = g_malloc0(sizeof(cr_CompressionTask));
if (!task) {
- g_set_error(err, CR_THREADS_ERROR, CRE_MEMORY,
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
"Cannot allocate memory");
return NULL;
}
}
void
-cr_compressing_thread(gpointer data, gpointer user_data)
+cr_compressing_thread(gpointer data, G_GNUC_UNUSED gpointer user_data)
{
cr_CompressionTask *task = data;
GError *tmp_err = NULL;
- CR_UNUSED(user_data);
-
assert(task);
if (!task->dst)
}
void
-cr_repomd_record_fill_thread(gpointer data, gpointer user_data)
+cr_repomd_record_fill_thread(gpointer data, G_GNUC_UNUSED gpointer user_data)
{
cr_RepomdRecordFillTask *task = data;
GError *tmp_err = NULL;
- CR_UNUSED(user_data);
-
assert(task);
cr_repomd_record_fill(task->record, task->checksum_type, &tmp_err);
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include "updateinfo.h"
+#include "error.h"
+#include "misc.h"
+#include "checksum.h"
+
+
+/*
+ * cr_UpdateCollectionPackage
+ */
+
+cr_UpdateCollectionPackage *
+cr_updatecollectionpackage_new(void)
+{
+ cr_UpdateCollectionPackage *pkg = g_malloc0(sizeof(*pkg));
+ pkg->chunk = g_string_chunk_new(0);
+ return pkg;
+}
+
+cr_UpdateCollectionPackage *
+cr_updatecollectionpackage_copy(const cr_UpdateCollectionPackage *orig)
+{
+ cr_UpdateCollectionPackage *pkg;
+
+ if (!orig) return NULL;
+
+ pkg = cr_updatecollectionpackage_new();
+
+ pkg->name = cr_safe_string_chunk_insert(pkg->chunk, orig->name);
+ pkg->version = cr_safe_string_chunk_insert(pkg->chunk, orig->version);
+ pkg->release = cr_safe_string_chunk_insert(pkg->chunk, orig->release);
+ pkg->epoch = cr_safe_string_chunk_insert(pkg->chunk, orig->epoch);
+ pkg->arch = cr_safe_string_chunk_insert(pkg->chunk, orig->arch);
+ pkg->src = cr_safe_string_chunk_insert(pkg->chunk, orig->src);
+ pkg->filename = cr_safe_string_chunk_insert(pkg->chunk, orig->filename);
+ pkg->sum = cr_safe_string_chunk_insert(pkg->chunk, orig->sum);
+
+ pkg->sum_type = orig->sum_type;
+ pkg->reboot_suggested = orig->reboot_suggested;
+
+ return pkg;
+}
+
+void
+cr_updatecollectionpackage_free(cr_UpdateCollectionPackage *pkg)
+{
+ if (!pkg)
+ return;
+ g_string_chunk_free(pkg->chunk);
+ g_free(pkg);
+}
+
+
+/*
+ * cr_UpdateCollection
+ */
+
+cr_UpdateCollection *
+cr_updatecollection_new(void)
+{
+ cr_UpdateCollection *collection = g_malloc0(sizeof(*collection));
+ collection->chunk = g_string_chunk_new(0);
+ return collection;
+}
+
+cr_UpdateCollection *
+cr_updatecollection_copy(const cr_UpdateCollection *orig)
+{
+ cr_UpdateCollection *col;
+
+ if (!orig) return NULL;
+
+ col = cr_updatecollection_new();
+
+ col->shortname = cr_safe_string_chunk_insert(col->chunk, orig->shortname);
+ col->name = cr_safe_string_chunk_insert(col->chunk, orig->name);
+
+ if (orig->packages) {
+ GSList *newlist = NULL;
+ for (GSList *elem = orig->packages; elem; elem = g_slist_next(elem)) {
+ cr_UpdateCollectionPackage *pkg = elem->data;
+ newlist = g_slist_prepend(newlist,
+ cr_updatecollectionpackage_copy(pkg));
+ }
+ col->packages = g_slist_reverse(newlist);
+ }
+
+ return col;
+}
+
+void
+cr_updatecollection_free(cr_UpdateCollection *collection)
+{
+ if (!collection)
+ return;
+ cr_slist_free_full(collection->packages,
+ (GDestroyNotify) cr_updatecollectionpackage_free);
+ g_string_chunk_free(collection->chunk);
+ g_free(collection);
+}
+
+void
+cr_updatecollection_append_package(cr_UpdateCollection *collection,
+ cr_UpdateCollectionPackage *pkg)
+{
+ if (!collection || !pkg) return;
+ collection->packages = g_slist_append(collection->packages, pkg);
+}
+
+
+/*
+ * cr_UpdateReference
+ */
+
+cr_UpdateReference *
+cr_updatereference_new(void)
+{
+ cr_UpdateReference *ref = g_malloc0(sizeof(*ref));
+ ref->chunk = g_string_chunk_new(0);
+ return ref;
+}
+
+cr_UpdateReference *
+cr_updatereference_copy(const cr_UpdateReference *orig)
+{
+ cr_UpdateReference *ref;
+
+ if (!orig) return NULL;
+
+ ref = cr_updatereference_new();
+
+ ref->href = cr_safe_string_chunk_insert(ref->chunk, orig->href);
+ ref->id = cr_safe_string_chunk_insert(ref->chunk, orig->id);
+ ref->type = cr_safe_string_chunk_insert(ref->chunk, orig->type);
+ ref->title = cr_safe_string_chunk_insert(ref->chunk, orig->title);
+
+ return ref;
+}
+
+void
+cr_updatereference_free(cr_UpdateReference *ref)
+{
+ if (!ref)
+ return;
+ g_string_chunk_free(ref->chunk);
+ g_free(ref);
+}
+
+
+/*
+ * cr_UpdateRecord
+ */
+
+cr_UpdateRecord *
+cr_updaterecord_new(void)
+{
+ cr_UpdateRecord *rec = g_malloc0(sizeof(*rec));
+ rec->chunk = g_string_chunk_new(0);
+ return rec;
+}
+
+cr_UpdateRecord *
+cr_updaterecord_copy(const cr_UpdateRecord *orig)
+{
+ cr_UpdateRecord *rec;
+
+ if (!orig) return NULL;
+
+ rec = cr_updaterecord_new();
+
+ rec->from = cr_safe_string_chunk_insert(rec->chunk, orig->from);
+ rec->status = cr_safe_string_chunk_insert(rec->chunk, orig->status);
+ rec->type = cr_safe_string_chunk_insert(rec->chunk, orig->type);
+ rec->version = cr_safe_string_chunk_insert(rec->chunk, orig->version);
+ rec->id = cr_safe_string_chunk_insert(rec->chunk, orig->id);
+ rec->title = cr_safe_string_chunk_insert(rec->chunk, orig->title);
+ rec->issued_date = cr_safe_string_chunk_insert(rec->chunk, orig->issued_date);
+ rec->updated_date = cr_safe_string_chunk_insert(rec->chunk, orig->updated_date);
+ rec->rights = cr_safe_string_chunk_insert(rec->chunk, orig->rights);
+ rec->release = cr_safe_string_chunk_insert(rec->chunk, orig->release);
+ rec->pushcount = cr_safe_string_chunk_insert(rec->chunk, orig->pushcount);
+ rec->severity = cr_safe_string_chunk_insert(rec->chunk, orig->severity);
+ rec->summary = cr_safe_string_chunk_insert(rec->chunk, orig->summary);
+ rec->description = cr_safe_string_chunk_insert(rec->chunk, orig->description);
+ rec->solution = cr_safe_string_chunk_insert(rec->chunk, orig->solution);
+
+ if (orig->references) {
+ GSList *newlist = NULL;
+ for (GSList *elem = orig->references; elem; elem = g_slist_next(elem)) {
+ cr_UpdateReference *ref = elem->data;
+ newlist = g_slist_prepend(newlist,
+ cr_updatereference_copy(ref));
+ }
+ rec->references = g_slist_reverse(newlist);
+ }
+
+ if (orig->collections) {
+ GSList *newlist = NULL;
+ for (GSList *elem = orig->collections; elem; elem = g_slist_next(elem)) {
+ cr_UpdateCollection *col = elem->data;
+ newlist = g_slist_prepend(newlist,
+ cr_updatecollection_copy(col));
+ }
+ rec->collections = g_slist_reverse(newlist);
+ }
+
+ return rec;
+}
+
+void
+cr_updaterecord_free(cr_UpdateRecord *rec)
+{
+ if (!rec)
+ return;
+ cr_slist_free_full(rec->references, (GDestroyNotify) cr_updatereference_free);
+ cr_slist_free_full(rec->collections, (GDestroyNotify) cr_updatecollection_free);
+ g_string_chunk_free(rec->chunk);
+ g_free(rec);
+}
+
+void
+cr_updaterecord_append_reference(cr_UpdateRecord *record,
+ cr_UpdateReference *ref)
+{
+ if (!record || !ref) return;
+ record->references = g_slist_append(record->references, ref);
+}
+
+void
+cr_updaterecord_append_collection(cr_UpdateRecord *record,
+ cr_UpdateCollection *collection)
+{
+ if (!record || !collection) return;
+ record->collections = g_slist_append(record->collections, collection);
+}
+
+
+/*
+ * cr_Updateinfo
+ */
+
+cr_UpdateInfo *
+cr_updateinfo_new(void)
+{
+ cr_UpdateInfo *uinfo = g_malloc0(sizeof(*uinfo));
+ return uinfo;
+}
+
+void
+cr_updateinfo_free(cr_UpdateInfo *uinfo)
+{
+ if (!uinfo)
+ return;
+ cr_slist_free_full(uinfo->updates, (GDestroyNotify) cr_updaterecord_free);
+ g_free(uinfo);
+}
+
+void
+cr_updateinfo_apped_record(cr_UpdateInfo *uinfo, cr_UpdateRecord *record)
+{
+ if (!uinfo || !record) return;
+ uinfo->updates = g_slist_append(uinfo->updates, record);
+}
+
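+/* Usage sketch (illustrative; names mirror the API above, error handling
+ * omitted):
+ *
+ *   cr_UpdateInfo *ui = cr_updateinfo_new();
+ *   cr_UpdateRecord *rec = cr_updaterecord_new();
+ *   rec->id = cr_safe_string_chunk_insert(rec->chunk, "RHEA-2013:1777");
+ *   cr_updateinfo_apped_record(ui, rec);  // ui takes ownership of rec
+ *   ...
+ *   cr_updateinfo_free(ui);               // frees the appended records too
+ */
+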
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __C_CREATEREPOLIB_UPDATEINFO_H__
+#define __C_CREATEREPOLIB_UPDATEINFO_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <glib.h>
+#include "checksum.h"
+
+/** \defgroup updateinfo Updateinfo API.
+ *
+ * Module for generating updateinfo.xml.
+ *
+ * \addtogroup updateinfo
+ * @{
+ */
+
+typedef struct {
+ gchar *name;
+ gchar *version;
+ gchar *release;
+ gchar *epoch;
+ gchar *arch;
+ gchar *src;
+ gchar *filename;
+ gchar *sum;
+ cr_ChecksumType sum_type;
+ gboolean reboot_suggested;
+
+ GStringChunk *chunk;
+} cr_UpdateCollectionPackage;
+
+typedef struct {
+ gchar *shortname; /*!< e.g. rhn-tools-rhel-x86_64-server-6.5.aus */
+ gchar *name; /*!< e.g. RHN Tools for RHEL AUS (v. 6.5 for 64-bit x86_64) */
+ GSList *packages; /*!< List of cr_UpdateCollectionPackage */
+ GStringChunk *chunk;
+} cr_UpdateCollection;
+
+typedef struct {
+ gchar *href; /*!< URL (e.g. to related bugzilla, errata, ...) */
+ gchar *id; /*!< id (e.g. 1035288, NULL for errata, ...) */
+ gchar *type; /*!< reference type ("self" for errata, "bugzilla", ...) */
+ gchar *title; /*!< Name of errata, name of bug, etc. */
+ GStringChunk *chunk;
+} cr_UpdateReference;
+
+typedef struct {
+ gchar *from; /*!< Source of the update (e.g. security@redhat.com) */
+ gchar *status; /*!< Update status ("final", ...) */
+ gchar *type; /*!< Update type ("enhancement", "bugfix", ...) */
+ gchar *version; /*!< Update version (probably always an integer number) */
+ gchar *id; /*!< Update id (short update name, e.g. RHEA-2013:1777) */
+ gchar *title; /*!< Update name */
+ gchar *issued_date; /*!< Date string (e.g. "2013-12-02 00:00:00") */
+ gchar *updated_date;/*!< Date string */
+ gchar *rights; /*!< Copyright */
+ gchar *release; /*!< Release */
+ gchar *pushcount; /*!< Push count */
+ gchar *severity; /*!< Severity */
+ gchar *summary; /*!< Short summary */
+ gchar *description; /*!< Update description */
+ gchar *solution; /*!< Solution */
+
+ GSList *references; /*!< List of cr_UpdateReference */
+ GSList *collections;/*!< List of cr_UpdateCollection */
+
+ GStringChunk *chunk;/*!< String chunk */
+} cr_UpdateRecord;
+
+typedef struct {
+ GSList *updates; /*!< List of cr_UpdateRecord */
+} cr_UpdateInfo;
+
+/*
+ * cr_UpdateCollectionPackage
+ */
+
+cr_UpdateCollectionPackage *
+cr_updatecollectionpackage_new(void);
+
+cr_UpdateCollectionPackage *
+cr_updatecollectionpackage_copy(const cr_UpdateCollectionPackage *orig);
+
+void
+cr_updatecollectionpackage_free(cr_UpdateCollectionPackage *pkg);
+
+/*
+ * cr_UpdateCollection
+ */
+
+cr_UpdateCollection *
+cr_updatecollection_new(void);
+
+cr_UpdateCollection *
+cr_updatecollection_copy(const cr_UpdateCollection *orig);
+
+void
+cr_updatecollection_free(cr_UpdateCollection *collection);
+
+void
+cr_updatecollection_append_package(cr_UpdateCollection *collection,
+ cr_UpdateCollectionPackage *pkg);
+
+/*
+ * cr_UpdateReference
+ */
+
+cr_UpdateReference *
+cr_updatereference_new(void);
+
+cr_UpdateReference *
+cr_updatereference_copy(const cr_UpdateReference *orig);
+
+void
+cr_updatereference_free(cr_UpdateReference *ref);
+
+/*
+ * cr_UpdateRecord
+ */
+
+cr_UpdateRecord *
+cr_updaterecord_new(void);
+
+cr_UpdateRecord *
+cr_updaterecord_copy(const cr_UpdateRecord *orig);
+
+void
+cr_updaterecord_free(cr_UpdateRecord *record);
+
+void
+cr_updaterecord_append_reference(cr_UpdateRecord *record,
+ cr_UpdateReference *ref);
+
+void
+cr_updaterecord_append_collection(cr_UpdateRecord *record,
+ cr_UpdateCollection *collection);
+
+/*
+ * cr_Updateinfo
+ */
+
+cr_UpdateInfo *
+cr_updateinfo_new(void);
+
+void
+cr_updateinfo_free(cr_UpdateInfo *uinfo);
+
+void
+cr_updateinfo_apped_record(cr_UpdateInfo *uinfo, cr_UpdateRecord *record);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __C_CREATEREPOLIB_UPDATEINFO_H__ */
xmlCleanupParser();
}
+gboolean cr_hascontrollchars(const unsigned char *str)
+{
+ while (*str) {
+ if (*str < 32 && (*str != 9 && *str != 10 && *str != 13))
+ return TRUE;
+ ++str;
+ }
+
+ return FALSE;
+}
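+
+/* Example (illustrative): tab (9), LF (10) and CR (13) are tolerated,
+ * any other byte below 32 is not:
+ *
+ *   cr_hascontrollchars((const unsigned char *) "a\tb\n");   // FALSE
+ *   cr_hascontrollchars((const unsigned char *) "a\001b");   // TRUE
+ */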
void
cr_latin1_to_utf8(const unsigned char *in, unsigned char *out)
// This function converts latin1 to utf8 in effective and thread-safe way.
while (*in) {
if (*in<128) {
+ if (*in < 32 && (*in != 9 && *in != 10 && *in != 13)) {
+ ++in;
+ continue;
+ }
*out++=*in++;
} else if (*in<192) {
// Found latin1 (iso-8859-1) control code.
if (!orig_content) {
content = BAD_CAST "";
- } else if (xmlCheckUTF8(orig_content)) {
+ } else if (xmlCheckUTF8(orig_content) && !cr_hascontrollchars(orig_content)) {
content = (xmlChar *) orig_content;
} else {
size_t len = strlen((const char *) orig_content);
extern "C" {
#endif
+#include <glib.h>
+#include "deltarpms.h"
#include "package.h"
#include "repomd.h"
+#include "updateinfo.h"
/** \defgroup xml_dump XML dump API.
*
* cr_xml_dump_init();
* cr_package_parser_init();
*
- * pkg = cr_package_from_rpm("path/to/rpm.rpm", CR_CHECKSUM_SHA256,
- * "repodata/rpm.rpm", NULL, 10, NULL);
+ * pkg = cr_package_from_rpm_base("path/to/rpm.rpm", 5, CR_HDRR_NONE, NULL);
*
* xml = cr_xml_dump(pkg, NULL);
*
*/
char *cr_xml_dump_repomd(cr_Repomd *repomd, GError **err);
+/** Generate xml representation of cr_UpdateInfo.
+ * @param updateinfo cr_UpdateInfo
+ * @param err **GError
+ * @return updateinfo.xml content
+ */
+char *cr_xml_dump_updateinfo(cr_UpdateInfo *updateinfo, GError **err);
+
+/** Generate xml representation of cr_UpdateRecord
+ * @param rec cr_UpdateRecord
+ * @param err **GError
+ * @return xml chunk string or NULL on error
+ */
+char *cr_xml_dump_updaterecord(cr_UpdateRecord *rec, GError **err);
+
+/** Generate xml representation of cr_DeltaPackage
+ * @param dpkg cr_DeltaPackage
+ * @param err **GError
+ * @return xml chunk string or NULL on error
+ */
+char *cr_xml_dump_deltapackage(cr_DeltaPackage *dpkg, GError **err);
+
/** Prepare string to xml dump.
* If string is not utf8 it is converted (source encoding is supposed to be
* iso-8859-1).
+ * Control chars (chars with value <32 except 9, 10 and 13) are excluded.
+ *
* @param in input string.
* @param out output string. space of output string must be
* at least (strlen(in) * 2 + 1) * sizeof(char)
*/
-void cr_latin1_to_utf8(const unsigned char *in, unsigned char *out);
+void cr_latin1_to_utf8(const unsigned char *in,
+ unsigned char *out) __attribute__ ((hot));
+
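+/* Example (illustrative): the output buffer must hold up to two bytes per
+ * input byte, plus the terminating NUL:
+ *
+ *   const unsigned char in[] = "caf\xe9";        // latin1 "café"
+ *   unsigned char out[sizeof(in) * 2 + 1];
+ *   cr_latin1_to_utf8(in, out);                  // out: UTF-8 "café"
+ */
+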
+/**
+ * Check if string contains chars with value <32 (except 9, 10 and 13).
+ *
+ * @param str String (must not be NULL)
+ * @return TRUE if at least one char with value <32 (except
+ * 9, 10, 13) is present in the string.
+ */
+gboolean cr_hascontrollchars(const unsigned char *str);
/** @} */
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <libxml/encoding.h>
+#include <libxml/xmlwriter.h>
+#include <libxml/xmlsave.h>
+#include "deltarpms.h"
+#include "error.h"
+#include "misc.h"
+#include "package.h"
+#include "xml_dump.h"
+#include "xml_dump_internal.h"
+
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+#define INDENT 4
+
+void
+cr_xml_dump_delta(xmlNodePtr root, cr_DeltaPackage *package)
+{
+ /***********************************
+ Element: delta
+ ************************************/
+
+ cr_NEVR * nevr = cr_str_to_nevr(package->nevr);
+
+ // Add oldepoch attribute
+ cr_xmlNewProp(root, BAD_CAST "oldepoch",
+ BAD_CAST ((nevr->epoch && *(nevr->epoch)) ? nevr->epoch : "0"));
+
+ // Add oldversion attribute
+ cr_xmlNewProp(root, BAD_CAST "oldversion", BAD_CAST nevr->version);
+
+ // Add oldrelease attribute
+ cr_xmlNewProp(root, BAD_CAST "oldrelease", BAD_CAST nevr->release);
+
+ cr_nevr_free(nevr);
+
+ /***********************************
+ Element: filename
+ ************************************/
+
+ cr_xmlNewTextChild(root, NULL,
+ BAD_CAST "filename",
+ BAD_CAST package->package->location_href);
+
+ /***********************************
+ Element: sequence
+ ************************************/
+
+ char *sequence = g_strconcat(package->nevr, "-", package->sequence, NULL);
+ cr_xmlNewTextChild(root, NULL,
+ BAD_CAST "sequence",
+ BAD_CAST sequence);
+ g_free(sequence);
+
+ /***********************************
+ Element: size
+ ************************************/
+
+ char size_str[SIZE_STR_MAX_LEN];
+
+ g_snprintf(size_str, SIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT,
+ package->package->size_package);
+
+ cr_xmlNewTextChild(root, NULL, BAD_CAST "size", BAD_CAST size_str);
+
+ /***********************************
+ Element: checksum
+ ************************************/
+
+ xmlNodePtr checksum;
+
+ checksum = cr_xmlNewTextChild(root,
+ NULL,
+ BAD_CAST "checksum",
+ BAD_CAST package->package->pkgId);
+
+ cr_xmlNewProp(checksum,
+ BAD_CAST "type",
+ BAD_CAST package->package->checksum_type);
+}
+
+
+char *
+cr_xml_dump_deltapackage(cr_DeltaPackage *package, GError **err)
+{
+ xmlNodePtr root;
+ char *result;
+
+ assert(!err || *err == NULL);
+
+ if (!package) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "No package object to dump specified");
+ return NULL;
+ }
+
+
+ // Dump IT!
+
+ xmlBufferPtr buf = xmlBufferCreate();
+ if (buf == NULL) {
+ g_critical("%s: Error creating the xml buffer", __func__);
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
+ "Cannot create an xml buffer");
+ return NULL;
+ }
+
+ root = xmlNewNode(NULL, BAD_CAST "delta");
+ cr_xml_dump_delta(root, package);
+ // xmlNodeDump seems to be a little bit faster than xmlDocDumpFormatMemory
+ xmlNodeDump(buf, NULL, root, 2, FORMAT_XML);
+ assert(buf->content);
+ // First line in the buf is not indented, we must indent it ourselves
+ result = g_malloc(buf->use + INDENT + 2); // content + indent + '\n' + '\0'
+ for (int x = 0; x < INDENT; x++) result[x] = ' ';
+ memcpy((void *) result+INDENT, buf->content, buf->use);
+ result[buf->use + INDENT] = '\n';
+ result[buf->use + INDENT + 1] = '\0';
+
+
+ // Cleanup
+
+ xmlBufferFree(buf);
+ xmlFreeNode(root);
+
+ return result;
+}
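+
+/* Usage sketch (illustrative; assumes a populated cr_DeltaPackage *dpkg):
+ *
+ *   GError *err = NULL;
+ *   char *xml = cr_xml_dump_deltapackage(dpkg, &err);
+ *   if (xml) {
+ *       fputs(xml, stdout);  // indented "<delta ...>...</delta>\n" chunk
+ *       g_free(xml);
+ *   }
+ */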
#include "xml_dump.h"
#include "xml_dump_internal.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
void
cr_xml_dump_filelists_items(xmlNodePtr root, cr_Package *package)
assert(!err || *err == NULL);
- if (!package)
+ if (!package) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "No package object to dump specified");
return NULL;
+ }
// Dump IT!
xmlBufferPtr buf = xmlBufferCreate();
if (buf == NULL) {
g_critical("%s: Error creating the xml buffer", __func__);
- g_set_error(err, CR_XML_DUMP_FILELISTS_ERROR, CRE_MEMORY,
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
"Cannot create an xml buffer");
return NULL;
}
const xmlChar *name,
const xmlChar *content);
+/** Inserts a new node only if its value is not NULL
+ */
+static inline xmlNodePtr
+cr_xmlNewTextChild_c(xmlNodePtr parent,
+ xmlNsPtr ns,
+ const xmlChar *name,
+ const xmlChar *content)
+{
+ if (!content)
+ return NULL;
+ return cr_xmlNewTextChild(parent, ns, name, content);
+}
+
/** Createrepo_c wrapper over the libxml xmlNewProp.
* It allows content to be NULL and non UTF-8 (if content is no UTF8
* then iso-8859-1 is assumed)
const xmlChar *name,
const xmlChar *value);
+/** Inserts a new property (attribute) only if its value is not NULL
+ */
+static inline xmlAttrPtr
+cr_xmlNewProp_c(xmlNodePtr node,
+ const xmlChar *name,
+ const xmlChar *orig_content)
+{
+ if (!orig_content)
+ return NULL;
+ return cr_xmlNewProp(node, name, orig_content);
+}
+
+
#ifdef __cplusplus
}
#endif
#include "xml_dump.h"
#include "xml_dump_internal.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
void
cr_xml_dump_other_changelog(xmlNodePtr root, cr_Package *package)
assert(!err || *err == NULL);
- if (!package)
+ if (!package) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "No package object to dump specified");
return NULL;
+ }
// Dump IT!
xmlBufferPtr buf = xmlBufferCreate();
if (buf == NULL) {
g_critical("%s: Error creating the xml buffer", __func__);
- g_set_error(err, CR_XML_DUMP_OTHER_ERROR, CRE_MEMORY,
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
"Cannot create an xml buffer");
return NULL;
}
#include "xml_dump.h"
#include "xml_dump_internal.h"
-#define PROVIDES 0
-#define CONFLICTS 1
-#define OBSOLETES 2
-#define REQUIRES 3
-
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
+
+typedef enum {
+ PCO_TYPE_PROVIDES,
+ PCO_TYPE_CONFLICTS,
+ PCO_TYPE_OBSOLETES,
+ PCO_TYPE_REQUIRES,
+ PCO_TYPE_SUGGESTS,
+ PCO_TYPE_ENHANCES,
+ PCO_TYPE_RECOMMENDS,
+ PCO_TYPE_SUPPLEMENTS,
+ PCO_TYPE_SENTINEL,
+} PcoType;
+
+typedef struct {
+ const char *elemname;
+ size_t listoffset;
+} PcoInfo;
+
+// Order of this list MUST be the same as the order of the related constants above!
+static PcoInfo pco_info[] = {
+ { "rpm:provides", offsetof(cr_Package, provides) },
+ { "rpm:conflicts", offsetof(cr_Package, conflicts) },
+ { "rpm:obsoletes", offsetof(cr_Package, obsoletes) },
+ { "rpm:requires", offsetof(cr_Package, requires) },
+ { "rpm:suggests", offsetof(cr_Package, suggests) },
+ { "rpm:enhances", offsetof(cr_Package, enhances) },
+ { "rpm:recommends", offsetof(cr_Package, recommends) },
+ { "rpm:supplements", offsetof(cr_Package, supplements) },
+ { NULL, 0 },
+};
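+
+/* Example (illustrative): for PCO_TYPE_REQUIRES, the offsetof-based lookup
+ * used below resolves to package->requires:
+ *
+ *   GSList *list = *((GSList **) ((size_t) package
+ *                                 + pco_info[PCO_TYPE_REQUIRES].listoffset));
+ */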
void
-cr_xml_dump_primary_dump_pco(xmlNodePtr root, cr_Package *package, int pcotype)
+cr_xml_dump_primary_dump_pco(xmlNodePtr root, cr_Package *package, PcoType pcotype)
{
const char *elem_name;
- GSList *files = NULL;
-
- const char *provides = "rpm:provides";
- const char *conflicts = "rpm:conflicts";
- const char *obsoletes = "rpm:obsoletes";
- const char *requires = "rpm:requires";
-
- if (pcotype == PROVIDES) {
- elem_name = provides;
- files = package->provides;
- } else if (pcotype == CONFLICTS) {
- elem_name = conflicts;
- files = package->conflicts;
- } else if (pcotype == OBSOLETES) {
- elem_name = obsoletes;
- files = package->obsoletes;
- } else if (pcotype == REQUIRES) {
- elem_name = requires;
- files = package->requires;
- } else {
+ GSList *list = NULL;
+
+ if (pcotype >= PCO_TYPE_SENTINEL)
return;
- }
- if (!files) {
+ elem_name = pco_info[pcotype].elemname;
+ list = *((GSList **) ((size_t) package + pco_info[pcotype].listoffset));
+
+ if (!list)
return;
- }
/***********************************
pcor_node = xmlNewChild(root, NULL, BAD_CAST elem_name, NULL);
GSList *element = NULL;
- for(element = files; element; element=element->next) {
+ for(element = list; element; element=element->next) {
cr_Dependency *entry = (cr_Dependency*) element->data;
}
}
- if (pcotype == REQUIRES && entry->pre) {
+ if (pcotype == PCO_TYPE_REQUIRES && entry->pre) {
// Add pre attribute
xmlNewProp(entry_node, BAD_CAST "pre", BAD_CAST "1");
}
// Files dump
- cr_xml_dump_primary_dump_pco(format, package, PROVIDES);
- cr_xml_dump_primary_dump_pco(format, package, REQUIRES);
- cr_xml_dump_primary_dump_pco(format, package, CONFLICTS);
- cr_xml_dump_primary_dump_pco(format, package, OBSOLETES);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_PROVIDES);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_REQUIRES);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_CONFLICTS);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_OBSOLETES);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_SUGGESTS);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_ENHANCES);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_RECOMMENDS);
+ cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_SUPPLEMENTS);
cr_xml_dump_files(format, package, 1);
}
assert(!err || *err == NULL);
- if (!package)
+ if (!package) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "No package object to dump specified");
return NULL;
-
+ }
// Dump IT!
xmlBufferPtr buf = xmlBufferCreate();
if (buf == NULL) {
g_critical("%s: Error creating the xml buffer", __func__);
- g_set_error(err, CR_XML_DUMP_PRIMARY_ERROR, CRE_MEMORY,
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
"Cannot create an xml buffer");
return NULL;
}
BAD_CAST repomd->repoid_type);
}
+ // **********************************
+ // Element: Contenthash
+ // **********************************
+
+ if (repomd->contenthash) {
+ xmlNodePtr contenthash_elem = cr_xmlNewTextChild(root,
+ NULL,
+ BAD_CAST "contenthash",
+ BAD_CAST repomd->contenthash);
+ if (repomd->contenthash_type)
+ cr_xmlNewProp(contenthash_elem,
+ BAD_CAST "type",
+ BAD_CAST repomd->contenthash_type);
+ }
+
// **********************************
// Element: Tags
// **********************************
assert(!err || *err == NULL);
- if (!repomd)
+ if (!repomd) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "No repomd object to dump specified");
return NULL;
+ }
// Dump IT!
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <libxml/encoding.h>
+#include <libxml/xmlwriter.h>
+#include <libxml/xmlsave.h>
+#include "error.h"
+#include "updateinfo.h"
+#include "xml_dump.h"
+#include "xml_dump_internal.h"
+
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+#define INDENT 2
+
+void
+cr_xml_dump_updatecollectionpackages(xmlNodePtr collection, GSList *packages)
+{
+ for (GSList *elem = packages; elem; elem = g_slist_next(elem)) {
+ cr_UpdateCollectionPackage *pkg = elem->data;
+ xmlNodePtr package;
+
+ package = xmlNewChild(collection, NULL, BAD_CAST "package", NULL);
+ cr_xmlNewProp_c(package, BAD_CAST "name", BAD_CAST pkg->name);
+ cr_xmlNewProp_c(package, BAD_CAST "version", BAD_CAST pkg->version);
+ cr_xmlNewProp_c(package, BAD_CAST "release", BAD_CAST pkg->release);
+ cr_xmlNewProp_c(package, BAD_CAST "epoch", BAD_CAST pkg->epoch);
+ cr_xmlNewProp_c(package, BAD_CAST "arch", BAD_CAST pkg->arch);
+ cr_xmlNewProp_c(package, BAD_CAST "src", BAD_CAST pkg->src);
+ cr_xmlNewTextChild_c(package, NULL, BAD_CAST "filename",
+ BAD_CAST pkg->filename);
+
+ if (pkg->sum) {
+ xmlNodePtr sum;
+ sum = cr_xmlNewTextChild_c(package,
+ NULL,
+ BAD_CAST "sum",
+ BAD_CAST pkg->sum);
+ cr_xmlNewProp_c(sum,
+ BAD_CAST "type",
+ BAD_CAST cr_checksum_name_str(pkg->sum_type));
+ }
+
+ if (pkg->reboot_suggested)
+ xmlNewChild(package, NULL, BAD_CAST "reboot_suggested", NULL);
+ }
+}
+
+void
+cr_xml_dump_updateinforecord_pkglist(xmlNodePtr update, GSList *collections)
+{
+ xmlNodePtr pkglist;
+
+ pkglist = xmlNewChild(update, NULL, BAD_CAST "pkglist", NULL);
+
+ for (GSList *elem = collections; elem; elem = g_slist_next(elem)) {
+ cr_UpdateCollection *col = elem->data;
+ xmlNodePtr collection;
+
+ collection = xmlNewChild(pkglist, NULL, BAD_CAST "collection", NULL);
+ cr_xmlNewProp_c(collection, BAD_CAST "short", BAD_CAST col->shortname);
+ cr_xmlNewTextChild_c(collection,
+ NULL,
+ BAD_CAST "name",
+ BAD_CAST col->name);
+
+ cr_xml_dump_updatecollectionpackages(collection, col->packages);
+ }
+}
+
+void
+cr_xml_dump_updateinforecord_references(xmlNodePtr update, GSList *refs)
+{
+ xmlNodePtr references;
+ references = xmlNewChild(update, NULL, BAD_CAST "references", NULL);
+
+ for (GSList *elem = refs; elem; elem = g_slist_next(elem)) {
+ cr_UpdateReference *ref = elem->data;
+ xmlNodePtr reference;
+
+ reference = xmlNewChild(references, NULL, BAD_CAST "reference", NULL);
+ cr_xmlNewProp_c(reference, BAD_CAST "href", BAD_CAST ref->href);
+ cr_xmlNewProp_c(reference, BAD_CAST "id", BAD_CAST ref->id);
+ cr_xmlNewProp_c(reference, BAD_CAST "type", BAD_CAST ref->type);
+ cr_xmlNewProp_c(reference, BAD_CAST "title", BAD_CAST ref->title);
+ }
+}
+
+xmlNodePtr
+cr_xml_dump_updateinforecord_internal(xmlNodePtr root, cr_UpdateRecord *rec)
+{
+ xmlNodePtr update, node;
+
+ if (!rec)
+ return NULL;
+
+ // Update element
+ if (!root)
+ update = xmlNewNode(NULL, BAD_CAST "update");
+ else
+ update = xmlNewChild(root, NULL, BAD_CAST "update", NULL);
+
+ cr_xmlNewProp_c(update, BAD_CAST "from", BAD_CAST rec->from);
+ cr_xmlNewProp_c(update, BAD_CAST "status", BAD_CAST rec->status);
+ cr_xmlNewProp_c(update, BAD_CAST "type", BAD_CAST rec->type);
+ cr_xmlNewProp_c(update, BAD_CAST "version", BAD_CAST rec->version);
+
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "id", BAD_CAST rec->id);
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "title", BAD_CAST rec->title);
+
+ if (rec->issued_date) {
+ node = xmlNewChild(update, NULL, BAD_CAST "issued", NULL);
+ cr_xmlNewProp(node, BAD_CAST "date", BAD_CAST rec->issued_date);
+ }
+
+ if (rec->updated_date) {
+ node = xmlNewChild(update, NULL, BAD_CAST "updated", NULL);
+ cr_xmlNewProp(node, BAD_CAST "date", BAD_CAST rec->updated_date);
+ }
+
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "rights", BAD_CAST rec->rights);
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "release", BAD_CAST rec->release);
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "pushcount", BAD_CAST rec->pushcount);
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "severity", BAD_CAST rec->severity);
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "summary", BAD_CAST rec->summary);
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "description", BAD_CAST rec->description);
+ cr_xmlNewTextChild_c(update, NULL, BAD_CAST "solution", BAD_CAST rec->solution);
+
+ // References
+ cr_xml_dump_updateinforecord_references(update, rec->references);
+
+ // Pkglist
+ cr_xml_dump_updateinforecord_pkglist(update, rec->collections);
+
+ return update;
+}
+
+
+void
+cr_xml_dump_updateinfo_body(xmlNodePtr root, cr_UpdateInfo *ui)
+{
+ GSList *element;
+
+ // Dump updates
+ for (element = ui->updates; element; element = g_slist_next(element)) {
+ cr_UpdateRecord *rec = element->data;
+ cr_xml_dump_updateinforecord_internal(root, rec);
+ }
+}
+
+
+char *
+cr_xml_dump_updateinfo(cr_UpdateInfo *updateinfo, GError **err)
+{
+ xmlDocPtr doc;
+ xmlNodePtr root;
+ char *result;
+
+ assert(!err || *err == NULL);
+
+ if (!updateinfo) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "No updateinfo object to dump specified");
+ return NULL;
+ }
+
+ // Dump IT!
+
+ doc = xmlNewDoc(BAD_CAST XML_DOC_VERSION);
+ root = xmlNewNode(NULL, BAD_CAST "updates");
+ cr_xml_dump_updateinfo_body(root, updateinfo);
+ xmlDocSetRootElement(doc, root);
+ xmlDocDumpFormatMemoryEnc(doc,
+ (xmlChar **) &result,
+ NULL,
+ XML_ENCODING,
+ FORMAT_XML);
+
+ // Clean up
+
+ xmlFreeDoc(doc);
+
+ return result;
+}
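+
+/* Usage sketch (illustrative; uinfo is a populated cr_UpdateInfo):
+ *
+ *   char *xml = cr_xml_dump_updateinfo(uinfo, NULL);
+ *   // xml now holds the complete <updates>...</updates> document
+ */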
+
+char *
+cr_xml_dump_updaterecord(cr_UpdateRecord *rec, GError **err)
+{
+ xmlNodePtr root;
+ char *result;
+
+ assert(!err || *err == NULL);
+
+ if (!rec) {
+ g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG,
+ "No updateinfo object to dump specified");
+ return NULL;
+ }
+
+ // Dump IT!
+
+ xmlBufferPtr buf = xmlBufferCreate();
+ if (buf == NULL) {
+ g_critical("%s: Error creating the xml buffer", __func__);
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
+ "Cannot create an xml buffer");
+ return NULL;
+ }
+
+ root = cr_xml_dump_updateinforecord_internal(NULL, rec);
+ // xmlNodeDump seems to be a little bit faster than xmlDocDumpFormatMemory
+ xmlNodeDump(buf, NULL, root, 1, FORMAT_XML);
+ assert(buf->content);
+ // First line in the buf is not indented, we must indent it ourselves
+ result = g_malloc(buf->use + INDENT + 2); // content + indent + '\n' + '\0'
+ for (int x = 0; x < INDENT; x++) result[x] = ' ';
+ memcpy((void *) result+INDENT, buf->content, buf->use);
+ result[buf->use + INDENT] = '\n';
+ result[buf->use + INDENT + 1] = '\0';
+
+ // Cleanup
+
+ xmlBufferFree(buf);
+ xmlFreeNode(root);
+
+ return result;
+}
#include "compression_wrapper.h"
#include "xml_dump_internal.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
#define XML_HEADER "<?xml version=\""XML_DOC_VERSION \
"\" encoding=\""XML_ENCODING"\"?>\n"
CR_XML_FILELISTS_NS"\" packages=\"%d\">\n"
#define XML_OTHER_HEADER XML_HEADER"<otherdata xmlns=\"" \
CR_XML_OTHER_NS"\" packages=\"%d\">\n"
+#define XML_PRESTODELTA_HEADER XML_HEADER"<prestodelta>\n"
+#define XML_UPDATEINFO_HEADER XML_HEADER"<updates>\n"
#define XML_PRIMARY_FOOTER "</metadata>"
#define XML_FILELISTS_FOOTER "</filelists>"
#define XML_OTHER_FOOTER "</otherdata>"
+#define XML_PRESTODELTA_FOOTER "</prestodelta>"
+#define XML_UPDATEINFO_FOOTER "</updates>"
cr_XmlFile *
cr_xmlfile_sopen(const char *filename,
assert(!err || *err == NULL);
if (g_file_test(filename, G_FILE_TEST_EXISTS)) {
- g_set_error(err, CR_XML_FILE_ERROR, CRE_EXISTS,
+ g_set_error(err, ERR_DOMAIN, CRE_EXISTS,
"File already exists");
return NULL;
}
comtype,
stat,
&tmp_err);
- if (tmp_err) {
+ if (!cr_f) {
g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", filename);
return NULL;
}
cr_xmlfile_set_num_of_pkgs(cr_XmlFile *f, long num, GError **err)
{
assert(f);
- assert(f->header == 0);
- assert(num >= 0);
assert(!err || *err == NULL);
+ if (f->header != 0) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "Header was already written");
+ return CRE_BADARG;
+ }
+
+ if (num < 0) {
+ g_set_error(err, ERR_DOMAIN, CRE_BADARG,
+ "The number must be a positive integer number");
+ return CRE_BADARG;
+ }
+
f->pkgs = num;
return CRE_OK;
}
case CR_XMLFILE_OTHER:
xml_header = XML_OTHER_HEADER;
break;
+ case CR_XMLFILE_PRESTODELTA:
+ xml_header = XML_PRESTODELTA_HEADER;
+ break;
+ case CR_XMLFILE_UPDATEINFO:
+ xml_header = XML_UPDATEINFO_HEADER;
+ break;
default:
g_critical("%s: Bad file type", __func__);
assert(0);
- g_set_error(err, CR_XML_FILE_ERROR, CRE_ASSERT, "Bad file type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad file type");
return CRE_ASSERT;
}
case CR_XMLFILE_OTHER:
xml_footer = XML_OTHER_FOOTER;
break;
+ case CR_XMLFILE_PRESTODELTA:
+ xml_footer = XML_PRESTODELTA_FOOTER;
+ break;
+ case CR_XMLFILE_UPDATEINFO:
+ xml_footer = XML_UPDATEINFO_FOOTER;
+ break;
default:
g_critical("%s: Bad file type", __func__);
assert(0);
- g_set_error(err, CR_XML_FILE_ERROR, CRE_ASSERT, "Bad file type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad file type");
return CRE_ASSERT;
}
default:
g_critical("%s: Bad file type", __func__);
assert(0);
- g_set_error(err, CR_XML_FILE_ERROR, CRE_ASSERT, "Bad file type");
+ g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad file type");
return CRE_ASSERT;
}
CR_XMLFILE_PRIMARY, /*!< primary.xml */
CR_XMLFILE_FILELISTS, /*!< filelists.xml */
CR_XMLFILE_OTHER, /*!< other.xml */
+ CR_XMLFILE_PRESTODELTA, /*!< prestodelta.xml */
+ CR_XMLFILE_UPDATEINFO, /*!< updateinfo.xml */
CR_XMLFILE_SENTINEL, /*!< sentinel of the list */
} cr_XmlFileType;
#define cr_xmlfile_sopen_other(FILENAME, COMTYPE, STAT, ERR) \
cr_xmlfile_sopen(FILENAME, CR_XMLFILE_OTHER, COMTYPE, STAT, ERR)
+/** Open a new prestodelta XML file.
+ * @param FILENAME Filename.
+ * @param COMTYPE Type of used compression.
+ * @param ERR GError **
+ * @return Opened cr_XmlFile or NULL on error
+ */
+#define cr_xmlfile_open_prestodelta(FILENAME, COMTYPE, ERR) \
+ cr_xmlfile_open(FILENAME, CR_XMLFILE_PRESTODELTA, COMTYPE, ERR)
+
+/** Open a new prestodelta XML file with stats.
+ * @param FILENAME Filename.
+ * @param COMTYPE Type of compression.
+ * @param STAT cr_ContentStat object or NULL.
+ * @param ERR GError **
+ * @return Opened cr_XmlFile or NULL on error
+ */
+#define cr_xmlfile_sopen_prestodelta(FILENAME, COMTYPE, STAT, ERR) \
+ cr_xmlfile_sopen(FILENAME, CR_XMLFILE_PRESTODELTA, COMTYPE, STAT, ERR)
+
+/** Open a new updateinfo XML file.
+ * @param FILENAME Filename.
+ * @param COMTYPE Type of used compression.
+ * @param ERR GError **
+ * @return Opened cr_XmlFile or NULL on error
+ */
+#define cr_xmlfile_open_updateinfo(FILENAME, COMTYPE, ERR) \
+ cr_xmlfile_open(FILENAME, CR_XMLFILE_UPDATEINFO, COMTYPE, ERR)
+
+/** Open a new updateinfo XML file with stats.
+ * @param FILENAME Filename.
+ * @param COMTYPE Type of compression.
+ * @param STAT cr_ContentStat object or NULL.
+ * @param ERR GError **
+ * @return Opened cr_XmlFile or NULL on error
+ */
+#define cr_xmlfile_sopen_updateinfo(FILENAME, COMTYPE, STAT, ERR) \
+ cr_xmlfile_sopen(FILENAME, CR_XMLFILE_UPDATEINFO, COMTYPE, STAT, ERR)
+
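+/* Usage sketch (illustrative; cr_xmlfile_add_chunk() and cr_xmlfile_close()
+ * are assumed from the existing writer API of this header):
+ *
+ *   GError *err = NULL;
+ *   cr_XmlFile *f = cr_xmlfile_open_updateinfo("repodata/updateinfo.xml.gz",
+ *                                              CR_CW_GZ_COMPRESSION, &err);
+ *   cr_xmlfile_add_chunk(f, record_xml, &err); // e.g. cr_xml_dump_updaterecord()
+ *   cr_xmlfile_close(f, &err);
+ */
+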
/** Open a new XML file with stats.
* Note: Opened file must not exists! This function cannot
* open existing file!.
#include "xml_parser_internal.h"
#include "misc.h"
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+
+
cr_ParserData *
cr_xml_parser_data(unsigned int numstates)
{
g_propagate_prefixed_error(&pd->err, tmp_err,
"Parsing interrupted: ");
else
- g_set_error(&pd->err, CR_XML_PARSER_ERROR, CRE_CBINTERRUPTED,
+ g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED,
"Parsing interrupted");
}
int
cr_newpkgcb(cr_Package **pkg,
- const char *pkgId,
- const char *name,
- const char *arch,
- void *cbdata,
+ G_GNUC_UNUSED const char *pkgId,
+ G_GNUC_UNUSED const char *name,
+ G_GNUC_UNUSED const char *arch,
+ G_GNUC_UNUSED void *cbdata,
GError **err)
{
- CR_UNUSED(pkgId);
- CR_UNUSED(name);
- CR_UNUSED(arch);
- CR_UNUSED(cbdata);
-
assert(pkg && *pkg == NULL);
assert(!err || *err == NULL);
void *buf = XML_GetBuffer(parser, XML_BUFFER_SIZE);
if (!buf) {
ret = CRE_MEMORY;
- g_set_error(err, CR_XML_PARSER_ERROR, CRE_MEMORY,
- "Out of memory: Cannot allocate buffer for xml parser");
+ g_set_error(err, ERR_DOMAIN, CRE_MEMORY,
+ "Out of memory: Cannot allocate buffer for xml parser '%s'",
+ path);
break;
}
len = cr_read(f, buf, XML_BUFFER_SIZE, &tmp_err);
if (tmp_err) {
ret = tmp_err->code;
- g_critical("%s: Error while reading xml : %s\n",
- __func__, tmp_err->message);
+ g_critical("%s: Error while reading xml '%s': %s",
+ __func__, path, tmp_err->message);
g_propagate_prefixed_error(err, tmp_err, "Read error: ");
break;
}
if (!XML_ParseBuffer(parser, len, len == 0)) {
ret = CRE_XMLPARSER;
- g_critical("%s: parsing error: %s\n",
- __func__, XML_ErrorString(XML_GetErrorCode(parser)));
- g_set_error(err, CR_XML_PARSER_ERROR, CRE_XMLPARSER,
- "Parse error at line: %d (%s)",
+ g_critical("%s: parsing error '%s': %s",
+ __func__,
+ path,
+ XML_ErrorString(XML_GetErrorCode(parser)));
+ g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER,
+ "Parse error '%s' at line: %d (%s)",
+ path,
(int) XML_GetCurrentLineNumber(parser),
(char *) XML_ErrorString(XML_GetErrorCode(parser)));
break;
extern "C" {
#endif
+#include <glib.h>
#include "package.h"
#include "repomd.h"
+#include "updateinfo.h"
/** \defgroup xml_parser XML parser API.
* \addtogroup xml_parser
CR_XML_WARNING_UNKNOWNVAL, /*!< Unknown tag or attribute value */
CR_XML_WARNING_BADATTRVAL, /*!< Bad attribute value */
CR_XML_WARNING_MISSINGVAL, /*!< Missing tag value */
+ CR_XML_WARNING_BADMDTYPE, /*!< Bad metadata type (expected mandatory tag was not found) */
CR_XML_WARNING_SENTINEL,
} cr_XmlParserWarningType;
void *warningcb_data,
GError **err);
+/** Parse updateinfo.xml. The file may be compressed.
+ * @param path Path to updateinfo.xml
+ * @param updateinfo cr_UpdateInfo object.
+ * @param warningcb Callback for warning messages.
+ * @param warningcb_data User data for the warningcb.
+ * @param err GError **
+ * @return cr_Error code.
+ */
+int
+cr_xml_parse_updateinfo(const char *path,
+ cr_UpdateInfo *updateinfo,
+ cr_XmlParserWarningCb warningcb,
+ void *warningcb_data,
+ GError **err);
+
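+/* Usage sketch (illustrative; the path is hypothetical):
+ *
+ *   cr_UpdateInfo *ui = cr_updateinfo_new();
+ *   GError *err = NULL;
+ *   int rc = cr_xml_parse_updateinfo("repodata/updateinfo.xml.gz",
+ *                                    ui, NULL, NULL, &err);
+ *   if (rc != CRE_OK)
+ *       g_printerr("%s\n", err->message);
+ */
+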
/** @} */
#ifdef __cplusplus
#include "package.h"
#include "misc.h"
-#define ERR_DOMAIN CR_XML_PARSER_FIL_ERROR
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define ERR_CODE_XML CRE_BADXMLFILELISTS
typedef enum {
switch(pd->state) {
case STATE_START:
+ break;
+
case STATE_FILELISTS:
+ pd->main_tag_found = TRUE;
break;
case STATE_PACKAGE: {
}
static void XMLCALL
-cr_end_handler(void *pdata, const char *element)
+cr_end_handler(void *pdata, G_GNUC_UNUSED const char *element)
{
cr_ParserData *pd = pdata;
GError *tmp_err = NULL;
unsigned int state = pd->state;
- CR_UNUSED(element);
-
if (pd->err)
return; // There was an error -> do nothing
if (tmp_err)
g_propagate_error(err, tmp_err);
+ // Warn if the file was probably of a different type than expected
+
+ if (!pd->main_tag_found && ret == CRE_OK)
+ cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE,
+ "The file don't contain the expected element "
+ "\"<filelists>\" - The file probably isn't "
+ "a valid filelists.xml");
+
// Clean up
if (ret != CRE_OK && newpkgcb == cr_newpkgcb) {
#include "error.h"
#include "package.h"
#include "repomd.h"
+#include "updateinfo.h"
#define XML_BUFFER_SIZE 8192
#define CONTENT_REALLOC_STEP 256
cr_StatesSwitch **swtab; /*!< Pointers to statesswitches table */
unsigned int *sbtab; /*!< stab[to_state] = from_state */
+ /* Common stuff */
+
+ gboolean main_tag_found; /*!<
+ Was the main tag present? E.g.:
+ For primary.xml <metadata>
+ For filelists.xml <filelists>
+ For other.xml <otherdata>
+ For repomd.xml <repomd>
+ For updateinfo.xml <updates>
+ If the main tag is missing, the input file was most likely
+ of a different type than expected */
+
/* Package stuff */
void *newpkgcb_data; /*!<
char *cpeid; /*!<
cpeid value for the currently parsed distro tag */
+ /* Updateinfo related stuff */
+
+ cr_UpdateInfo *updateinfo; /*!<
+ Update info object */
+ cr_UpdateRecord *updaterecord; /*!<
+ Update record object */
+ cr_UpdateCollection *updatecollection; /*!<
+ Update collection object */
+ cr_UpdateCollectionPackage *updatecollectionpackage; /*!<
+ Update collection package object */
+
} cr_ParserData;
/** Malloc and initialize common part of XML parser data.
#include "package.h"
#include "misc.h"
-#define ERR_DOMAIN CR_XML_PARSER_OTH_ERROR
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define ERR_CODE_XML CRE_BADXMLOTHER
typedef enum {
switch(pd->state) {
case STATE_START:
+ break;
+
case STATE_OTHERDATA:
+ pd->main_tag_found = TRUE;
break;
case STATE_PACKAGE: {
}
static void XMLCALL
-cr_end_handler(void *pdata, const char *element)
+cr_end_handler(void *pdata, G_GNUC_UNUSED const char *element)
{
cr_ParserData *pd = pdata;
GError *tmp_err = NULL;
unsigned int state = pd->state;
- CR_UNUSED(element);
-
if (pd->err)
return; // There was an error -> do nothing
if (tmp_err)
g_propagate_error(err, tmp_err);
+ // Warn if the file was probably of a different type than expected
+
+ if (!pd->main_tag_found && ret == CRE_OK)
+ cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE,
+ "The file don't contain the expected element "
+ "\"<otherdata>\" - The file probably isn't "
+ "a valid other.xml");
+
// Clean up
if (ret != CRE_OK && newpkgcb == cr_newpkgcb) {
#include "package.h"
#include "misc.h"
-#define ERR_DOMAIN CR_XML_PARSER_PRI_ERROR
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define ERR_CODE_XML CRE_BADXMLPRIMARY
typedef enum {
STATE_RPM_ENTRY_CONFLICTS,
STATE_RPM_OBSOLETES,
STATE_RPM_ENTRY_OBSOLETES,
+ STATE_RPM_SUGGESTS,
+ STATE_RPM_ENTRY_SUGGESTS,
+ STATE_RPM_ENHANCES,
+ STATE_RPM_ENTRY_ENHANCES,
+ STATE_RPM_RECOMMENDS,
+ STATE_RPM_ENTRY_RECOMMENDS,
+ STATE_RPM_SUPPLEMENTS,
+ STATE_RPM_ENTRY_SUPPLEMENTS,
STATE_FILE,
NUMSTATES,
} cr_PriState;
{ STATE_FORMAT, "rpm:provides", STATE_RPM_PROVIDES, 0 },
{ STATE_FORMAT, "rpm:requires", STATE_RPM_REQUIRES, 0 },
{ STATE_FORMAT, "rpm:conflicts", STATE_RPM_CONFLICTS, 0 },
- { STATE_FORMAT, "rpm:obsoletes", STATE_RPM_OBSOLETES, 0 },
- { STATE_RPM_PROVIDES, "rpm:entry", STATE_RPM_ENTRY_PROVIDES, 0 },
- { STATE_RPM_REQUIRES, "rpm:entry", STATE_RPM_ENTRY_REQUIRES, 0 },
- { STATE_RPM_CONFLICTS, "rpm:entry", STATE_RPM_ENTRY_CONFLICTS, 0 },
- { STATE_RPM_OBSOLETES, "rpm:entry", STATE_RPM_ENTRY_OBSOLETES, 0 },
+ { STATE_FORMAT, "rpm:obsoletes", STATE_RPM_OBSOLETES, 0 },
+ { STATE_FORMAT, "rpm:suggests", STATE_RPM_SUGGESTS, 0 },
+ { STATE_FORMAT, "rpm:enhances", STATE_RPM_ENHANCES, 0 },
+ { STATE_FORMAT, "rpm:recommends", STATE_RPM_RECOMMENDS, 0 },
+ { STATE_FORMAT, "rpm:supplements", STATE_RPM_SUPPLEMENTS, 0 },
+ { STATE_RPM_PROVIDES, "rpm:entry", STATE_RPM_ENTRY_PROVIDES, 0 },
+ { STATE_RPM_REQUIRES, "rpm:entry", STATE_RPM_ENTRY_REQUIRES, 0 },
+ { STATE_RPM_CONFLICTS, "rpm:entry", STATE_RPM_ENTRY_CONFLICTS, 0 },
+ { STATE_RPM_OBSOLETES, "rpm:entry", STATE_RPM_ENTRY_OBSOLETES, 0 },
+ { STATE_RPM_SUGGESTS, "rpm:entry", STATE_RPM_ENTRY_SUGGESTS, 0 },
+ { STATE_RPM_ENHANCES, "rpm:entry", STATE_RPM_ENTRY_ENHANCES, 0 },
+ { STATE_RPM_RECOMMENDS, "rpm:entry", STATE_RPM_ENTRY_RECOMMENDS, 0 },
+ { STATE_RPM_SUPPLEMENTS,"rpm:entry", STATE_RPM_ENTRY_SUPPLEMENTS, 0 },
{ NUMSTATES, NULL, NUMSTATES, 0 },
};
switch(pd->state) {
case STATE_START:
+ break;
+
case STATE_METADATA:
+ pd->main_tag_found = TRUE;
break;
case STATE_PACKAGE:
case STATE_RPM_REQUIRES:
case STATE_RPM_CONFLICTS:
case STATE_RPM_OBSOLETES:
+ case STATE_RPM_SUGGESTS:
+ case STATE_RPM_ENHANCES:
+ case STATE_RPM_RECOMMENDS:
+ case STATE_RPM_SUPPLEMENTS:
break;
case STATE_RPM_ENTRY_PROVIDES:
case STATE_RPM_ENTRY_REQUIRES:
case STATE_RPM_ENTRY_CONFLICTS:
- case STATE_RPM_ENTRY_OBSOLETES: {
+ case STATE_RPM_ENTRY_OBSOLETES:
+ case STATE_RPM_ENTRY_SUGGESTS:
+ case STATE_RPM_ENTRY_ENHANCES:
+ case STATE_RPM_ENTRY_RECOMMENDS:
+ case STATE_RPM_ENTRY_SUPPLEMENTS:
+ {
assert(pd->pkg);
cr_Dependency *dep = cr_dependency_new();
case STATE_RPM_ENTRY_OBSOLETES:
pd->pkg->obsoletes = g_slist_prepend(pd->pkg->obsoletes, dep);
break;
+ case STATE_RPM_ENTRY_SUGGESTS:
+ pd->pkg->suggests = g_slist_prepend(pd->pkg->suggests, dep);
+ break;
+ case STATE_RPM_ENTRY_ENHANCES:
+ pd->pkg->enhances = g_slist_prepend(pd->pkg->enhances, dep);
+ break;
+ case STATE_RPM_ENTRY_RECOMMENDS:
+ pd->pkg->recommends = g_slist_prepend(pd->pkg->recommends, dep);
+ break;
+ case STATE_RPM_ENTRY_SUPPLEMENTS:
+ pd->pkg->supplements = g_slist_prepend(pd->pkg->supplements, dep);
+ break;
default: assert(0);
}
}
static void XMLCALL
-cr_end_handler(void *pdata, const char *element)
+cr_end_handler(void *pdata, G_GNUC_UNUSED const char *element)
{
cr_ParserData *pd = pdata;
GError *tmp_err = NULL;
unsigned int state = pd->state;
- CR_UNUSED(element);
-
if (pd->err)
return; // There was an error -> do nothing
pd->pkg->obsoletes = g_slist_reverse(pd->pkg->obsoletes);
break;
+ case STATE_RPM_SUGGESTS:
+ pd->pkg->suggests = g_slist_reverse(pd->pkg->suggests);
+ break;
+
+ case STATE_RPM_ENHANCES:
+ pd->pkg->enhances = g_slist_reverse(pd->pkg->enhances);
+ break;
+
+ case STATE_RPM_RECOMMENDS:
+ pd->pkg->recommends = g_slist_reverse(pd->pkg->recommends);
+ break;
+
+ case STATE_RPM_SUPPLEMENTS:
+ pd->pkg->supplements = g_slist_reverse(pd->pkg->supplements);
+ break;
+
case STATE_FILE: {
assert(pd->pkg);
if (tmp_err)
g_propagate_error(err, tmp_err);
+ // Warn if the file was probably of a different type than expected
+
+ if (!pd->main_tag_found && ret == CRE_OK)
+ cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE,
+ "The file don't contain the expected element "
+ "\"<metadata>\" - The file probably isn't "
+ "a valid primary.xml");
+
// Clean up
if (ret != CRE_OK && newpkgcb == cr_newpkgcb) {
#include "package.h"
#include "misc.h"
-#define ERR_DOMAIN CR_XML_PARSER_REPOMD_ERROR
+#define ERR_DOMAIN CREATEREPO_C_ERROR
#define ERR_CODE_XML CRE_BADXMLREPOMD
typedef enum {
STATE_REPOMD,
STATE_REVISION,
STATE_REPOID,
+ STATE_CONTENTHASH,
STATE_TAGS,
STATE_REPO,
STATE_CONTENT,
{ STATE_START, "repomd", STATE_REPOMD, 0 },
{ STATE_REPOMD, "revision", STATE_REVISION, 1 },
{ STATE_REPOMD, "repoid", STATE_REPOID, 1 },
+ { STATE_REPOMD, "contenthash", STATE_CONTENTHASH, 1 },
{ STATE_REPOMD, "tags", STATE_TAGS, 0 },
{ STATE_REPOMD, "data", STATE_DATA, 0 },
{ STATE_TAGS, "repo", STATE_REPO, 1 },
switch(pd->state) {
case STATE_START:
+ break;
+
case STATE_REPOMD:
+ pd->main_tag_found = TRUE;
+ break;
+
case STATE_REVISION:
case STATE_TAGS:
case STATE_REPO:
val);
break;
+ case STATE_CONTENTHASH:
+ assert(pd->repomd);
+ assert(!pd->repomdrecord);
+
+ val = cr_find_attr("type", attr);
+ if (val)
+ pd->repomd->contenthash_type = g_string_chunk_insert(
+ pd->repomd->chunk, val);
+ break;
+
case STATE_DISTRO:
assert(pd->repomd);
assert(!pd->repomdrecord);
}
static void XMLCALL
-cr_end_handler(void *pdata, const char *element)
+cr_end_handler(void *pdata, G_GNUC_UNUSED const char *element)
{
cr_ParserData *pd = pdata;
unsigned int state = pd->state;
- CR_UNUSED(element);
-
if (pd->err)
return; // There was an error -> do nothing
pd->content);
break;
+ case STATE_CONTENTHASH:
+ assert(pd->repomd);
+ assert(!pd->repomdrecord);
+
+ pd->repomd->contenthash = g_string_chunk_insert(pd->repomd->chunk,
+ pd->content);
+ break;
+
case STATE_TAGS:
break;
if (tmp_err)
g_propagate_error(err, tmp_err);
+ // Warn if the file was probably of a different type than expected
+
+ if (!pd->main_tag_found && ret == CRE_OK)
+ cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE,
+ "The file don't contain the expected element "
+ "\"<repomd>\" - The file probably isn't "
+ "a valid repomd.xml");
+
// Clean up
cr_xml_parser_data_free(pd);
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2014 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <expat.h>
+#include "xml_parser_internal.h"
+#include "xml_parser.h"
+#include "updateinfo.h"
+#include "error.h"
+#include "misc.h"
+
+#define ERR_DOMAIN CREATEREPO_C_ERROR
+#define ERR_CODE_XML CRE_BADXMLUPDATEINFO
+
+typedef enum {
+ STATE_START,
+ STATE_UPDATES,
+ STATE_UPDATE, // <update> -----------------------------
+ STATE_ID,
+ STATE_TITLE,
+ STATE_ISSUED,
+ STATE_UPDATED,
+ STATE_RIGHTS,
+ STATE_RELEASE,
+ STATE_PUSHCOUNT,
+ STATE_SEVERITY,
+ STATE_SUMMARY,
+ STATE_DESCRIPTION,
+ STATE_SOLUTION,
+ STATE_MESSAGE, // Not implemented
+ STATE_REFERENCES, // <references> -------------------------
+ STATE_REFERENCE,
+ STATE_PKGLIST, // <pkglist> ----------------------------
+ STATE_COLLECTION,
+ STATE_NAME,
+ STATE_PACKAGE,
+ STATE_FILENAME,
+ STATE_SUM,
+ STATE_REBOOTSUGGESTED,
+ STATE_RESTARTSUGGESTED, // Not implemented
+ STATE_RELOGINSUGGESTED, // Not implemented
+ NUMSTATES,
+} cr_UpdateinfoState;
+
+/* NOTE: Same states in the first column must be together!!!
+ * Performance tip: More frequent elements should be listed
+ * first in their group (e.g. the "package" element (STATE_PACKAGE)
+ * lists "filename" first, because it is more frequent than
+ * the "reboot_suggested" element). */
+static cr_StatesSwitch stateswitches[] = {
+ { STATE_START, "updates", STATE_UPDATES, 0 },
+ { STATE_UPDATES, "update", STATE_UPDATE, 0 },
+ { STATE_UPDATE, "id", STATE_ID, 1 },
+ { STATE_UPDATE, "title", STATE_TITLE, 1 },
+ { STATE_UPDATE, "issued", STATE_ISSUED, 0 },
+ { STATE_UPDATE, "updated", STATE_UPDATED, 0 },
+ { STATE_UPDATE, "rights", STATE_RIGHTS, 1 },
+ { STATE_UPDATE, "release", STATE_RELEASE, 1 },
+ { STATE_UPDATE, "pushcount", STATE_PUSHCOUNT, 1 },
+ { STATE_UPDATE, "severity", STATE_SEVERITY, 1 },
+ { STATE_UPDATE, "summary", STATE_SUMMARY, 1 },
+ { STATE_UPDATE, "description", STATE_DESCRIPTION, 1 },
+ { STATE_UPDATE, "solution", STATE_SOLUTION, 1 },
+ { STATE_UPDATE, "message", STATE_MESSAGE, 1 }, // NI
+ { STATE_UPDATE, "references", STATE_REFERENCES, 0 },
+ { STATE_UPDATE, "pkglist", STATE_PKGLIST, 0 },
+ { STATE_REFERENCES, "reference", STATE_REFERENCE, 0 },
+ { STATE_PKGLIST, "collection", STATE_COLLECTION, 0 },
+ { STATE_COLLECTION, "package", STATE_PACKAGE, 0 },
+ { STATE_COLLECTION, "name", STATE_NAME, 1 },
+ { STATE_PACKAGE, "filename", STATE_FILENAME, 1 },
+ { STATE_PACKAGE, "sum", STATE_SUM, 1 },
+ { STATE_PACKAGE, "reboot_suggested", STATE_REBOOTSUGGESTED, 0 },
+ { STATE_PACKAGE, "restart_suggested", STATE_RESTARTSUGGESTED, 0 }, // NI
+ { STATE_PACKAGE, "relogin_suggested", STATE_RELOGINSUGGESTED, 0 }, // NI
+ { NUMSTATES, NULL, NUMSTATES, 0 }
+};
+
+static void XMLCALL
+cr_start_handler(void *pdata, const char *element, const char **attr)
+{
+ cr_ParserData *pd = pdata;
+ cr_StatesSwitch *sw;
+
+ if (pd->err)
+ return; // There was an error -> do nothing
+
+ if (pd->depth != pd->statedepth) {
+        // We are inside of an unknown element
+ pd->depth++;
+ return;
+ }
+ pd->depth++;
+
+ if (!pd->swtab[pd->state]) {
+ // Current element should not have any sub elements
+ return;
+ }
+
+    // Find the state switch entry for the current element
+ for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++)
+ if (!strcmp(element, sw->ename))
+ break;
+ if (sw->from != pd->state) {
+ // No state for current element (unknown element)
+ cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNTAG,
+ "Unknown element \"%s\"", element);
+ return;
+ }
+
+ // Update parser data
+ pd->state = sw->to;
+ pd->docontent = sw->docontent;
+ pd->statedepth = pd->depth;
+ pd->lcontent = 0;
+ pd->content[0] = '\0';
+
+ const char *val;
+
+ // Shortcuts
+ cr_UpdateRecord *rec = pd->updaterecord;
+ cr_UpdateCollection *collection = pd->updatecollection;
+ cr_UpdateCollectionPackage *package = pd->updatecollectionpackage;
+
+ switch(pd->state) {
+ case STATE_START:
+ break;
+
+ case STATE_UPDATES:
+ pd->main_tag_found = TRUE;
+ break;
+
+ case STATE_ID:
+ case STATE_TITLE:
+ case STATE_RIGHTS:
+ case STATE_RELEASE:
+ case STATE_PUSHCOUNT:
+ case STATE_SEVERITY:
+ case STATE_SUMMARY:
+ case STATE_DESCRIPTION:
+ case STATE_SOLUTION:
+ case STATE_NAME:
+ case STATE_FILENAME:
+ case STATE_REFERENCES:
+ case STATE_PKGLIST:
+ default:
+        // All states that carry no attributes and need no
+        // action are handled here
+ break;
+
+ case STATE_UPDATE:
+ assert(pd->updateinfo);
+ assert(!pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+
+ rec = cr_updaterecord_new();
+ cr_updateinfo_apped_record(pd->updateinfo, rec);
+ pd->updaterecord = rec;
+
+ val = cr_find_attr("from", attr);
+ if (val)
+ rec->from = g_string_chunk_insert(rec->chunk, val);
+
+ val = cr_find_attr("status", attr);
+ if (val)
+ rec->status = g_string_chunk_insert(rec->chunk, val);
+
+ val = cr_find_attr("type", attr);
+ if (val)
+ rec->type = g_string_chunk_insert(rec->chunk, val);
+
+ val = cr_find_attr("version", attr);
+ if (val)
+ rec->version = g_string_chunk_insert(rec->chunk, val);
+
+ break;
+
+ case STATE_ISSUED:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ val = cr_find_attr("date", attr);
+ if (val)
+ rec->issued_date = g_string_chunk_insert(rec->chunk, val);
+ break;
+
+ case STATE_UPDATED:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ val = cr_find_attr("date", attr);
+ if (val)
+ rec->updated_date = g_string_chunk_insert(rec->chunk, val);
+ break;
+
+ case STATE_REFERENCE: {
+ cr_UpdateReference *ref;
+
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+
+ ref = cr_updatereference_new();
+ cr_updaterecord_append_reference(rec, ref);
+
+ val = cr_find_attr("id", attr);
+ if (val)
+ ref->id = g_string_chunk_insert(ref->chunk, val);
+
+ val = cr_find_attr("href", attr);
+ if (val)
+ ref->href = g_string_chunk_insert(ref->chunk, val);
+
+ val = cr_find_attr("type", attr);
+ if (val)
+ ref->type = g_string_chunk_insert(ref->chunk, val);
+
+ val = cr_find_attr("title", attr);
+ if (val)
+ ref->title = g_string_chunk_insert(ref->chunk, val);
+
+ break;
+ }
+
+ case STATE_COLLECTION:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+
+ collection = cr_updatecollection_new();
+ cr_updaterecord_append_collection(rec, collection);
+ pd->updatecollection = collection;
+
+ val = cr_find_attr("short", attr);
+ if (val)
+ collection->shortname = g_string_chunk_insert(collection->chunk, val);
+
+ break;
+
+ case STATE_PACKAGE:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+
+ package = cr_updatecollectionpackage_new();
+ cr_updatecollection_append_package(collection, package);
+ pd->updatecollectionpackage = package;
+
+ val = cr_find_attr("name", attr);
+ if (val)
+ package->name = g_string_chunk_insert(package->chunk, val);
+
+ val = cr_find_attr("version", attr);
+ if (val)
+ package->version = g_string_chunk_insert(package->chunk, val);
+
+ val = cr_find_attr("release", attr);
+ if (val)
+ package->release = g_string_chunk_insert(package->chunk, val);
+
+ val = cr_find_attr("epoch", attr);
+ if (val)
+ package->epoch = g_string_chunk_insert(package->chunk, val);
+
+ val = cr_find_attr("arch", attr);
+ if (val)
+ package->arch = g_string_chunk_insert(package->chunk, val);
+
+ val = cr_find_attr("src", attr);
+ if (val)
+ package->src = g_string_chunk_insert(package->chunk, val);
+
+ break;
+
+ case STATE_SUM:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(pd->updatecollectionpackage);
+ val = cr_find_attr("type", attr);
+ if (val)
+ package->sum_type = cr_checksum_type(val);
+ break;
+
+ case STATE_REBOOTSUGGESTED:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(pd->updatecollectionpackage);
+ package->reboot_suggested = TRUE;
+ break;
+ }
+}
+
+static void XMLCALL
+cr_end_handler(void *pdata, G_GNUC_UNUSED const char *element)
+{
+ cr_ParserData *pd = pdata;
+ unsigned int state = pd->state;
+
+ if (pd->err)
+ return; // There was an error -> do nothing
+
+ if (pd->depth != pd->statedepth) {
+ // Back from the unknown state
+ pd->depth--;
+ return;
+ }
+
+ pd->depth--;
+ pd->statedepth--;
+ pd->state = pd->sbtab[pd->state];
+ pd->docontent = 0;
+
+ // Shortcuts
+ char *content = pd->content;
+ cr_UpdateRecord *rec = pd->updaterecord;
+ cr_UpdateCollection *col = pd->updatecollection;
+ cr_UpdateCollectionPackage *package = pd->updatecollectionpackage;
+
+ switch (state) {
+ case STATE_START:
+ case STATE_UPDATES:
+ case STATE_ISSUED:
+ case STATE_UPDATED:
+ case STATE_REFERENCES:
+ case STATE_REFERENCE:
+ case STATE_PKGLIST:
+ case STATE_REBOOTSUGGESTED:
+        // All elements that carry no text data and need no
+        // post-processing go here
+ break;
+
+ case STATE_ID:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->id = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_TITLE:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->title = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_RIGHTS:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->rights = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_RELEASE:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->release = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_PUSHCOUNT:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->pushcount = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_SEVERITY:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->severity = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_SUMMARY:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->summary = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_DESCRIPTION:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->description = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_SOLUTION:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ rec->solution = cr_safe_string_chunk_insert_null(rec->chunk, content);
+ break;
+
+ case STATE_NAME:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ col->name = cr_safe_string_chunk_insert_null(col->chunk, content);
+ break;
+
+ case STATE_FILENAME:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(pd->updatecollectionpackage);
+ package->filename = cr_safe_string_chunk_insert_null(package->chunk,
+ content);
+ break;
+
+ case STATE_SUM:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(pd->updatecollectionpackage);
+ package->sum = cr_safe_string_chunk_insert_null(package->chunk, content);
+ break;
+
+ case STATE_PACKAGE:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(pd->updatecollectionpackage);
+ pd->updatecollectionpackage = NULL;
+ break;
+
+ case STATE_COLLECTION:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ pd->updatecollection = NULL;
+ break;
+
+ case STATE_UPDATE:
+ assert(pd->updateinfo);
+ assert(pd->updaterecord);
+ assert(!pd->updatecollection);
+ assert(!pd->updatecollectionpackage);
+ pd->updaterecord = NULL;
+ break;
+
+ default:
+ break;
+ }
+}
+
+int
+cr_xml_parse_updateinfo(const char *path,
+ cr_UpdateInfo *updateinfo,
+ cr_XmlParserWarningCb warningcb,
+ void *warningcb_data,
+ GError **err)
+{
+ int ret = CRE_OK;
+ cr_ParserData *pd;
+ XML_Parser parser;
+ GError *tmp_err = NULL;
+
+ assert(path);
+ assert(updateinfo);
+ assert(!err || *err == NULL);
+
+ // Init
+
+ parser = XML_ParserCreate(NULL);
+ XML_SetElementHandler(parser, cr_start_handler, cr_end_handler);
+ XML_SetCharacterDataHandler(parser, cr_char_handler);
+
+ pd = cr_xml_parser_data(NUMSTATES);
+ pd->parser = &parser;
+ pd->state = STATE_START;
+ pd->updateinfo = updateinfo;
+ pd->warningcb = warningcb;
+ pd->warningcb_data = warningcb_data;
+ for (cr_StatesSwitch *sw = stateswitches; sw->from != NUMSTATES; sw++) {
+ if (!pd->swtab[sw->from])
+ pd->swtab[sw->from] = sw;
+ pd->sbtab[sw->to] = sw->from;
+ }
+
+ XML_SetUserData(parser, pd);
+
+ // Parsing
+
+ ret = cr_xml_parser_generic(parser, pd, path, &tmp_err);
+ if (tmp_err)
+ g_propagate_error(err, tmp_err);
+
+    // Warn if the file was probably of a different type than expected
+
+ if (!pd->main_tag_found && ret == CRE_OK)
+ cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE,
+                "The file doesn't contain the expected element "
+                "\"<updates>\" - the file probably isn't "
+                "a valid updateinfo.xml");
+
+ // Clean up
+
+ cr_xml_parser_data_free(pd);
+ XML_ParserFree(parser);
+
+ return ret;
+}
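
A minimal driver for the new updateinfo parser (a hypothetical example, not part of the patch; it assumes cr_updateinfo_new()/cr_updateinfo_free() from updateinfo.h and that cr_UpdateInfo keeps its records in an "updates" GSList, as the handlers above suggest):

#include <stdio.h>
#include <glib.h>
#include "updateinfo.h"
#include "xml_parser.h"
#include "error.h"

int main(void)
{
    GError *err = NULL;
    cr_UpdateInfo *ui = cr_updateinfo_new();

    if (cr_xml_parse_updateinfo("repodata/updateinfo.xml", ui,
                                NULL, NULL, &err) != CRE_OK) {
        fprintf(stderr, "Parsing failed: %s\n", err->message);
        g_error_free(err);
        cr_updateinfo_free(ui);
        return 1;
    }

    // Walk the parsed update records (assumed "updates" GSList)
    for (GSList *elem = ui->updates; elem; elem = g_slist_next(elem)) {
        cr_UpdateRecord *rec = elem->data;
        printf("%s (%s)\n", rec->id ? rec->id : "?",
               rec->type ? rec->type : "?");
    }

    cr_updateinfo_free(ui);
    return 0;
}
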
TARGET_LINK_LIBRARIES(test_xml_parser_repomd libcreaterepo_c ${GLIB2_LIBRARIES})
ADD_DEPENDENCIES(tests test_xml_parser_repomd)
+ADD_EXECUTABLE(test_xml_parser_updateinfo test_xml_parser_updateinfo.c)
+TARGET_LINK_LIBRARIES(test_xml_parser_updateinfo libcreaterepo_c ${GLIB2_LIBRARIES})
+ADD_DEPENDENCIES(tests test_xml_parser_updateinfo)
+
CONFIGURE_FILE("run_gtester.sh.in" "${CMAKE_BINARY_DIR}/tests/run_gtester.sh")
ADD_TEST(test_main run_gtester.sh)
#define TEST_REPO_01 TEST_DATA_PATH"repo_01/"
#define TEST_REPO_02 TEST_DATA_PATH"repo_02/"
#define TEST_FILES_PATH TEST_DATA_PATH"test_files/"
+#define TEST_UPDATEINFO_FILES_PATH TEST_DATA_PATH"updateinfo_files/"
// Repo files
#define NON_EXIST_FILE "/tmp/foobarfile.which.should.not.exists"
+// Updateinfo files
+
+#define TEST_UPDATEINFO_00 TEST_UPDATEINFO_FILES_PATH"updateinfo_00.xml"
+#define TEST_UPDATEINFO_01 TEST_UPDATEINFO_FILES_PATH"updateinfo_01.xml"
+#define TEST_UPDATEINFO_02 TEST_UPDATEINFO_FILES_PATH"updateinfo_02.xml.xz"
+
#endif
+# Detect nosetests version suffix
+EXECUTE_PROCESS(COMMAND ${PYTHON_EXECUTABLE} -c "import sys; sys.stdout.write('%s.%s' % (sys.version_info[0], sys.version_info[1]))" OUTPUT_VARIABLE PYTHON_MAJOR_DOT_MINOR_VERSION)
+SET(NOSETEST_VERSION_SUFFIX "-${PYTHON_MAJOR_DOT_MINOR_VERSION}")
+message("-- nosetests program is nosetests${NOSETEST_VERSION_SUFFIX}")
+
+execute_process(COMMAND which nosetests${NOSETEST_VERSION_SUFFIX}
+ OUTPUT_QUIET ERROR_QUIET
+ RESULT_VARIABLE NOSE_CHECK_RESULT)
+IF (NOT NOSE_CHECK_RESULT STREQUAL "0")
+    MESSAGE("Command 'nosetests${NOSETEST_VERSION_SUFFIX}' doesn't exist! Falling back to plain 'nosetests'")
+ SET(NOSETEST_VERSION_SUFFIX "")
+ENDIF()
+
CONFIGURE_FILE("run_nosetests.sh.in" "${CMAKE_BINARY_DIR}/tests/python/tests/run_nosetests.sh")
ADD_TEST(test_python run_nosetests.sh -s ${CMAKE_CURRENT_SOURCE_DIR})
-LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/src/: PYTHONPATH=${CMAKE_BINARY_DIR}/src/python/ nosetests -s -v ${CMAKE_CURRENT_SOURCE_DIR}/
+LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/src/: PYTHONPATH=${CMAKE_BINARY_DIR}/src/python/ nosetests${NOSETEST_VERSION_SUFFIX} -s -v ${CMAKE_CURRENT_SOURCE_DIR}/
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseChecksum(unittest.TestCase):
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseCompressionWrapper(unittest.TestCase):
def test_compression_suffix(self):
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseContentStat(unittest.TestCase):
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseCrFile(unittest.TestCase):
f.close()
import gzip
- content = gzip.open(path).read()
+ content = gzip.open(path).read().decode('utf-8')
self.assertEqual(content, "foobar")
def test_crfile_bz2_compression(self):
f.close()
import bz2
- content = bz2.decompress(open(path).read())
+ content = bz2.decompress(open(path, 'rb').read()).decode('utf-8')
self.assertEqual(content, "foobar")
def test_crfile_xz_compression(self):
import subprocess
p = subprocess.Popen(["unxz", "--stdout", path], stdout=subprocess.PIPE)
- content = p.stdout.read()
+ content = p.stdout.read().decode('utf-8')
self.assertEqual(content, "foobar")
import unittest
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseLoadMetadata(unittest.TestCase):
def test_load_metadata__repo00(self):
import unittest
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseMetadataLocation(unittest.TestCase):
def test_metadatalocation(self):
import shutil
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseMisc(unittest.TestCase):
self.assertTrue(os.path.isfile(dest))
self.assertEqual(stat.checksum, FILE_TEXT_SHA256SUM)
self.assertEqual(stat.checksum_type, cr.SHA256)
- self.assertEqual(stat.size, 910L)
+ self.assertEqual(stat.size, 910)
import unittest
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCasePackage(unittest.TestCase):
def test_package_empty(self):
self.assertEqual(pkg.epoch, "0")
self.assertEqual(pkg.release, "0")
self.assertEqual(pkg.summary, '""')
- self.assertEqual(pkg.description, "")
+ self.assertEqual(pkg.description, None)
self.assertEqual(pkg.url, None)
#self.assertEqual(pkg.time_file, 1340709886)
self.assertEqual(pkg.time_build, 1340696582)
])
self.assertEqual(pkg.conflicts, [])
self.assertEqual(pkg.obsoletes, [])
+ self.assertEqual(pkg.suggests, [])
+ self.assertEqual(pkg.enhances, [])
+ self.assertEqual(pkg.recommends, [])
+ self.assertEqual(pkg.supplements, [])
self.assertEqual(pkg.files, [])
self.assertEqual(pkg.changelogs, [])
('aad', 'LT', '0', '444', None, False),
('aae', 'GT', '0', '555', None, False)
])
+ self.assertEqual(pkg.suggests, [])
+ self.assertEqual(pkg.enhances, [])
+ self.assertEqual(pkg.recommends, [])
+ self.assertEqual(pkg.supplements, [])
self.assertEqual(pkg.files, [
('', '/usr/bin/', 'complex_a'),
('dir', '/usr/share/doc/', 'Archer-3.4.5'),
('', '/usr/share/doc/Archer-3.4.5/', 'README')
])
self.assertEqual(pkg.changelogs, [
- ('Tomas Mlcoch <tmlcoch@redhat.com> - 1.1.1-1', 1334664000L,
+ ('Tomas Mlcoch <tmlcoch@redhat.com> - 1.1.1-1', 1334664000,
'- First changelog.'),
- ('Tomas Mlcoch <tmlcoch@redhat.com> - 2.2.2-2', 1334750400L,
+ ('Tomas Mlcoch <tmlcoch@redhat.com> - 2.2.2-2', 1334750400,
'- That was totally ninja!'),
- ('Tomas Mlcoch <tmlcoch@redhat.com> - 3.3.3-3', 1365422400L,
+ ('Tomas Mlcoch <tmlcoch@redhat.com> - 3.3.3-3', 1365422400,
'- 3. changelog.')
])
self.assertEqual(pkg.conflicts, [('foobar', 'LT', '0', '1.0.0', None, False)])
pkg.obsoletes = [('foobar', 'GE', '0', '1.1.0', None, False)]
self.assertEqual(pkg.obsoletes, [('foobar', 'GE', '0', '1.1.0', None, False)])
+ pkg.suggests = [('foo_sug', 'GE', '0', '1.1.0', None, False)]
+ self.assertEqual(pkg.suggests, [('foo_sug', 'GE', '0', '1.1.0', None, False)])
+ pkg.enhances = [('foo_enh', 'GE', '0', '1.1.0', None, False)]
+ self.assertEqual(pkg.enhances, [('foo_enh', 'GE', '0', '1.1.0', None, False)])
+ pkg.recommends = [('foo_rec', 'GE', '0', '1.1.0', None, False)]
+ self.assertEqual(pkg.recommends, [('foo_rec', 'GE', '0', '1.1.0', None, False)])
+ pkg.supplements = [('foo_sup', 'GE', '0', '1.1.0', None, False)]
+ self.assertEqual(pkg.supplements, [('foo_sup', 'GE', '0', '1.1.0', None, False)])
pkg.files = [(None, '/foo/', 'bar')]
self.assertEqual(pkg.files, [(None, '/foo/', 'bar')])
- pkg.changelogs = [('me', 123456L, 'first commit')]
- self.assertEqual(pkg.changelogs, [('me', 123456L, 'first commit')])
+ pkg.changelogs = [('me', 123456, 'first commit')]
+ self.assertEqual(pkg.changelogs, [('me', 123456, 'first commit')])
self.assertEqual(pkg.nvra(), "bar-11-33.quantum")
self.assertEqual(pkg.nevra(), "bar-22:11-33.quantum")
import unittest
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseParsepkg(unittest.TestCase):
def test_package_from_rpm(self):
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseRepomd(unittest.TestCase):
self.assertEqual(md.revision, "foobar")
self.assertEqual(md.repoid, None);
- md.set_repoid("fooid", "sha256")
- self.assertEqual(md.repoid, "fooid")
+ md.set_repoid("barid", "sha256")
+ self.assertEqual(md.repoid, "barid")
+
+        self.assertEqual(md.contenthash, None)
+ md.set_contenthash("fooid", "sha256")
+ self.assertEqual(md.contenthash, "fooid")
self.assertEqual(md.distro_tags, [])
md.add_distro_tag("tag1")
"""<?xml version="1.0" encoding="UTF-8"?>
<repomd xmlns="http://linux.duke.edu/metadata/repo" xmlns:rpm="http://linux.duke.edu/metadata/rpm">
<revision>foobar</revision>
- <repoid type="sha256">fooid</repoid>
+ <repoid type="sha256">barid</repoid>
+ <contenthash type="sha256">fooid</contenthash>
<tags>
<content>contenttag</content>
<repo>repotag</repo>
self.assertEqual(len(md.records), 1)
md.repoid = None
+ md.contenthash = None
xml = md.xml_dump()
self.assertEqual(xml,
self.assertEqual(repomd.content_tags, [])
self.assertEqual(len(repomd.records), 3)
+ def test_repomd_indexing_and_iteration_repo01(self):
+ repomd = cr.Repomd(REPO_01_REPOMD)
+
+ types = []
+ for rec in repomd:
+ types.append(rec.type)
+ self.assertEqual(types, ['filelists', 'other', 'primary'])
+
+ rec = repomd["primary"]
+ self.assertEqual(rec.type, "primary")
+
+ self.assertRaises(KeyError, repomd.__getitem__, "foobar")
+
+ self.assertTrue("primary" in repomd)
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseRepomdRecord(unittest.TestCase):
self.assertEqual(rec.checksum_open_type, None)
self.assertEqual(rec.timestamp, 0)
self.assertEqual(rec.size, 0)
- self.assertEqual(rec.size_open, 0)
+ self.assertEqual(rec.size_open, -1)
self.assertEqual(rec.db_ver, 0)
rec.fill(cr.SHA256)
import sqlite3
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseSqlite(unittest.TestCase):
(u'requires',),
(u'provides',),
(u'conflicts',),
- (u'obsoletes',)])
+ (u'obsoletes',),
+ (u'suggests',),
+ (u'enhances',),
+ (u'recommends',),
+ (u'supplements',),
+ ])
# Check indexes
self.assertEqual(con.execute("""select name from sqlite_master where type="index";""").fetchall(),
[(u'packagename',),
(u'pkgprovides',),
(u'providesname',),
(u'pkgconflicts',),
- (u'pkgobsoletes',)])
+ (u'pkgobsoletes',),
+ (u'pkgsuggests',),
+ (u'pkgenhances',),
+ (u'pkgrecommends',),
+ (u'pkgsupplements',),
+ ])
# Check triggers
self.assertEqual(con.execute("""select name from sqlite_master where type="trigger";""").fetchall(),
[(u'removals',)])
--- /dev/null
+import unittest
+import shutil
+import tempfile
+import os.path
+import createrepo_c as cr
+
+from .fixtures import *
+
+class TestCaseUpdateCollection(unittest.TestCase):
+
+ def test_updatecollection_setters(self):
+ col = cr.UpdateCollection()
+ self.assertTrue(col)
+
+ self.assertEqual(col.shortname, None)
+ self.assertEqual(col.name, None)
+ self.assertEqual(col.packages, [])
+
+ pkg = cr.UpdateCollectionPackage()
+ pkg.name = "foo"
+ pkg.version = "1.2"
+ pkg.release = "3"
+ pkg.epoch = "0"
+ pkg.arch = "x86"
+ pkg.src = "foo.src.rpm"
+ pkg.filename = "foo.rpm"
+ pkg.sum = "abcdef"
+ pkg.sum_type = cr.SHA1
+ pkg.reboot_suggested = True
+
+ col.shortname = "short name"
+ col.name = "long name"
+ col.append(pkg)
+
+ self.assertEqual(col.shortname, "short name")
+ self.assertEqual(col.name, "long name")
+ self.assertEqual(len(col.packages), 1)
+
+        # Also check that the package was appended properly
+ pkg = col.packages[0]
+ self.assertEqual(pkg.name, "foo")
+ self.assertEqual(pkg.version, "1.2")
+ self.assertEqual(pkg.release, "3")
+ self.assertEqual(pkg.epoch, "0")
+ self.assertEqual(pkg.arch, "x86")
+ self.assertEqual(pkg.src, "foo.src.rpm")
+ self.assertEqual(pkg.filename, "foo.rpm")
+ self.assertEqual(pkg.sum, "abcdef")
+ self.assertEqual(pkg.sum_type, cr.SHA1)
+ self.assertEqual(pkg.reboot_suggested, True)
--- /dev/null
+import unittest
+import shutil
+import tempfile
+import os.path
+import createrepo_c as cr
+
+from .fixtures import *
+
+class TestCaseUpdateCollectionPackage(unittest.TestCase):
+
+ def test_updatecollectionpackage_setters(self):
+ pkg = cr.UpdateCollectionPackage()
+ self.assertTrue(pkg)
+
+ self.assertEqual(pkg.name, None)
+ self.assertEqual(pkg.version, None)
+ self.assertEqual(pkg.release, None)
+ self.assertEqual(pkg.epoch, None)
+ self.assertEqual(pkg.arch, None)
+ self.assertEqual(pkg.src, None)
+ self.assertEqual(pkg.filename, None)
+ self.assertEqual(pkg.sum, None)
+ self.assertEqual(pkg.sum_type, 0)
+ self.assertEqual(pkg.reboot_suggested, 0)
+
+ pkg.name = "foo"
+ pkg.version = "1.2"
+ pkg.release = "3"
+ pkg.epoch = "0"
+ pkg.arch = "x86"
+ pkg.src = "foo.src.rpm"
+ pkg.filename = "foo.rpm"
+ pkg.sum = "abcdef"
+ pkg.sum_type = cr.SHA1
+ pkg.reboot_suggested = True
+
+ self.assertEqual(pkg.name, "foo")
+ self.assertEqual(pkg.version, "1.2")
+ self.assertEqual(pkg.release, "3")
+ self.assertEqual(pkg.epoch, "0")
+ self.assertEqual(pkg.arch, "x86")
+ self.assertEqual(pkg.src, "foo.src.rpm")
+ self.assertEqual(pkg.filename, "foo.rpm")
+ self.assertEqual(pkg.sum, "abcdef")
+ self.assertEqual(pkg.sum_type, cr.SHA1)
+ self.assertEqual(pkg.reboot_suggested, True)
--- /dev/null
+from datetime import datetime
+import unittest
+import shutil
+import tempfile
+import os.path
+import createrepo_c as cr
+
+from .fixtures import *
+
+class TestCaseUpdateInfo(unittest.TestCase):
+
+ def test_updateinfo_setters(self):
+ now = datetime.now()
+ # Microseconds are always 0 in updateinfo
+ now = datetime(now.year, now.month, now.day, now.hour, now.minute,
+ now.second, 0)
+
+ ui = cr.UpdateInfo()
+ self.assertTrue(ui)
+
+ self.assertEqual(ui.updates, [])
+
+ rec = cr.UpdateRecord()
+ rec.fromstr = "from"
+ rec.status = "status"
+ rec.type = "type"
+ rec.version = "version"
+ rec.id = "id"
+ rec.title = "title"
+ rec.issued_date = now
+ rec.updated_date = now
+ rec.rights = "rights"
+ rec.release = "release"
+ rec.pushcount = "pushcount"
+ rec.severity = "severity"
+ rec.summary = "summary"
+ rec.description = "description"
+ rec.solution = "solution"
+
+ ui.append(rec)
+
+ self.assertEqual(len(ui.updates), 1)
+ rec = ui.updates[0]
+
+ self.assertEqual(rec.fromstr, "from")
+ self.assertEqual(rec.status, "status")
+ self.assertEqual(rec.type, "type")
+ self.assertEqual(rec.version, "version")
+ self.assertEqual(rec.id, "id")
+ self.assertEqual(rec.title, "title")
+ self.assertEqual(rec.issued_date, now)
+ self.assertEqual(rec.updated_date, now)
+ self.assertEqual(rec.rights, "rights")
+ self.assertEqual(rec.release, "release")
+ self.assertEqual(rec.pushcount, "pushcount")
+ self.assertEqual(rec.severity, "severity")
+ self.assertEqual(rec.summary, "summary")
+ self.assertEqual(rec.description, "description")
+ self.assertEqual(rec.solution, "solution")
+ self.assertEqual(len(rec.references), 0)
+ self.assertEqual(len(rec.collections), 0)
+
+ def test_updateinfo_xml_dump_01(self):
+ ui = cr.UpdateInfo()
+ xml = ui.xml_dump()
+
+ self.assertEqual(xml,
+ """<?xml version="1.0" encoding="UTF-8"?>\n<updates/>\n""")
+
+ def test_updateinfo_xml_dump_02(self):
+ now = datetime.now()
+ # Microseconds are always 0 in updateinfo
+ now = datetime(now.year, now.month, now.day, now.hour, now.minute,
+ now.second, 0)
+
+ ui = cr.UpdateInfo()
+ xml = ui.xml_dump()
+
+ rec = cr.UpdateRecord()
+ rec.fromstr = "from"
+ rec.status = "status"
+ rec.type = "type"
+ rec.version = "version"
+ rec.id = "id"
+ rec.title = "title"
+ rec.issued_date = now
+ rec.updated_date = now
+ rec.rights = "rights"
+ rec.release = "release"
+ rec.pushcount = "pushcount"
+ rec.severity = "severity"
+ rec.summary = "summary"
+ rec.description = "description"
+ rec.solution = "solution"
+
+ ui.append(rec)
+ xml = ui.xml_dump()
+
+ self.assertEqual(xml,
+"""<?xml version="1.0" encoding="UTF-8"?>
+<updates>
+ <update from="from" status="status" type="type" version="version">
+ <id>id</id>
+ <title>title</title>
+ <issued date="%(now)s"/>
+ <updated date="%(now)s"/>
+ <rights>rights</rights>
+ <release>release</release>
+ <pushcount>pushcount</pushcount>
+ <severity>severity</severity>
+ <summary>summary</summary>
+ <description>description</description>
+ <solution>solution</solution>
+ <references/>
+ <pkglist/>
+ </update>
+</updates>
+""" % {"now": now.strftime("%Y-%m-%d %H:%M:%S")})
+
+ def test_updateinfo_xml_dump_03(self):
+ now = datetime.now()
+ # Microseconds are always 0 in updateinfo
+ now = datetime(now.year, now.month, now.day, now.hour, now.minute,
+ now.second, 0)
+
+ pkg = cr.UpdateCollectionPackage()
+ pkg.name = "foo"
+ pkg.version = "1.2"
+ pkg.release = "3"
+ pkg.epoch = "0"
+ pkg.arch = "x86"
+ pkg.src = "foo.src.rpm"
+ pkg.filename = "foo.rpm"
+ pkg.sum = "abcdef"
+ pkg.sum_type = cr.SHA1
+ pkg.reboot_suggested = True
+
+ col = cr.UpdateCollection()
+ col.shortname = "short name"
+ col.name = "long name"
+ col.append(pkg)
+
+ ref = cr.UpdateReference()
+ ref.href = "href"
+ ref.id = "id"
+ ref.type = "type"
+ ref.title = "title"
+
+ rec = cr.UpdateRecord()
+ rec.fromstr = "from"
+ rec.status = "status"
+ rec.type = "type"
+ rec.version = "version"
+ rec.id = "id"
+ rec.title = "title"
+ rec.issued_date = now
+ rec.updated_date = now
+ rec.rights = "rights"
+ rec.release = "release"
+ rec.pushcount = "pushcount"
+ rec.severity = "severity"
+ rec.summary = "summary"
+ rec.description = "description"
+ rec.solution = "solution"
+ rec.append_collection(col)
+ rec.append_reference(ref)
+
+ ui = cr.UpdateInfo()
+ ui.append(rec)
+ xml = ui.xml_dump()
+
+ self.assertEqual(xml,
+"""<?xml version="1.0" encoding="UTF-8"?>
+<updates>
+ <update from="from" status="status" type="type" version="version">
+ <id>id</id>
+ <title>title</title>
+ <issued date="%(now)s"/>
+ <updated date="%(now)s"/>
+ <rights>rights</rights>
+ <release>release</release>
+ <pushcount>pushcount</pushcount>
+ <severity>severity</severity>
+ <summary>summary</summary>
+ <description>description</description>
+ <solution>solution</solution>
+ <references>
+ <reference href="href" id="id" type="type" title="title"/>
+ </references>
+ <pkglist>
+ <collection short="short name">
+ <name>long name</name>
+ <package name="foo" version="1.2" release="3" epoch="0" arch="x86" src="foo.src.rpm">
+ <filename>foo.rpm</filename>
+ <sum type="sha1">abcdef</sum>
+ <reboot_suggested/>
+ </package>
+ </collection>
+ </pkglist>
+ </update>
+</updates>
+""" % {"now": now.strftime("%Y-%m-%d %H:%M:%S")})
--- /dev/null
+from datetime import datetime
+import unittest
+import shutil
+import tempfile
+import os.path
+import createrepo_c as cr
+
+from .fixtures import *
+
+class TestCaseUpdateRecord(unittest.TestCase):
+
+ def test_updaterecord_setters(self):
+ now = datetime.now()
+ # Microseconds are always 0 in updateinfo
+ now = datetime(now.year, now.month, now.day, now.hour, now.minute,
+ now.second, 0)
+
+ rec = cr.UpdateRecord()
+ self.assertTrue(rec)
+
+ self.assertEqual(rec.fromstr, None)
+ self.assertEqual(rec.status, None)
+ self.assertEqual(rec.type, None)
+ self.assertEqual(rec.version, None)
+ self.assertEqual(rec.id, None)
+ self.assertEqual(rec.title, None)
+ self.assertEqual(rec.issued_date, None)
+ self.assertEqual(rec.updated_date, None)
+ self.assertEqual(rec.rights, None)
+ self.assertEqual(rec.release, None)
+ self.assertEqual(rec.pushcount, None)
+ self.assertEqual(rec.severity, None)
+ self.assertEqual(rec.summary, None)
+ self.assertEqual(rec.description, None)
+ self.assertEqual(rec.solution, None)
+ self.assertEqual(rec.references, [])
+ self.assertEqual(rec.collections, [])
+
+ ref = cr.UpdateReference()
+ ref.href = "href"
+ ref.id = "id"
+ ref.type = "type"
+ ref.title = "title"
+
+ col = cr.UpdateCollection()
+ col.shortname = "short name"
+ col.name = "long name"
+
+ rec.fromstr = "from"
+ rec.status = "status"
+ rec.type = "type"
+ rec.version = "version"
+ rec.id = "id"
+ rec.title = "title"
+ rec.issued_date = now
+ rec.updated_date = now
+ rec.rights = "rights"
+ rec.release = "release"
+ rec.pushcount = "pushcount"
+ rec.severity = "severity"
+ rec.summary = "summary"
+ rec.description = "description"
+ rec.solution = "solution"
+ rec.append_reference(ref)
+ rec.append_collection(col)
+
+ self.assertEqual(rec.fromstr, "from")
+ self.assertEqual(rec.status, "status")
+ self.assertEqual(rec.type, "type")
+ self.assertEqual(rec.version, "version")
+ self.assertEqual(rec.id, "id")
+ self.assertEqual(rec.title, "title")
+ self.assertEqual(rec.issued_date, now)
+ self.assertEqual(rec.updated_date, now)
+ self.assertEqual(rec.rights, "rights")
+ self.assertEqual(rec.release, "release")
+ self.assertEqual(rec.pushcount, "pushcount")
+ self.assertEqual(rec.severity, "severity")
+ self.assertEqual(rec.summary, "summary")
+ self.assertEqual(rec.description, "description")
+ self.assertEqual(rec.solution, "solution")
+ self.assertEqual(len(rec.references), 1)
+ self.assertEqual(len(rec.collections), 1)
+
+ ref = rec.references[0]
+ self.assertEqual(ref.href, "href")
+ self.assertEqual(ref.id, "id")
+ self.assertEqual(ref.type, "type")
+ self.assertEqual(ref.title, "title")
+
+ col = rec.collections[0]
+ self.assertEqual(col.shortname, "short name")
+ self.assertEqual(col.name, "long name")
+ self.assertEqual(len(col.packages), 0)
+
+ def test_xml_dump_updaterecord(self):
+ now = datetime.now()
+ # Microseconds are always 0 in updateinfo
+ now = datetime(now.year, now.month, now.day, now.hour, now.minute,
+ now.second, 0)
+
+ rec = cr.UpdateRecord()
+ rec.fromstr = "from"
+ rec.status = "status"
+ rec.type = "type"
+ rec.version = "version"
+ rec.id = "id"
+ rec.title = "title"
+ rec.issued_date = now
+ rec.updated_date = now
+ rec.rights = "rights"
+ rec.release = "release"
+ rec.pushcount = "pushcount"
+ rec.severity = "severity"
+ rec.summary = "summary"
+ rec.description = "description"
+ rec.solution = "solution"
+
+ xml = cr.xml_dump_updaterecord(rec)
+ self.assertEqual(xml,
+""" <update from="from" status="status" type="type" version="version">
+ <id>id</id>
+ <title>title</title>
+ <issued date="%(now)s"/>
+ <updated date="%(now)s"/>
+ <rights>rights</rights>
+ <release>release</release>
+ <pushcount>pushcount</pushcount>
+ <severity>severity</severity>
+ <summary>summary</summary>
+ <description>description</description>
+ <solution>solution</solution>
+ <references/>
+ <pkglist/>
+ </update>
+""" % {"now": now.strftime("%Y-%m-%d %H:%M:%S")})
--- /dev/null
+import unittest
+import shutil
+import tempfile
+import os.path
+import createrepo_c as cr
+
+from .fixtures import *
+
+class TestCaseUpdateReference(unittest.TestCase):
+
+ def test_updatereference_setters(self):
+ ref = cr.UpdateReference()
+ self.assertTrue(ref)
+
+ self.assertEqual(ref.href, None)
+ self.assertEqual(ref.id, None)
+ self.assertEqual(ref.type, None)
+ self.assertEqual(ref.title, None)
+
+ ref.href = "href"
+ ref.id = "id"
+ ref.type = "type"
+ ref.title = "title"
+
+ self.assertEqual(ref.href, "href")
+ self.assertEqual(ref.id, "id")
+ self.assertEqual(ref.type, "type")
+ self.assertEqual(ref.title, "title")
import unittest
import createrepo_c as cr
-import fixtures
+from . import fixtures
class TestCaseVersion(unittest.TestCase):
def test_version(self):
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseXmlFile(unittest.TestCase):
f.close()
import gzip
- content = gzip.open(path).read()
+ content = gzip.open(path).read().decode('utf-8')
self.assertEqual(content,
"""<?xml version="1.0" encoding="UTF-8"?>
<metadata xmlns="http://linux.duke.edu/metadata/common" xmlns:rpm="http://linux.duke.edu/metadata/rpm" packages="0">
f.close()
import bz2
- content = bz2.decompress(open(path).read())
+ content = bz2.decompress(open(path, 'rb').read()).decode('utf-8')
self.assertEqual(content,
"""<?xml version="1.0" encoding="UTF-8"?>
<metadata xmlns="http://linux.duke.edu/metadata/common" xmlns:rpm="http://linux.duke.edu/metadata/rpm" packages="0">
import subprocess
p = subprocess.Popen(["unxz", "--stdout", path], stdout=subprocess.PIPE)
- content = p.stdout.read()
+ content = p.stdout.read().decode('utf-8')
self.assertEqual(content,
"""<?xml version="1.0" encoding="UTF-8"?>
<metadata xmlns="http://linux.duke.edu/metadata/common" xmlns:rpm="http://linux.duke.edu/metadata/rpm" packages="0">
import os.path
import createrepo_c as cr
-from fixtures import *
+from .fixtures import *
class TestCaseXmlParserPrimary(unittest.TestCase):
self.assertEqual(pkg.files, [])
self.assertEqual(pkg.changelogs,
[('Tomas Mlcoch <tmlcoch@redhat.com> - 6.0.1-1',
- 1334664000L,
+ 1334664000,
'- First release'),
('Tomas Mlcoch <tmlcoch@redhat.com> - 6.0.1-2',
- 1334664001L,
+ 1334664001,
'- Second release')])
def test_xml_parser_other_repo02(self):
static void
-outputtest_setup(Outputtest *outputtest, gconstpointer test_data)
+outputtest_setup(Outputtest *outputtest, G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
int fd;
fd = g_file_open_tmp(TMP_FILE_PATTERN, &(outputtest->tmp_filename), NULL);
static void
-outputtest_teardown(Outputtest *outputtest, gconstpointer test_data)
+outputtest_teardown(Outputtest *outputtest, G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
if (outputtest->tmp_filename) {
remove(outputtest->tmp_filename);
g_free(outputtest->tmp_filename);
static void
-outputtest_cw_output(Outputtest *outputtest, gconstpointer test_data)
+outputtest_cw_output(Outputtest *outputtest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
// Plain
test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename,
static void
-test_contentstating_singlewrite(Outputtest *outputtest, gconstpointer test_data)
+test_contentstating_singlewrite(Outputtest *outputtest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
CR_FILE *f;
int ret;
cr_ContentStat *stat;
GError *tmp_err = NULL;
- CR_UNUSED(test_data);
-
const char *content = "sdlkjowykjnhsadyhfsoaf\nasoiuyseahlndsf\n";
const int content_len = 39;
const char *content_sha256 = "c9d112f052ab86270bfb484817a513d6ce188133ddc0"
}
static void
-test_contentstating_multiwrite(Outputtest *outputtest, gconstpointer test_data)
+test_contentstating_multiwrite(Outputtest *outputtest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
CR_FILE *f;
int ret;
cr_ContentStat *stat;
GError *tmp_err = NULL;
- CR_UNUSED(test_data);
-
const char *content = "sdlkjowykjnhsadyhfsoaf\nasoiuyseahlndsf\n";
const int content_len = 39;
const char *content_sha256 = "c9d112f052ab86270bfb484817a513d6ce188133ddc0"
static void
test_cr_str_to_evr(void)
{
- struct cr_EVR evr;
+ cr_EVR *evr;
// V
evr = cr_str_to_evr("5.0.0", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("6.1", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("7", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
// VR
evr = cr_str_to_evr("5.0.0-2", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, "2");
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, "2");
+ cr_evr_free(evr);
evr = cr_str_to_evr("6.1-3", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, "3");
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, "3");
+ cr_evr_free(evr);
evr = cr_str_to_evr("7-4", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, "4");
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, "4");
+ cr_evr_free(evr);
// EV
evr = cr_str_to_evr("1:5.0.0", NULL);
- g_assert_cmpstr(evr.epoch, ==, "1");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "1");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("2:6.1", NULL);
- g_assert_cmpstr(evr.epoch, ==, "2");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "2");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("3:7", NULL);
- g_assert_cmpstr(evr.epoch, ==, "3");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "3");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
// EVR
evr = cr_str_to_evr("1:5.0.0-11", NULL);
- g_assert_cmpstr(evr.epoch, ==, "1");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, "11");
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "1");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, "11");
+ cr_evr_free(evr);
evr = cr_str_to_evr("2:6.1-22", NULL);
- g_assert_cmpstr(evr.epoch, ==, "2");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, "22");
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "2");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, "22");
+ cr_evr_free(evr);
evr = cr_str_to_evr("3:7-33", NULL);
- g_assert_cmpstr(evr.epoch, ==, "3");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, "33");
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "3");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, "33");
+ cr_evr_free(evr);
// Bad strings
evr = cr_str_to_evr(":", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr(":-", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
// Really bad values
evr = cr_str_to_evr(NULL, NULL);
- g_assert_cmpstr(evr.epoch, ==, NULL);
- g_assert_cmpstr(evr.version, ==, NULL);
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, NULL);
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("", NULL);
- g_assert_cmpstr(evr.epoch, ==, NULL);
- g_assert_cmpstr(evr.version, ==, NULL);
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, NULL);
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("-", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("-:", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
evr = cr_str_to_evr("foo:bar", NULL);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "bar");
- g_assert_cmpstr(evr.release, ==, NULL);
- free(evr.epoch);
- free(evr.version);
- free(evr.release);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, "bar");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ cr_evr_free(evr);
}
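
For reference, a small sketch of the new calling convention the test above was ported to (a hypothetical example, not part of the patch): cr_str_to_evr() now returns a heap-allocated cr_EVR * that is released with cr_evr_free(), instead of a stack struct whose members had to be free()d one by one:

#include <stdio.h>
#include "misc.h"

static void
print_evr(const char *str)
{
    cr_EVR *evr = cr_str_to_evr(str, NULL);
    printf("%s -> epoch=%s version=%s release=%s\n", str,
           evr->epoch   ? evr->epoch   : "(null)",
           evr->version ? evr->version : "(null)",
           evr->release ? evr->release : "(null)");
    cr_evr_free(evr);  // frees the members and the struct itself
}

int main(void)
{
    print_evr("1:5.0.0-11");  // epoch=1 version=5.0.0 release=11
    print_evr("6.1");         // epoch=0 version=6.1   release=(null)
    return 0;
}
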
static void
test_cr_str_to_evr_with_chunk(void)
{
- struct cr_EVR evr;
+ cr_EVR *evr;
GStringChunk *chunk;
chunk = g_string_chunk_new(512);
// V
evr = cr_str_to_evr("5.0.0", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("6.1", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("7", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
// VR
evr = cr_str_to_evr("5.0.0-2", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, "2");
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, "2");
+ g_free(evr);
evr = cr_str_to_evr("6.1-3", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, "3");
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, "3");
+ g_free(evr);
evr = cr_str_to_evr("7-4", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, "4");
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, "4");
+ g_free(evr);
// EV
evr = cr_str_to_evr("1:5.0.0", chunk);
- g_assert_cmpstr(evr.epoch, ==, "1");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "1");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("2:6.1", chunk);
- g_assert_cmpstr(evr.epoch, ==, "2");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "2");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("3:7", chunk);
- g_assert_cmpstr(evr.epoch, ==, "3");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "3");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
// EVR
evr = cr_str_to_evr("1:5.0.0-11", chunk);
- g_assert_cmpstr(evr.epoch, ==, "1");
- g_assert_cmpstr(evr.version, ==, "5.0.0");
- g_assert_cmpstr(evr.release, ==, "11");
+ g_assert_cmpstr(evr->epoch, ==, "1");
+ g_assert_cmpstr(evr->version, ==, "5.0.0");
+ g_assert_cmpstr(evr->release, ==, "11");
+ g_free(evr);
evr = cr_str_to_evr("2:6.1-22", chunk);
- g_assert_cmpstr(evr.epoch, ==, "2");
- g_assert_cmpstr(evr.version, ==, "6.1");
- g_assert_cmpstr(evr.release, ==, "22");
+ g_assert_cmpstr(evr->epoch, ==, "2");
+ g_assert_cmpstr(evr->version, ==, "6.1");
+ g_assert_cmpstr(evr->release, ==, "22");
+ g_free(evr);
evr = cr_str_to_evr("3:7-33", chunk);
- g_assert_cmpstr(evr.epoch, ==, "3");
- g_assert_cmpstr(evr.version, ==, "7");
- g_assert_cmpstr(evr.release, ==, "33");
+ g_assert_cmpstr(evr->epoch, ==, "3");
+ g_assert_cmpstr(evr->version, ==, "7");
+ g_assert_cmpstr(evr->release, ==, "33");
+ g_free(evr);
// Bad strings
evr = cr_str_to_evr(":", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr(":-", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
// Really bad values
evr = cr_str_to_evr(NULL, chunk);
- g_assert_cmpstr(evr.epoch, ==, NULL);
- g_assert_cmpstr(evr.version, ==, NULL);
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, NULL);
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("", chunk);
- g_assert_cmpstr(evr.epoch, ==, NULL);
- g_assert_cmpstr(evr.version, ==, NULL);
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, NULL);
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("-", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, "0");
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("-:", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, "");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
evr = cr_str_to_evr("foo:bar", chunk);
- g_assert_cmpstr(evr.epoch, ==, "0");
- g_assert_cmpstr(evr.version, ==, "bar");
- g_assert_cmpstr(evr.release, ==, NULL);
+ g_assert_cmpstr(evr->epoch, ==, NULL);
+ g_assert_cmpstr(evr->version, ==, "bar");
+ g_assert_cmpstr(evr->release, ==, NULL);
+ g_free(evr);
g_string_chunk_free(chunk);
}
static void
-copyfiletest_setup(Copyfiletest *copyfiletest, gconstpointer test_data)
+copyfiletest_setup(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
copyfiletest->tmp_dir = g_strdup(TMPDIR_TEMPLATE);
mkdtemp(copyfiletest->tmp_dir);
copyfiletest->dst_file = g_strconcat(copyfiletest->tmp_dir, "/", DST_FILE, NULL);
static void
-copyfiletest_teardown(Copyfiletest *copyfiletest, gconstpointer test_data)
+copyfiletest_teardown(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
remove(copyfiletest->dst_file);
rmdir(copyfiletest->tmp_dir);
g_free(copyfiletest->tmp_dir);
static void
-copyfiletest_test_empty_file(Copyfiletest *copyfiletest, gconstpointer test_data)
+copyfiletest_test_empty_file(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
- int ret;
+ gboolean ret;
char *checksum;
GError *tmp_err = NULL;
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
ret = cr_copy_file(TEST_EMPTY_FILE, copyfiletest->dst_file, &tmp_err);
+ g_assert(ret);
g_assert(!tmp_err);
- g_assert_cmpint(ret, ==, CRE_OK);
g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR));
checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL);
g_assert_cmpstr(checksum, ==, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
static void
-copyfiletest_test_text_file(Copyfiletest *copyfiletest, gconstpointer test_data)
+copyfiletest_test_text_file(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
- int ret;
+ gboolean ret;
char *checksum;
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
ret = cr_copy_file(TEST_TEXT_FILE, copyfiletest->dst_file, NULL);
- g_assert_cmpint(ret, ==, CRE_OK);
+ g_assert(ret);
g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR));
checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL);
g_assert_cmpstr(checksum, ==, "2f395bdfa2750978965e4781ddf224c89646c7d7a1569b7ebb023b170f7bd8bb");
static void
-copyfiletest_test_binary_file(Copyfiletest *copyfiletest, gconstpointer test_data)
+copyfiletest_test_binary_file(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
- int ret;
+ gboolean ret;
char *checksum;
GError *tmp_err = NULL;
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
ret = cr_copy_file(TEST_BINARY_FILE, copyfiletest->dst_file, &tmp_err);
g_assert(!tmp_err);
- g_assert_cmpint(ret, ==, CRE_OK);
+ g_assert(ret);
g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR));
checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL);
g_assert_cmpstr(checksum, ==, "bf68e32ad78cea8287be0f35b74fa3fecd0eaa91770b48f1a7282b015d6d883e");
static void
-copyfiletest_test_rewrite(Copyfiletest *copyfiletest, gconstpointer test_data)
+copyfiletest_test_rewrite(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
- int ret;
+ gboolean ret;
char *checksum;
GError *tmp_err = NULL;
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
ret = cr_copy_file(TEST_BINARY_FILE, copyfiletest->dst_file, NULL);
- g_assert_cmpint(ret, ==, CRE_OK);
+ g_assert(ret);
g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR));
checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL);
g_assert_cmpstr(checksum, ==, "bf68e32ad78cea8287be0f35b74fa3fecd0eaa91770b48f1a7282b015d6d883e");
ret = cr_copy_file(TEST_TEXT_FILE, copyfiletest->dst_file, &tmp_err);
g_assert(!tmp_err);
- g_assert_cmpint(ret, ==, CRE_OK);
+ g_assert(ret);
g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR));
checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL);
g_assert_cmpstr(checksum, ==, "2f395bdfa2750978965e4781ddf224c89646c7d7a1569b7ebb023b170f7bd8bb");
static void
-copyfiletest_test_corner_cases(Copyfiletest *copyfiletest, gconstpointer test_data)
+copyfiletest_test_corner_cases(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
- int ret;
+ gboolean ret;
GError *tmp_err = NULL;
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
- // Withou GError
+ // Without GError
ret = cr_copy_file(NON_EXIST_FILE, copyfiletest->dst_file, NULL);
- g_assert_cmpint(ret, !=, CRE_OK);
+ g_assert(!ret);
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
// With GError
ret = cr_copy_file(NON_EXIST_FILE, copyfiletest->dst_file, &tmp_err);
g_assert(tmp_err);
g_error_free(tmp_err);
- g_assert_cmpint(ret, !=, CRE_OK);
+ g_assert(!ret);
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
}
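
Likewise, a short sketch of the cr_copy_file() convention these tests now assume (a hypothetical example, not part of the patch): a gboolean success flag plus a GError, instead of a CRE_* return code:

#include <stdio.h>
#include <glib.h>
#include "misc.h"

int main(void)
{
    GError *err = NULL;

    // On failure the gboolean is FALSE and err carries the details
    if (!cr_copy_file("/etc/hostname", "/tmp/hostname.copy", &err)) {
        fprintf(stderr, "Copy failed: %s\n", err->message);
        g_error_free(err);
        return 1;
    }
    return 0;
}
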
static void
-compressfile_test_text_file(Copyfiletest *copyfiletest, gconstpointer test_data)
+compressfile_test_text_file(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
int ret;
char *checksum;
GError *tmp_err = NULL;
static void
compressfile_with_stat_test_text_file(Copyfiletest *copyfiletest,
- gconstpointer test_data)
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
int ret;
char *checksum;
cr_ContentStat *stat;
static void
decompressfile_with_stat_test_text_file(Copyfiletest *copyfiletest,
- gconstpointer test_data)
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
int ret;
- char *checksum;
cr_ContentStat *stat;
GError *tmp_err = NULL;
static void
-test_cr_download_valid_url_1(Copyfiletest *copyfiletest, gconstpointer test_data)
+test_cr_download_valid_url_1(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
CURL *handle = curl_easy_init();
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
static void
-test_cr_download_valid_url_2(Copyfiletest *copyfiletest, gconstpointer test_data)
+test_cr_download_valid_url_2(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
CURL *handle = curl_easy_init();
GError *tmp_err = NULL;
static void
-test_cr_download_invalid_url(Copyfiletest *copyfiletest, gconstpointer test_data)
+test_cr_download_invalid_url(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
CURL *handle = curl_easy_init();
GError *tmp_err = NULL;
static void
-test_cr_better_copy_file_local(Copyfiletest *copyfiletest, gconstpointer test_data)
+test_cr_better_copy_file_local(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- int ret;
+ gboolean ret;
char *checksum;
GError *tmp_err = NULL;
- CR_UNUSED(test_data);
-
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
ret = cr_better_copy_file(TEST_BINARY_FILE, copyfiletest->dst_file, &tmp_err);
g_assert(!tmp_err);
- g_assert_cmpint(ret, ==, CRE_OK);
+ g_assert(ret);
g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR));
checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL);
g_assert_cmpstr(checksum, ==, "bf68e32ad78cea8287be0f35b74fa3fecd0eaa91770b48f1a7282b015d6d883e");
static void
-test_cr_better_copy_file_url(Copyfiletest *copyfiletest, gconstpointer test_data)
+test_cr_better_copy_file_url(Copyfiletest *copyfiletest,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- int ret;
+ gboolean ret;
GError *tmp_err = NULL;
- CR_UNUSED(test_data);
-
g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS));
ret = cr_better_copy_file(VALID_URL_01, copyfiletest->dst_file, &tmp_err);
g_assert(!tmp_err);
- g_assert_cmpint(ret, ==, CRE_OK);
+ g_assert(ret);
g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR));
}
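+ // Note: the cr_Version fields were renamed from version/release to
+ // major/minor (patch and suffix are unchanged); the assertions below use
+ // the new names.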
struct cr_Version ver;
ver = cr_str_to_version(NULL);
- g_assert_cmpint(ver.version, ==, 0);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 0);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, NULL);
ver = cr_str_to_version("");
- g_assert_cmpint(ver.version, ==, 0);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 0);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, NULL);
ver = cr_str_to_version("abcd");
- g_assert_cmpint(ver.version, ==, 0);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 0);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, "abcd");
g_free(ver.suffix);
ver = cr_str_to_version("0.0.0");
- g_assert_cmpint(ver.version, ==, 0);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 0);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, NULL);
ver = cr_str_to_version("9");
- g_assert_cmpint(ver.version, ==, 9);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 9);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, NULL);
ver = cr_str_to_version("3beta");
- g_assert_cmpint(ver.version, ==, 3);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 3);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, "beta");
g_free(ver.suffix);
ver = cr_str_to_version("5.2gamma");
- g_assert_cmpint(ver.version, ==, 5);
- g_assert_cmpint(ver.release, ==, 2);
+ g_assert_cmpint(ver.major, ==, 5);
+ g_assert_cmpint(ver.minor, ==, 2);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, "gamma");
g_free(ver.suffix);
ver = cr_str_to_version("0.0.0b");
- g_assert_cmpint(ver.version, ==, 0);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 0);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, "b");
g_free(ver.suffix);
ver = cr_str_to_version("2.3.4");
- g_assert_cmpint(ver.version, ==, 2);
- g_assert_cmpint(ver.release, ==, 3);
+ g_assert_cmpint(ver.major, ==, 2);
+ g_assert_cmpint(ver.minor, ==, 3);
g_assert_cmpint(ver.patch, ==, 4);
g_assert_cmpstr(ver.suffix, ==, NULL);
ver = cr_str_to_version("11.33.123");
- g_assert_cmpint(ver.version, ==, 11);
- g_assert_cmpint(ver.release, ==, 33);
+ g_assert_cmpint(ver.major, ==, 11);
+ g_assert_cmpint(ver.minor, ==, 33);
g_assert_cmpint(ver.patch, ==, 123);
g_assert_cmpstr(ver.suffix, ==, NULL);
ver = cr_str_to_version("1234567.0987654.45678");
- g_assert_cmpint(ver.version, ==, 1234567);
- g_assert_cmpint(ver.release, ==, 987654);
+ g_assert_cmpint(ver.major, ==, 1234567);
+ g_assert_cmpint(ver.minor, ==, 987654);
g_assert_cmpint(ver.patch, ==, 45678);
g_assert_cmpstr(ver.suffix, ==, NULL);
ver = cr_str_to_version("1.0.2i");
- g_assert_cmpint(ver.version, ==, 1);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 1);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 2);
g_assert_cmpstr(ver.suffix, ==, "i");
g_free(ver.suffix);
ver = cr_str_to_version("1..3");
- g_assert_cmpint(ver.version, ==, 1);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 1);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 3);
g_assert_cmpstr(ver.suffix, ==, NULL);
g_free(ver.suffix);
ver = cr_str_to_version("..alpha");
- g_assert_cmpint(ver.version, ==, 0);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 0);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, "alpha");
g_free(ver.suffix);
ver = cr_str_to_version("alpha");
- g_assert_cmpint(ver.version, ==, 0);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 0);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, "alpha");
g_free(ver.suffix);
ver = cr_str_to_version("1-2-3");
- g_assert_cmpint(ver.version, ==, 1);
- g_assert_cmpint(ver.release, ==, 0);
+ g_assert_cmpint(ver.major, ==, 1);
+ g_assert_cmpint(ver.minor, ==, 0);
g_assert_cmpint(ver.patch, ==, 0);
g_assert_cmpstr(ver.suffix, ==, "-2-3");
g_free(ver.suffix);
static void
test_cr_split_rpm_filename(void)
{
- struct cr_NVREA *res;
+ cr_NEVRA *res;
res = cr_split_rpm_filename(NULL);
g_assert(!res);
- res = cr_split_rpm_filename("foo-1.0-1.i386.rpm");
+ res = cr_split_rpm_filename("foo-1.0-1.i386");
g_assert(res);
g_assert_cmpstr(res->name, ==, "foo");
g_assert_cmpstr(res->version, ==, "1.0");
g_assert_cmpstr(res->release, ==, "1");
g_assert(!res->epoch);
g_assert_cmpstr(res->arch, ==, "i386");
- cr_nvrea_free(res);
+ cr_nevra_free(res);
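+
+ // The epoch may appear in several positions in the filename; each of the
+ // variants below should yield the same remaining NEVRA fields.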
res = cr_split_rpm_filename("1:bar-9-123a.ia64.rpm");
g_assert(res);
g_assert_cmpstr(res->release, ==, "123a");
g_assert_cmpstr(res->epoch, ==, "1");
g_assert_cmpstr(res->arch, ==, "ia64");
- cr_nvrea_free(res);
+ cr_nevra_free(res);
- res = cr_split_rpm_filename(":a--.");
+ res = cr_split_rpm_filename("bar-2:9-123a.ia64.rpm");
g_assert(res);
- g_assert_cmpstr(res->name, ==, "a");
- g_assert_cmpstr(res->version, ==, "");
- g_assert_cmpstr(res->release, ==, "");
- g_assert_cmpstr(res->epoch, ==, "");
- g_assert_cmpstr(res->arch, ==, "");
- cr_nvrea_free(res);
+ g_assert_cmpstr(res->name, ==, "bar");
+ g_assert_cmpstr(res->version, ==, "9");
+ g_assert_cmpstr(res->release, ==, "123a");
+ g_assert_cmpstr(res->epoch, ==, "2");
+ g_assert_cmpstr(res->arch, ==, "ia64");
+ cr_nevra_free(res);
+
+ res = cr_split_rpm_filename("bar-9-123a:3.ia64.rpm");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "bar");
+ g_assert_cmpstr(res->version, ==, "9");
+ g_assert_cmpstr(res->release, ==, "123a");
+ g_assert_cmpstr(res->epoch, ==, "3");
+ g_assert_cmpstr(res->arch, ==, "ia64");
+ cr_nevra_free(res);
+
+ res = cr_split_rpm_filename("bar-9-123a.ia64.rpm:4");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "bar");
+ g_assert_cmpstr(res->version, ==, "9");
+ g_assert_cmpstr(res->release, ==, "123a");
+ g_assert_cmpstr(res->epoch, ==, "4");
+ g_assert_cmpstr(res->arch, ==, "ia64");
+ cr_nevra_free(res);
+
+ res = cr_split_rpm_filename("bar-9-123a.ia64:5");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "bar");
+ g_assert_cmpstr(res->version, ==, "9");
+ g_assert_cmpstr(res->release, ==, "123a");
+ g_assert_cmpstr(res->epoch, ==, "5");
+ g_assert_cmpstr(res->arch, ==, "ia64");
+ cr_nevra_free(res);
res = cr_split_rpm_filename("b");
g_assert(res);
g_assert(!res->release);
g_assert(!res->epoch);
g_assert(!res->arch);
- cr_nvrea_free(res);
+ cr_nevra_free(res);
}
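+
+// cr_str_to_nevr() splits a "name-[epoch:]version-release" string into a
+// cr_NEVR; as the cases below show, the epoch is accepted as a prefix
+// ("3:foo-..."), between name and version ("bar-4:9-..."), or as a suffix
+// ("...-el.6:3").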
+static void
+test_cr_str_to_nevr(void)
+{
+ cr_NEVR *res;
+
+ res = cr_str_to_nevr(NULL);
+ g_assert(!res);
+
+ res = cr_str_to_nevr("createrepo-0.9.9-22.fc20");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "createrepo");
+ g_assert_cmpstr(res->version, ==, "0.9.9");
+ g_assert_cmpstr(res->release, ==, "22.fc20");
+ g_assert(!res->epoch);
+ cr_nevr_free(res);
+
+ res = cr_str_to_nevr("bar-4:9-123a");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "bar");
+ g_assert_cmpstr(res->version, ==, "9");
+ g_assert_cmpstr(res->release, ==, "123a");
+ g_assert_cmpstr(res->epoch, ==, "4");
+ cr_nevr_free(res);
+
+ res = cr_str_to_nevr("3:foo-2-el.6");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "foo");
+ g_assert_cmpstr(res->version, ==, "2");
+ g_assert_cmpstr(res->release, ==, "el.6");
+ g_assert_cmpstr(res->epoch, ==, "3");
+ cr_nevr_free(res);
+
+ res = cr_str_to_nevr("foo-2-el.6:3");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "foo");
+ g_assert_cmpstr(res->version, ==, "2");
+ g_assert_cmpstr(res->release, ==, "el.6");
+ g_assert_cmpstr(res->epoch, ==, "3");
+ cr_nevr_free(res);
+
+ res = cr_str_to_nevr("b-1-2");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "b");
+ g_assert_cmpstr(res->version, ==, "1");
+ g_assert_cmpstr(res->release, ==, "2");
+ g_assert(!res->epoch);
+ cr_nevr_free(res);
+
+ res = cr_str_to_nevr("b");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "b");
+ g_assert(!res->version);
+ g_assert(!res->release);
+ g_assert(!res->epoch);
+ cr_nevr_free(res);
+}
+
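+// cr_str_to_nevra() additionally splits off a trailing ".arch"; again the
+// epoch is accepted in prefix, infix, and suffix positions.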
+static void
+test_cr_str_to_nevra(void)
+{
+ cr_NEVRA *res;
+
+ res = cr_str_to_nevra(NULL);
+ g_assert(!res);
+
+ res = cr_str_to_nevra("crypto-utils-2.4.1-52.fc20.x86_64");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "crypto-utils");
+ g_assert_cmpstr(res->version, ==, "2.4.1");
+ g_assert_cmpstr(res->release, ==, "52.fc20");
+ g_assert(!res->epoch);
+ g_assert_cmpstr(res->arch, ==, "x86_64");
+ cr_nevra_free(res);
+
+ res = cr_str_to_nevra("crypto-utils-1:2.4.1-52.fc20.x86_64");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "crypto-utils");
+ g_assert_cmpstr(res->version, ==, "2.4.1");
+ g_assert_cmpstr(res->release, ==, "52.fc20");
+ g_assert_cmpstr(res->epoch, ==, "1");
+ g_assert_cmpstr(res->arch, ==, "x86_64");
+ cr_nevra_free(res);
+
+ res = cr_str_to_nevra("2:crypto-utils-2.4.1-52.fc20.x86_64");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "crypto-utils");
+ g_assert_cmpstr(res->version, ==, "2.4.1");
+ g_assert_cmpstr(res->release, ==, "52.fc20");
+ g_assert_cmpstr(res->epoch, ==, "2");
+ g_assert_cmpstr(res->arch, ==, "x86_64");
+ cr_nevra_free(res);
+
+ res = cr_str_to_nevra("crypto-utils-2.4.1-52.fc20:3.x86_64");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "crypto-utils");
+ g_assert_cmpstr(res->version, ==, "2.4.1");
+ g_assert_cmpstr(res->release, ==, "52.fc20");
+ g_assert_cmpstr(res->epoch, ==, "3");
+ g_assert_cmpstr(res->arch, ==, "x86_64");
+ cr_nevra_free(res);
+
+ res = cr_str_to_nevra("crypto-utils-2.4.1-52.fc20.x86_64:4");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "crypto-utils");
+ g_assert_cmpstr(res->version, ==, "2.4.1");
+ g_assert_cmpstr(res->release, ==, "52.fc20");
+ g_assert_cmpstr(res->epoch, ==, "4");
+ g_assert_cmpstr(res->arch, ==, "x86_64");
+ cr_nevra_free(res);
+
+ res = cr_str_to_nevra("a");
+ g_assert(res);
+ g_assert_cmpstr(res->name, ==, "a");
+ g_assert(!res->version);
+ g_assert(!res->release);
+ g_assert(!res->epoch);
+ g_assert(!res->arch);
+ cr_nevra_free(res);
+}
+
static void
test_cr_cmp_evr(void)
{
g_assert_cmpint(res, ==, 1);
}
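+
+// cr_cut_dirs(path, n) apparently strips the first n directory components
+// from a path (much like tar's --strip-components), treating any run of
+// slashes as a single separator.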
+static void
+test_cr_cut_dirs(void)
+{
+ char *res;
+
+ res = cr_cut_dirs(NULL, 1);
+ g_assert_cmpstr(res, ==, NULL);
+
+ res = cr_cut_dirs("", 1);
+ g_assert_cmpstr(res, ==, "");
+
+ res = cr_cut_dirs("foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("/foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("//foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("///foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("bar/foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("/bar/foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("bar//foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("//bar//foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("///a///b/foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "b/foo.rpm");
+
+ res = cr_cut_dirs("a/b/c/foo.rpm", 1);
+ g_assert_cmpstr(res, ==, "b/c/foo.rpm");
+
+ res = cr_cut_dirs("a/b/c/foo.rpm", 2);
+ g_assert_cmpstr(res, ==, "c/foo.rpm");
+
+ res = cr_cut_dirs("a/b/c/foo.rpm", 3);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+
+ res = cr_cut_dirs("a///b///c///foo.rpm", 3);
+ g_assert_cmpstr(res, ==, "foo.rpm");
+}
int
main(int argc, char *argv[])
test_cr_cmp_version_str);
g_test_add_func("/misc/test_cr_split_rpm_filename",
test_cr_split_rpm_filename);
+ g_test_add_func("/misc/test_cr_str_to_nevr",
+ test_cr_str_to_nevr);
+ g_test_add_func("/misc/test_cr_str_to_nevra",
+ test_cr_str_to_nevra);
g_test_add_func("/misc/test_cr_cmp_evr",
test_cr_cmp_evr);
+ g_test_add_func("/misc/test_cr_cut_dirs",
+ test_cr_cut_dirs);
return g_test_run();
}
static void
-testdata_setup(TestData *testdata, gconstpointer test_data)
+testdata_setup(TestData *testdata,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
testdata->tmp_dir = g_strdup(TMP_DIR_PATTERN);
mkdtemp(testdata->tmp_dir);
}
static void
-testdata_teardown(TestData *testdata, gconstpointer test_data)
+testdata_teardown(TestData *testdata,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
cr_remove_dir(testdata->tmp_dir, NULL);
g_free(testdata->tmp_dir);
}
static void
-test_cr_open_db(TestData *testdata, gconstpointer test_data)
+test_cr_open_db(TestData *testdata,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
GError *err = NULL;
gchar *path = NULL;
cr_SqliteDb *db;
static void
-test_cr_db_add_primary_pkg(TestData *testdata, gconstpointer test_data)
+test_cr_db_add_primary_pkg(TestData *testdata,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
GError *err = NULL;
gchar *path;
cr_SqliteDb *db;
static void
-test_cr_db_dbinfo_update(TestData *testdata, gconstpointer test_data)
+test_cr_db_dbinfo_update(TestData *testdata,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
GError *err = NULL;
gchar *path;
cr_SqliteDb *db;
static void
-test_all(TestData *testdata, gconstpointer test_data)
+test_all(TestData *testdata,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
GError *err = NULL;
gchar *path;
cr_SqliteDb *db = NULL;
// Load package
cr_package_parser_init();
- pkg = cr_package_from_rpm(EMPTY_PKG, CR_CHECKSUM_SHA256, EMPTY_PKG, NULL, 5, NULL, NULL);
+ pkg = cr_package_from_rpm(EMPTY_PKG, CR_CHECKSUM_SHA256, EMPTY_PKG, NULL,
+ 5, NULL, CR_HDRR_NONE, NULL);
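+ // (cr_package_from_rpm() takes an additional header-reading-flags
+ // argument now; CR_HDRR_NONE presumably selects the default behaviour)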
g_assert(pkg);
cr_package_parser_cleanup();
static void
-fixtures_setup(TestFixtures *fixtures, gconstpointer test_data)
+fixtures_setup(TestFixtures *fixtures,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
gchar *template = g_strdup(TMPDIR_TEMPLATE);
fixtures->tmpdir = mkdtemp(template);
g_assert(fixtures->tmpdir);
static void
-fixtures_teardown(TestFixtures *fixtures, gconstpointer test_data)
+fixtures_teardown(TestFixtures *fixtures,
+ G_GNUC_UNUSED gconstpointer test_data)
{
- CR_UNUSED(test_data);
-
if (!fixtures->tmpdir)
return;
static void
-test_no_packages(TestFixtures *fixtures, gconstpointer test_data)
+test_no_packages(TestFixtures *fixtures,
+ G_GNUC_UNUSED gconstpointer test_data)
{
cr_XmlFile *f;
gchar *path;
int ret;
GError *err = NULL;
- CR_UNUSED(test_data);
g_assert(g_file_test(fixtures->tmpdir, G_FILE_TEST_IS_DIR));
// Try primary.xml
static int
newpkgcb_skip_fake_bash(cr_Package **pkg,
- const char *pkgId,
+ G_GNUC_UNUSED const char *pkgId,
const char *name,
- const char *arch,
- void *cbdata,
+ G_GNUC_UNUSED const char *arch,
+ G_GNUC_UNUSED void *cbdata,
GError **err)
{
- CR_UNUSED(pkgId);
- CR_UNUSED(arch);
- CR_UNUSED(cbdata);
-
g_assert(pkg != NULL);
g_assert(*pkg == NULL);
g_assert(pkgId != NULL);
static int
newpkgcb_interrupt(cr_Package **pkg,
- const char *pkgId,
- const char *name,
- const char *arch,
- void *cbdata,
+ G_GNUC_UNUSED const char *pkgId,
+ G_GNUC_UNUSED const char *name,
+ G_GNUC_UNUSED const char *arch,
+ G_GNUC_UNUSED void *cbdata,
GError **err)
{
- CR_UNUSED(pkgId);
- CR_UNUSED(name);
- CR_UNUSED(arch);
- CR_UNUSED(cbdata);
-
g_assert(pkg != NULL);
g_assert(*pkg == NULL);
g_assert(pkgId != NULL);
}
static int
-warningcb(cr_XmlParserWarningType type,
- char *msg,
- void *cbdata,
- GError **err)
+warningcb(G_GNUC_UNUSED cr_XmlParserWarningType type,
+ G_GNUC_UNUSED char *msg,
+ void *cbdata,
+ G_GNUC_UNUSED GError **err)
{
- CR_UNUSED(type);
- CR_UNUSED(msg);
- CR_UNUSED(err);
-
g_assert(type < CR_XML_WARNING_SENTINEL);
g_assert(!err || *err == NULL);
}
static int
-warningcb_interrupt(cr_XmlParserWarningType type,
- char *msg,
- void *cbdata,
- GError **err)
+warningcb_interrupt(G_GNUC_UNUSED cr_XmlParserWarningType type,
+ G_GNUC_UNUSED char *msg,
+ G_GNUC_UNUSED void *cbdata,
+ G_GNUC_UNUSED GError **err)
{
- CR_UNUSED(type);
- CR_UNUSED(msg);
- CR_UNUSED(cbdata);
- CR_UNUSED(err);
-
g_assert(type < CR_XML_WARNING_SENTINEL);
g_assert(!err || *err == NULL);
g_free(warnmsgs);
}
+static void
+test_cr_xml_parse_different_md_type(void)
+{
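+ // Feeding an other.xml to the filelists parser must not be a hard error:
+ // the parse should finish with CRE_OK and zero parsed packages, reporting
+ // the mismatch only through warnings.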
+ char *warnmsgs;
+ int parsed = 0;
+ GString *warn_strings = g_string_new(NULL);
+ GError *tmp_err = NULL;
+ int ret = cr_xml_parse_filelists(TEST_REPO_01_OTHER, NULL, NULL,
+ pkgcb, &parsed, warningcb,
+ warn_strings, &tmp_err);
+ g_assert(tmp_err == NULL);
+ g_assert_cmpint(ret, ==, CRE_OK);
+ g_assert_cmpint(parsed, ==, 0);
+ warnmsgs = g_string_free(warn_strings, FALSE);
+ g_assert_cmpstr(warnmsgs, ==, "Unknown element \"otherdata\";"
+ "The file don't contain the expected element \"<filelists>\" - "
+ "The file probably isn't a valid filelists.xml;");
+ g_free(warnmsgs);
+}
+
int
main(int argc, char *argv[])
{
test_cr_xml_parse_filelists_bad_file_type_00);
g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_bad_file_type_01",
test_cr_xml_parse_filelists_bad_file_type_01);
+ g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_different_md_type",
+ test_cr_xml_parse_different_md_type);
return g_test_run();
}
// Callbacks
static int
-warningcb(cr_XmlParserWarningType type,
- char *msg,
- void *cbdata,
- GError **err)
+warningcb(G_GNUC_UNUSED cr_XmlParserWarningType type,
+ G_GNUC_UNUSED char *msg,
+ void *cbdata,
+ G_GNUC_UNUSED GError **err)
{
- CR_UNUSED(type);
- CR_UNUSED(msg);
- CR_UNUSED(err);
-
g_assert(type < CR_XML_WARNING_SENTINEL);
g_assert(!err || *err == NULL);
}
static int
-warningcb_interrupt(cr_XmlParserWarningType type,
- char *msg,
- void *cbdata,
- GError **err)
+warningcb_interrupt(G_GNUC_UNUSED cr_XmlParserWarningType type,
+ G_GNUC_UNUSED char *msg,
+ G_GNUC_UNUSED void *cbdata,
+ G_GNUC_UNUSED GError **err)
{
- CR_UNUSED(type);
- CR_UNUSED(msg);
- CR_UNUSED(cbdata);
- CR_UNUSED(err);
-
g_assert(type < CR_XML_WARNING_SENTINEL);
g_assert(!err || *err == NULL);
--- /dev/null
+/* createrepo_c - Library of routines for manipulation with repodata
+ * Copyright (C) 2013 Tomas Mlcoch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <glib.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "fixtures.h"
+#include "createrepo/error.h"
+#include "createrepo/misc.h"
+#include "createrepo/xml_parser.h"
+#include "createrepo/updateinfo.h"
+
+// Tests
+
+static void
+test_cr_xml_parse_updateinfo_00(void)
+{
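+ // TEST_UPDATEINFO_00 is an (empty) updateinfo document; parsing it
+ // should simply succeed.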
+ GError *tmp_err = NULL;
+ cr_UpdateInfo *ui = cr_updateinfo_new();
+
+ int ret = cr_xml_parse_updateinfo(TEST_UPDATEINFO_00, ui,
+ NULL, NULL, &tmp_err);
+ g_assert(tmp_err == NULL);
+ g_assert_cmpint(ret, ==, CRE_OK);
+
+ cr_updateinfo_free(ui);
+}
+
+static void
+test_cr_xml_parse_updateinfo_01(void)
+{
+ GError *tmp_err = NULL;
+ cr_UpdateInfo *ui = cr_updateinfo_new();
+ cr_UpdateRecord *update;
+ cr_UpdateReference *ref;
+ cr_UpdateCollection *col;
+ cr_UpdateCollectionPackage *pkg;
+
+ int ret = cr_xml_parse_updateinfo(TEST_UPDATEINFO_01, ui,
+ NULL, NULL, &tmp_err);
+
+ g_assert(tmp_err == NULL);
+ g_assert_cmpint(ret, ==, CRE_OK);
+
+ g_assert_cmpint(g_slist_length(ui->updates), ==, 1);
+ update = ui->updates->data;
+
+ g_assert_cmpstr(update->from, ==, "secresponseteam@foo.bar");
+ g_assert_cmpstr(update->status, ==, "final");
+ g_assert_cmpstr(update->type, ==, "enhancement");
+ g_assert_cmpstr(update->version, ==, "3");
+ g_assert_cmpstr(update->id, ==, "foobarupdate_1");
+ g_assert_cmpstr(update->title, ==, "title_1");
+ g_assert_cmpstr(update->issued_date, ==, "2012-12-12 00:00:00");
+ g_assert_cmpstr(update->updated_date, ==, "2012-12-12 00:00:00");
+ g_assert_cmpstr(update->rights, ==, "rights_1");
+ g_assert_cmpstr(update->release, ==, "release_1");
+ g_assert_cmpstr(update->pushcount, ==, "pushcount_1");
+ g_assert_cmpstr(update->severity, ==, "severity_1");
+ g_assert_cmpstr(update->summary, ==, "summary_1");
+ g_assert_cmpstr(update->description, ==, "description_1");
+ g_assert_cmpstr(update->solution, ==, "solution_1");
+
+ g_assert_cmpint(g_slist_length(update->references), ==, 1);
+ ref = update->references->data;
+ g_assert_cmpstr(ref->href, ==, "https://foobar/foobarupdate_1");
+ g_assert_cmpstr(ref->id, ==, "1");
+ g_assert_cmpstr(ref->type, ==, "self");
+ g_assert_cmpstr(ref->title, ==, "update_1");
+
+ g_assert_cmpint(g_slist_length(update->collections), ==, 1);
+ col = update->collections->data;
+ g_assert_cmpstr(col->shortname, ==, "foo.component");
+ g_assert_cmpstr(col->name, ==, "Foo component");
+
+ g_assert_cmpint(g_slist_length(col->packages), ==, 1);
+ pkg = col->packages->data;
+ g_assert_cmpstr(pkg->name, ==, "bar");
+ g_assert_cmpstr(pkg->version, ==, "2.0.1");
+ g_assert_cmpstr(pkg->release, ==, "3");
+ g_assert_cmpstr(pkg->epoch, ==, "0");
+ g_assert_cmpstr(pkg->arch, ==, "noarch");
+ g_assert_cmpstr(pkg->src, ==, "bar-2.0.1-3.src.rpm");
+ g_assert_cmpstr(pkg->filename, ==, "bar-2.0.1-3.noarch.rpm");
+ g_assert_cmpstr(pkg->sum, ==, "29be985e1f652cd0a29ceed6a1c49964d3618bddd22f0be3292421c8777d26c8");
+ g_assert_cmpint(pkg->sum_type, ==, CR_CHECKSUM_SHA256);
+ g_assert(pkg->reboot_suggested);
+
+ cr_updateinfo_free(ui);
+}
+
+static void
+test_cr_xml_parse_updateinfo_02(void)
+{
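+ // TEST_UPDATEINFO_02 presumably contains a single update whose attributes
+ // and sub-elements are all empty, so every parsed field must stay
+ // NULL/unset.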
+ GError *tmp_err = NULL;
+ cr_UpdateInfo *ui = cr_updateinfo_new();
+ cr_UpdateRecord *update;
+ cr_UpdateReference *ref;
+ cr_UpdateCollection *col;
+ cr_UpdateCollectionPackage *pkg;
+
+ int ret = cr_xml_parse_updateinfo(TEST_UPDATEINFO_02, ui,
+ NULL, NULL, &tmp_err);
+
+ g_assert(tmp_err == NULL);
+ g_assert_cmpint(ret, ==, CRE_OK);
+
+ g_assert_cmpint(g_slist_length(ui->updates), ==, 1);
+ update = ui->updates->data;
+
+ g_assert(!update->from);
+ g_assert(!update->status);
+ g_assert(!update->type);
+ g_assert(!update->version);
+ g_assert(!update->id);
+ g_assert(!update->title);
+ g_assert(!update->issued_date);
+ g_assert(!update->updated_date);
+ g_assert(!update->rights);
+ g_assert(!update->release);
+ g_assert(!update->pushcount);
+ g_assert(!update->severity);
+ g_assert(!update->summary);
+ g_assert(!update->description);
+ g_assert(!update->solution);
+
+ g_assert_cmpint(g_slist_length(update->references), ==, 1);
+ ref = update->references->data;
+ g_assert(!ref->href);
+ g_assert(!ref->id);
+ g_assert(!ref->type);
+ g_assert(!ref->title);
+
+ g_assert_cmpint(g_slist_length(update->collections), ==, 1);
+ col = update->collections->data;
+ g_assert(!col->shortname);
+ g_assert(!col->name);
+
+ g_assert_cmpint(g_slist_length(col->packages), ==, 1);
+ pkg = col->packages->data;
+ g_assert(!pkg->name);
+ g_assert(!pkg->version);
+ g_assert(!pkg->release);
+ g_assert(!pkg->epoch);
+ g_assert(!pkg->arch);
+ g_assert(!pkg->src);
+ g_assert(!pkg->filename);
+ g_assert(!pkg->sum);
+ g_assert_cmpint(pkg->sum_type, ==, CR_CHECKSUM_UNKNOWN);
+ g_assert(!pkg->reboot_suggested);
+
+ cr_updateinfo_free(ui);
+}
+
+int
+main(int argc, char *argv[])
+{
+ g_test_init(&argc, &argv, NULL);
+
+ g_test_add_func("/xml_parser_updateinfo/test_cr_xml_parse_updateinfo_00",
+ test_cr_xml_parse_updateinfo_00);
+ g_test_add_func("/xml_parser_updateinfo/test_cr_xml_parse_updateinfo_01",
+ test_cr_xml_parse_updateinfo_01);
+ g_test_add_func("/xml_parser_updateinfo/test_cr_xml_parse_updateinfo_02",
+ test_cr_xml_parse_updateinfo_02);
+
+ return g_test_run();
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE comps PUBLIC "-//Red Hat, Inc.//DTD Comps info//EN" "comps.dtd">
+<comps>
+ <group>
+ <id>test-group</id>
+ <_name>Test Group</_name>
+ <_description/>
+ <default>false</default>
+ <uservisible>True</uservisible>
+ <packagelist>
+ <packagereq type="default">Archer</packagereq>
+ </packagelist>
+ </group>
+ <category>
+ <id>test category</id>
+ <_name>Test Category</_name>
+ <_description>Description of test category.</_description>
+ <display_order>99</display_order>
+ <grouplist>
+ <groupid>test-group</groupid>
+ </grouplist>
+ </category>
+</comps>
--- /dev/null
+Name: Rimmer
+Version: 1.0.2
+Release: 2
+License: GPL
+Summary: Package with weak deps.
+Group: Development/Tools
+Url: http://pkgwithweakdeps.eu/
+Packager: Arnold Rimmer
+
+Requires: req <= 1
+Requires(pre): reqpre = 2
+Provides: pro <= 3
+Obsoletes: obs <= 4
+Conflicts: con <= 5
+Suggests: sug > 6
+Enhances: enh < 7
+Recommends: rec = 8
+Supplements: sup > 9
+
+%description
+Package with weak deps.
+
+#%prep
+#%setup -q
+
+%build
+touch README
+echo OK
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT/usr/bin/
+mkdir -p $RPM_BUILD_ROOT/usr/share/doc/%{name}-%{version}/
+touch $RPM_BUILD_ROOT/usr/bin/complex_a
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%{_bindir}/complex_a
+%doc README
+
+%changelog
+* Wed Apr 18 2012 Tomas Mlcoch <tmlcoch@redhat.com> - 2.2.2-2
+- Look, we've all got something to contribute to this discussion. And
+ I think what you should contribute from now on is silence.
+
+* Tue Apr 17 2012 Tomas Mlcoch <tmlcoch@redhat.com> - 1.1.1-1
+- First changelog.
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<updates>
+</updates>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<updates>
+ <update from="secresponseteam@foo.bar" status="final" type="enhancement" version="3">
+ <id>foobarupdate_1</id>
+ <title>title_1</title>
+ <issued date="2012-12-12 00:00:00"/>
+ <updated date="2012-12-12 00:00:00"/>
+ <rights>rights_1</rights>
+ <release>release_1</release>
+ <pushcount>pushcount_1</pushcount>
+ <severity>severity_1</severity>
+ <summary>summary_1</summary>
+ <description>description_1</description>
+ <solution>solution_1</solution>
+ <references>
+ <reference href="https://foobar/foobarupdate_1" id="1" type="self" title="update_1"/>
+ </references>
+ <pkglist>
+ <collection short="foo.component">
+ <name>Foo component</name>
+ <package name="bar" version="2.0.1" release="3" epoch="0" arch="noarch" src="bar-2.0.1-3.src.rpm">
+ <filename>bar-2.0.1-3.noarch.rpm</filename>
+ <sum type="sha256">29be985e1f652cd0a29ceed6a1c49964d3618bddd22f0be3292421c8777d26c8</sum>
+ <reboot_suggested/>
+ </package>
+ </collection>
+ </pkglist>
+ </update>
+</updates>
#!/bin/bash
-rm -v createrepo_c-*.rpm python-createrepo_c-*.rpm
-
+rm -fv createrepo_c-*.tar.xz
+rm -fv createrepo_c-*.rpm python-createrepo_c-*.rpm
+rm -fv deltarepo-*.rpm python-deltarepo-*.rpm
#!/bin/bash
-# /usr/share/man/man8/createrepo_c.8.gz
+# /usr/share/man/man8/createrepo_c.8
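+# (pages are now generated uncompressed; rpmbuild's brp-compress step gzips
+# man pages during packaging anyway)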
-EXPECTED_ARGS=4
+EXPECTED_ARGS=5
if [ $# -ne $EXPECTED_ARGS ]
then
- echo "Usage: `basename $0` <createrepo_input_file> <mergerepo_input_file> <modifyrepo_input_file> <outputdir>"
+ echo "Usage: `basename $0` <createrepo_input_file> <mergerepo_input_file> <modifyrepo_input_file> <sqliterepo_input_file> <outputdir>"
echo
- echo "Example: `basename $0` src/cmd_parser.c src/mergerepo_c.c src/modifyrepo_c.c doc/"
+ echo "Example: `basename $0` src/cmd_parser.c src/mergerepo_c.c src/modifyrepo_c.c src/sqliterepo_c.c doc/"
exit 1
fi
MY_DIR=`dirname $0`
MY_DIR="$MY_DIR/"
-python $MY_DIR/gen_rst.py $1 | rst2man | gzip > $4/createrepo_c.8.gz
-python $MY_DIR/gen_rst.py $2 --mergerepo | rst2man | gzip > $4/mergerepo_c.8.gz
-python $MY_DIR/gen_rst.py $3 --modifyrepo | rst2man | gzip > $4/modifyrepo_c.8.gz
+python $MY_DIR/gen_rst.py $1 | rst2man > $5/createrepo_c.8
+python $MY_DIR/gen_rst.py $2 --mergerepo | rst2man > $5/mergerepo_c.8
+python $MY_DIR/gen_rst.py $3 --modifyrepo | rst2man > $5/modifyrepo_c.8
+python $MY_DIR/gen_rst.py $4 --sqliterepo | rst2man > $5/sqliterepo_c.8
class Info(object):
- def __init__(self, name, description=None, synopsis=None, copyright=None, options=None):
+ def __init__(self, name, summary=None, description=None, synopsis=None, copyright=None, options=None):
self.name = name
+ self.summary = summary
self.description = description
self.synopsis = synopsis
self.copyright = copyright
rst += "%s\n" % self.name
rst += "%s\n\n" % ("=" * len(self.name),)
- # Add synopsis
- #if self.synopsis:
- # rst += ":Synopsis: %s\n\n" % self.synopsis
-
- # Add description
- if self.description:
- rst += ":Subtitle: %s\n\n" % self.description
+ # Add summary
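+ # (the '-' over/underline marks the summary as the RST document subtitle,
+ # which rst2man typically renders into the man page's NAME section)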
+ if self.summary:
+ rst += "%s\n" % ("-" * len(self.summary))
+ rst += "%s\n" % self.summary
+ rst += "%s\n" % ("-" * len(self.summary))
+ rst += "\n"
# Add copyright
if self.copyright:
# Add date
rst += ":Date: $Date: %s $\n\n" % datetime.datetime.strftime(datetime.datetime.utcnow(), format="%F %X")
+ # Add synopsis
+ if self.synopsis:
+ rst += "SYNOPSIS\n"
+ rst += "========\n\n"
+ for line in self.synopsis:
+ rst += "%s\n\n" % line
+ rst += "\n"
+
# Add options
- rst += "-------\n"
rst += "OPTIONS\n"
- rst += "-------\n"
+ rst += "=======\n"
for command in self.options:
cmd = ""
arg_description = entry_match.group("arg_description").strip()
# Normalize short name
- if short_name and (short_name == "NULL" or short_name == "0"):
+ if short_name in ("NULL", "0", "\\0"):
short_name = None
# Normalize description
if __name__ == "__main__":
- parser = OptionParser('usage: %prog [options] <filename> [--mergerepo|--modifyrepo]')
+ parser = OptionParser('usage: %prog [options] <filename> [--mergerepo|--modifyrepo|--sqliterepo]')
parser.add_option('-m', '--mergerepo', action="store_true", help="Gen rst for mergerepo")
parser.add_option('-r', '--modifyrepo', action="store_true", help="Gen rst for modifyrepo")
+ parser.add_option('-s', '--sqliterepo', action="store_true", help="Gen rst for sqliterepo")
options, args = parser.parse_args()
if len(args) < 1:
if options.mergerepo:
NAME = "mergerepo_c"
info = Info(NAME,
- description="C implementation of mergerepo",
- synopsis="%s [options] <directory>" % (NAME,),
+ summary="Merge multiple rpm-md format repositories together",
+ synopsis=["%s [options] --repo repo1 --repo repo2" % (NAME,)],
options=args)
elif options.modifyrepo:
NAME = "modifyrepo_c"
info = Info(NAME,
- description="C implementation of modifyrepo",
- synopsis="%s [options] <directory>" % (NAME,),
+ summary="Modify a repomd.xml of rpm-md format repository",
+ synopsis=["%s [options] <input metadata> <output repodata>" % (NAME,),
+ "%s --remove <metadata type> <output repodata>" % (NAME,),
+ "%s [options] --batchfile <batch file> <output repodata>" % (NAME,) ],
+ options=args)
+ elif options.sqliterepo:
+ NAME = "sqliterepo_c"
+ info = Info(NAME,
+ summary="Generate sqlite db files for a repository in rpm-md format",
+ synopsis=["%s [options] <repo_directory>" % (NAME,) ],
options=args)
else:
NAME = "createrepo_c"
info = Info(NAME,
- description="C implementation of createrepo",
- synopsis="%s [options] <directory>" % (NAME,),
+ summary="Create rpm-md format (xml-rpm-metadata) repository",
+ synopsis=["%s [options] <directory>" % (NAME,)],
options=args)
ret = info.gen_rst()
#!/bin/bash
+PACKAGE="createrepo_c"
RPMBUILD_DIR="${HOME}/rpmbuild/"
BUILD_DIR="$RPMBUILD_DIR/BUILD"
-TARGETDIR=`pwd -P`
-SCRIPTDIR=$( cd "$(dirname "$0")" ; pwd -P )
-PROJECTROOTDIR=`realpath "$SCRIPTDIR/../"`
GITREV=`git rev-parse --short HEAD`
+PREFIX="" # Root project dir
+MY_DIR=`dirname "$0"`
-echo "Cleaning $BUILD_DIR"
-rm -rf $BUILD_DIR
-echo "Removing $RPMBUILD_DIR/createrepo_c.spec"
-rm -f $RPMBUILD_DIR/createrepo_c.spec
-
-if [ $# -gt "1" ] && [ "$1" = "-h" ]; then
- echo "Usage: `basename $0` [git revision]"
- exit 0
-fi
-
-if [ $# -eq "1" ]; then
- GITREV="$1"
+if [ $# -lt "1" -o $# -gt "2" ]
+then
+ echo "Usage: `basename "$0"` <root_project_dir> [revision]"
+ exit 1
fi
-echo "Generating rpm for $GITREV"
+PREFIX="$1/"
if [ ! -d "$RPMBUILD_DIR" ]; then
echo "rpmbuild dir $RPMBUILD_DIR doesn't exist!"
exit 1
fi
-echo "> Making tarball .."
-$SCRIPTDIR/make_tarball.sh $GITREV
+echo "Generating rpm for $GITREV"
+echo "Cleaning $BUILD_DIR"
+rm -rf "$BUILD_DIR"
+echo "Removing $RPMBUILD_DIR/$PACKAGE.spec"
+rm -f "$RPMBUILD_DIR/$PACKAGE.spec"
+
+echo "> Making tarball .."
+"$MY_DIR/make_tarball.sh" "$GITREV"
if [ ! $? == "0" ]; then
echo "Error while making tarball"
exit 1
echo "Tarball done"
echo "> Copying tarball and .spec file into the $RPMBUILD_DIR .."
-cp createrepo_c-$GITREV.tar.xz $RPMBUILD_DIR/SOURCES/
+cp "$PREFIX/$PACKAGE-$GITREV.tar.xz" "$RPMBUILD_DIR/SOURCES/"
if [ ! $? == "0" ]; then
- echo "Error while: cp createrepo_c-*.tar.xz $RPMBUILD_DIR/SOURCES/"
+ echo "Error while: cp $PREFIX/$PACKAGE-$GITREV.tar.xz $RPMBUILD_DIR/SOURCES/"
exit 1
fi
-cp $PROJECTROOTDIR/createrepo_c.spec $RPMBUILD_DIR/SPECS/
+# Update %global gitrev in the project spec and copy it into SPECS/ via sed
+sed -i "s/%global gitrev .*/%global gitrev $GITREV/g" "$PREFIX/$PACKAGE.spec"
+sed "s/%global gitrev .*/%global gitrev $GITREV/g" "$PREFIX/$PACKAGE.spec" > "$RPMBUILD_DIR/SPECS/$PACKAGE.spec"
if [ ! $? == "0" ]; then
- echo "Error while: cp $PROJECTROOTDIR/createrepo_c*.spec $RPMBUILD_DIR/SPECS/"
+ echo "Error while: cp $PREFIX/$PACKAGE.spec $RPMBUILD_DIR/SPECS/"
exit 1
fi
echo "Copying done"
-echo "> Starting rpmbuild createrepo_c.."
-rpmbuild -ba $RPMBUILD_DIR/SPECS/createrepo_c.spec
+echo "> Starting rpmbuild $PACKAGE.."
+rpmbuild -ba "$RPMBUILD_DIR/SPECS/$PACKAGE.spec"
if [ ! $? == "0" ]; then
- echo "Error while: rpmbuild -ba $RPMBUILD_DIR/SPECS/createrepo_c.spec"
+ echo "Error while: rpmbuild -ba $RPMBUILD_DIR/SPECS/$PACKAGE.spec"
exit 1
fi
echo "rpmbuild done"
echo "> Cleanup .."
-rpmbuild --clean $RPMBUILD_DIR/SPECS/createrepo_c.spec
+rpmbuild --clean "$RPMBUILD_DIR/SPECS/$PACKAGE.spec"
echo "Cleanup done"
echo "> Moving rpms and srpm .."
-mv --verbose $RPMBUILD_DIR/SRPMS/createrepo_c-*.src.rpm $TARGETDIR/.
-mv --verbose $RPMBUILD_DIR/RPMS/*/createrepo_c-*.rpm $TARGETDIR/.
-mv --verbose $RPMBUILD_DIR/RPMS/*/python-createrepo_c-*.rpm $TARGETDIR/.
+mv --verbose "$RPMBUILD_DIR"/SRPMS/"$PACKAGE"-*.src.rpm "$PREFIX/."
+mv --verbose "$RPMBUILD_DIR"/RPMS/*/"$PACKAGE"-*.rpm "$PREFIX/."
+mv --verbose "$RPMBUILD_DIR"/RPMS/*/python*-"$PACKAGE"-*.rpm "$PREFIX/."
echo "Moving done"
echo "All done!"
+PACKAGE="createrepo_c"
TARGET_DIR="./"
if [ "$#" -eq "0" ]; then
GITREV=`git rev-parse --short HEAD`
else
- GITREV=$1
+ GITREV="$1"
fi
echo "Generate tarball for revision: $GITREV"
-git archive ${GITREV} --prefix=createrepo_c/ | xz > $TARGET_DIR/createrepo_c-${GITREV}.tar.xz
+git archive "${GITREV}" --prefix="$PACKAGE"/ | xz > "$TARGET_DIR"/"$PACKAGE"-"${GITREV}".tar.xz
--- /dev/null
+#!/usr/bin/env python
+"""
+Convert a single line of test output into a command that runs that single test.
+
+E.g:
+ "2: test_download_package_via_metalink (tests.test_yum_package_downloading.TestCaseYumPackageDownloading) ... ok"
+
+To:
+ tests/python/tests/test_yum_package_downloading.py:TestCaseYumPackageDownloading.test_download_package_via_metalink
+"""
+
+import os
+import sys
+import argparse
+
+LIBPATH = "./build/src/python/"
+COMMAND = "PYTHONPATH=`readlink -f {libpath}` nosetests{nosever} -s -v {testpath}"
+TEST_PATH_PREFIX = "tests/python"
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser(description='Convert a single line from '\
+ 'python unittest test output into a command that runs this single test')
+
+ parser.add_argument('test_out_line', metavar='TESTOUTPUTLINE',
+ type=str,
+ help='A single line from python unittest test output')
+ parser.add_argument('--python3', action='store_true', help='Python3 version')
+ args = parser.parse_args()
+
+ test_out_line = args.test_out_line
+
+ # Remove suffix "... ok" or "... FAIL"
+ test_out_line = test_out_line.split(" ... ")[0]
+
+ # Remove prefix "test_number: "
+ res = test_out_line.split(": ")
+ test_out_line = res[-1]
+
+ # Get test name
+ res = test_out_line.split(" ")
+ if len(res) != 2:
+ print("Bad input line format")
+ sys.exit(1)
+
+ test_name, test_out_line = res
+
+ # Get test case
+ test_out_line = test_out_line.strip().lstrip("(").rstrip(")")
+ res = test_out_line.rsplit(".", 1)
+ if len(res) != 2:
+ print("Bad input line format")
+ sys.exit(1)
+
+ test_out_line, test_case = res
+
+ # Get path
+ test_path = test_out_line.replace(".", "/") + ".py"
+
+ full_path = os.path.join(TEST_PATH_PREFIX, test_path)
+
+ nosever = ""
+ pythonversion = 2
+
+ if args.python3:
+ nosever = "-3.3"
+ pythonversion = 3
+
+ testpath = "{0}:{1}.{2}".format(full_path, test_case, test_name)
+ libpath = LIBPATH.format(pythonversion=pythonversion)
+
+ command = COMMAND.format(libpath=libpath, nosever=nosever, testpath=testpath)
+
+ print(command)
+