ADD_DEFINITIONS (-DMAGEIA)
SET (ENABLE_MDKREPO ON)
SET (ENABLE_RPMDB ON)
+SET (ENABLE_RPMMD ON)
SET (ENABLE_LZMA_COMPRESSION ON)
SET (have_system ${have_system}x)
ENDIF (MAGEIA)
resolving package dependencies.
The sat-solver code has been written to aim for the newest packages,
-record the decison tree to provide introspection, and also allows to
+record the decision tree to provide introspection, and also allows one to
provide the user with suggestions on how to deal with unsolvable
problems. It also takes advantage of the repository storage to
minimize memory usage.
SET(LIBSOLV_MAJOR "0")
SET(LIBSOLV_MINOR "6")
-SET(LIBSOLV_PATCH "14")
+SET(LIBSOLV_PATCH "15")
.\" Title: helix2solv
.\" Author: [see the "Author" section]
.\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/>
-.\" Date: 08/26/2015
+.\" Date: 12/14/2015
.\" Manual: LIBSOLV
.\" Source: libsolv
.\" Language: English
.\"
-.TH "HELIX2SOLV" "1" "08/26/2015" "libsolv" "LIBSOLV"
+.TH "HELIX2SOLV" "1" "12/14/2015" "libsolv" "LIBSOLV"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
\fBhelix2solv\fR
.SH "DESCRIPTION"
.sp
-The helix format was a metadata format used in the RedCarpet package manager\&. It\(cqs still used in libzypp testcases\&. The helix2solv tool reads data in helix format from standhard input and write it in solv file format to standard output\&.
+The helix format was a metadata format used in the RedCarpet package manager\&. It\(cqs still used in libzypp testcases\&. The helix2solv tool reads data in helix format from standard input and writes it in solv file format to standard output\&.
.SH "AUTHOR"
.sp
Michael Schroeder <mls@suse\&.de>
-----------
The helix format was a metadata format used in the RedCarpet
package manager. It's still used in libzypp testcases.
-The helix2solv tool reads data in helix format from standhard
-input and write it in solv file format to standard output.
+The helix2solv tool reads data in helix format from standard
+input and writes it in solv file format to standard output.
Author
------
.\" Title: Libsolv-Bindings
.\" Author: [see the "Author" section]
.\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/>
-.\" Date: 09/21/2015
+.\" Date: 12/14/2015
.\" Manual: LIBSOLV
.\" Source: libsolv
.\" Language: English
.\"
-.TH "LIBSOLV\-BINDINGS" "3" "09/21/2015" "libsolv" "LIBSOLV"
+.TH "LIBSOLV\-BINDINGS" "3" "12/14/2015" "libsolv" "LIBSOLV"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.PP
\fBREL_GT\fR
.RS 4
-the \(lqgreater then\(rq bit
+the \(lqgreater than\(rq bit
.RE
.PP
\fBREL_ARCH\fR
.PP
\fBPOOL_FLAG_ADDFILEPROVIDESFILTERED\fR
.RS 4
-Make the addfileprovides method only add files from the standard locations (i\&.e\&. the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only few packages that use non\-standard file dependencies, but you still wand the fast speed that addfileprovides() generates\&.
+Make the addfileprovides method only add files from the standard locations (i\&.e\&. the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only a few packages that use non\-standard file dependencies, but you still want the fast speed that addfileprovides() generates\&.
.RE
.SS "METHODS"
.sp
.RE
.\}
.sp
-Break the ownership relation betwen the binding object and the pool\&. After this call, the pool will not get freed even if the object goes out of scope\&. This also means that you must manually call the free method to free the pool data\&.
+Break the ownership relation between the binding object and the pool\&. After this call, the pool will not get freed even if the object goes out of scope\&. This also means that you must manually call the free method to free the pool data\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependency for file names and than scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&. The addfileprovides_queue variant works the same way but returns an array containing all file dependencies\&. This information can be stored in the meta section of the repositories to speed up the next time the repository is loaded and addfileprovides is called\&.
+Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependencies for file names and then scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&. The addfileprovides_queue variant works the same way but returns an array containing all file dependencies\&. This information can be stored in the meta section of the repositories to speed up the next time the repository is loaded and addfileprovides is called\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Return all solvables that provide the specified dependency\&. You can use either a Dep object or an simple Id as argument\&.
+Return all solvables that provide the specified dependency\&. You can use either a Dep object or a simple Id as argument\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Set the callback function called when repository metadata needs to be loaded on demand\&. To make use of this feature, you need to create repodata stubs that tell the library which data is available but not loaded\&. If later on the data needs to be accessed, the callback function is called with a repodata argument\&. You can then load the data (maybe fetching it first from an remote server)\&. The callback should return true if the data has been made available\&.
+Set the callback function called when repository metadata needs to be loaded on demand\&. To make use of this feature, you need to create repodata stubs that tell the library which data is available but not loaded\&. If later on the data needs to be accessed, the callback function is called with a repodata argument\&. You can then load the data (maybe fetching it first from a remote server)\&. The callback should return true if the data has been made available\&.
.sp
.if n \{\
.RS 4
.PP
\fBSUSETAGS_RECORD_SHARES\fR
.RS 4
-This is specific to the add_susetags() method\&. Susetags allows to refer to already read packages to save disk space\&. If this data sharing needs to work over multiple calls to add_susetags, you need to specify this flag so that the share information is made available to subsequent calls\&.
+This is specific to the add_susetags() method\&. Susetags allows one to refer to already read packages to save disk space\&. If this data sharing needs to work over multiple calls to add_susetags, you need to specify this flag so that the share information is made available to subsequent calls\&.
.RE
.SS "METHODS"
.sp
.RE
.\}
.sp
-Add the repomd\&.xml meta description from the "rpm\-md" format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added the the "meta" section of the repository, i\&.e\&. no package gets created\&.
+Add the repomd\&.xml meta description from the "rpm\-md" format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added to the "meta" section of the repository, i\&.e\&. no package gets created\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Add the \(lqcontent\(rq meta description from the susetags format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added the the "meta" section of the repository, i\&.e\&. no package gets created\&.
+Add the \(lqcontent\(rq meta description from the susetags format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added to the "meta" section of the repository, i\&.e\&. no package gets created\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Returns \-1 if the epoch/version/release of the solvable is less then the one from the other solvable, 1 if it is greater, and 0 if they are equal\&. Note that "equal" does not mean that the evr is identical\&.
+Returns \-1 if the epoch/version/release of the solvable is less than the one from the other solvable, 1 if it is greater, and 0 if they are equal\&. Note that "equal" does not mean that the evr is identical\&.
.sp
.if n \{\
.RS 4
.PP
\fBSOLVER_RULE_FEATURE\fR
.RS 4
-Feature rules are fallback rules used when a update rule is disabled\&. They include all packages that may replace the installed package ignoring the update policy, i\&.e\&. they contain downgrades, arch changes and so on\&. Without them, the solver would simply erase installed packages if their update rule gets disabled\&.
+Feature rules are fallback rules used when an update rule is disabled\&. They include all packages that may replace the installed package ignoring the update policy, i\&.e\&. they contain downgrades, arch changes and so on\&. Without them, the solver would simply erase installed packages if their update rule gets disabled\&.
.RE
.PP
\fBSOLVER_RULE_JOB\fR
.PP
\fBSOLVER_RULE_DISTUPGRADE\fR
.RS 4
-This are simple negative assertions that make sure that only packages are kept that are also available in one of the repositories\&.
+These are simple negative assertions that make sure that only packages are kept that are also available in one of the repositories\&.
.RE
.PP
\fBSOLVER_RULE_INFARCH\fR
.PP
\fBSOLVER_RULE_LEARNT\fR
.RS 4
-These rules are generated by the solver to keep it from running into the same problem multiple times when it has to backtrack\&. They are the main reason why a sat solver is faster then other dependency solver implementations\&.
+These rules are generated by the solver to keep it from running into the same problem multiple times when it has to backtrack\&. They are the main reason why a sat solver is faster than other dependency solver implementations\&.
.RE
.sp
Special dependency rule types:
.PP
\fBSOLVER_TRANSACTION_MULTIREINSTALL\fR
.RS 4
-This element reinstalls a installed package keeping the other versions installed\&.
+This element reinstalls an installed package keeping the other versions installed\&.
.RE
.sp
Transaction element types, active view
.PP
\fBSOLVER_TRANSACTION_DOWNGRADE\fR
.RS 4
-This element installs a older version of an installed package\&.
+This element installs an older version of an installed package\&.
.RE
.PP
\fBSOLVER_TRANSACTION_OBSOLETES\fR
.RE
.\}
.sp
-Return all packages that are to be installed by the transaction\&. This are the packages that need to be downloaded from the repositories\&.
+Return all packages that are to be installed by the transaction\&. These are the packages that need to be downloaded from the repositories\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Order the steps in the transactions so that dependant packages are updated before packages that depend on them\&. For rpm, you can also use rpmlib\(cqs ordering functionality, debian\(cqs dpkg does not provide a way to order a transaction\&.
+Order the steps in the transactions so that dependent packages are updated before packages that depend on them\&. For rpm, you can also use rpmlib\(cqs ordering functionality, debian\(cqs dpkg does not provide a way to order a transaction\&.
.SS "ACTIVE/PASSIVE VIEW"
.sp
-Active view list what new packages get installed, while passive view shows what happens to the installed packages\&. Most often there\(cqs not much difference between the two modes, but things get interesting of multiple package get replaced by one new package\&. Say you have installed package A\-1\-1 and B\-1\-1, and now install A\-2\-1 with has a new dependency that obsoletes B\&. The transaction elements will be
+Active view lists what new packages get installed, while passive view shows what happens to the installed packages\&. Most often there\(cqs not much difference between the two modes, but things get interesting if multiple packages get replaced by one new package\&. Say you have installed packages A\-1\-1 and B\-1\-1, and now install A\-2\-1 which has a new dependency that obsoletes B\&. The transaction elements will be
.sp
.if n \{\
.RS 4
the ``equals to'' bit
*REL_GT*::
-the ``greater then'' bit
+the ``greater than'' bit
*REL_ARCH*::
used for relations that describe an extra architecture filter, the
Make the addfileprovides method only add files from the standard
locations (i.e. the ``bin'' and ``etc'' directories). This is
useful if you have only few packages that use non-standard file
-dependencies, but you still wand the fast speed that addfileprovides()
+dependencies, but you still want the fast speed that addfileprovides()
generates.
=== METHODS ===
pool.disown()
pool.disown()
-Break the ownership relation betwen the binding object and the pool. After
+Break the ownership relation between the binding object and the pool. After
this call, the pool will not get freed even if the object goes out of
scope. This also means that you must manually call the free method to free
the pool data.
Some package managers like rpm allow dependencies on files contained in other
packages. To allow libsolv to deal with those dependencies in an efficient way,
you need to call the addfileprovides method after creating and reading all
-repositories. This method will scan all dependency for file names and than scan
+repositories. This method will scan all dependencies for file names and then scan
all packages for matching files. If a filename has been matched, it will be
added to the provides list of the corresponding package. The
addfileprovides_queue variant works the same way but returns an array
solvables = pool.whatprovides(dep)
Return all solvables that provide the specified dependency. You can use either
-a Dep object or an simple Id as argument.
+a Dep object or a simple Id as argument.
Id *matchprovidingids(const char *match, int flags)
my @ids = $pool->matchprovidingids($match, $flags);
demand. To make use of this feature, you need to create repodata stubs that
tell the library which data is available but not loaded. If later on the data
needs to be accessed, the callback function is called with a repodata argument.
-You can then load the data (maybe fetching it first from an remote server).
+You can then load the data (maybe fetching it first from a remote server).
The callback should return true if the data has been made available.
/* bindings only */
Do not create stubs for repository parts that can be downloaded on demand.
*SUSETAGS_RECORD_SHARES*::
-This is specific to the add_susetags() method. Susetags allows to refer to
+This is specific to the add_susetags() method. Susetags allows one to refer to
already read packages to save disk space. If this data sharing needs to
work over multiple calls to add_susetags, you need to specify this flag so
that the share information is made available to subsequent calls.
Add the repomd.xml meta description from the "rpm-md" format to the repository.
This file contains information about the repository like keywords, and also a
-list of all database files with checksums. The data is added the the "meta"
+list of all database files with checksums. The data is added to the "meta"
section of the repository, i.e. no package gets created.
bool add_updateinfoxml(FILE *fp, int flags = 0)
Add the ``content'' meta description from the susetags format to the repository.
This file contains information about the repository like keywords, and also
-a list of all database files with checksums. The data is added the the "meta"
+a list of all database files with checksums. The data is added to the "meta"
section of the repository, i.e. no package gets created.
bool add_susetags(FILE *fp, Id defvendor, const char *language, int flags = 0)
$solvable.evrcmp(other)
$solvable.evrcmp(other)
-Returns -1 if the epoch/version/release of the solvable is less then the
+Returns -1 if the epoch/version/release of the solvable is less than the
one from the other solvable, 1 if it is greater, and 0 if they are equal.
Note that "equal" does not mean that the evr is identical.
that may replace the installed package.
*SOLVER_RULE_FEATURE*::
-Feature rules are fallback rules used when a update rule is disabled. They
+Feature rules are fallback rules used when an update rule is disabled. They
include all packages that may replace the installed package ignoring the
update policy, i.e. they contain downgrades, arch changes and so on.
Without them, the solver would simply erase installed packages if their
Job rules implement the job given to the solver.
*SOLVER_RULE_DISTUPGRADE*::
-This are simple negative assertions that make sure that only packages
+These are simple negative assertions that make sure that only packages
are kept that are also available in one of the repositories.
*SOLVER_RULE_INFARCH*::
*SOLVER_RULE_LEARNT*::
These rules are generated by the solver to keep it from running into
the same problem multiple times when it has to backtrack. They are
-the main reason why a sat solver is faster then other dependency solver
+the main reason why a sat solver is faster than other dependency solver
implementations.
Special dependency rule types:
versions installed.
*SOLVER_TRANSACTION_MULTIREINSTALL*::
-This element reinstalls a installed package keeping the other versions
+This element reinstalls an installed package keeping the other versions
installed.
Transaction element types, active view
This element installs a newer version of an installed package.
*SOLVER_TRANSACTION_DOWNGRADE*::
-This element installs a older version of an installed package.
+This element installs an older version of an installed package.
*SOLVER_TRANSACTION_OBSOLETES*::
This element installs a package that obsoletes an installed package.
newsolvables = trans.newsolvables()
newsolvables = trans.newsolvables()
-Return all packages that are to be installed by the transaction. This are
+Return all packages that are to be installed by the transaction. These are
the packages that need to be downloaded from the repositories.
Solvable *keptsolvables();
trans.order()
trans.order()
-Order the steps in the transactions so that dependant packages are updated
+Order the steps in the transactions so that dependent packages are updated
before packages that depend on them. For rpm, you can also use rpmlib's
ordering functionality, debian's dpkg does not provide a way to order a
transaction.
=== ACTIVE/PASSIVE VIEW ===
-Active view list what new packages get installed, while passive view shows
+Active view lists what new packages get installed, while passive view shows
what happens to the installed packages. Most often there's not much
-difference between the two modes, but things get interesting of multiple
-package get replaced by one new package. Say you have installed package
-A-1-1 and B-1-1, and now install A-2-1 with has a new dependency that
+difference between the two modes, but things get interesting if multiple
+packages get replaced by one new package. Say you have installed packages
+A-1-1 and B-1-1, and now install A-2-1 which has a new dependency that
obsoletes B. The transaction elements will be
updated A-1-1 (other: A-2-1)
.\" Title: Libsolv-Constantids
.\" Author: [see the "Author" section]
.\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/>
-.\" Date: 08/26/2015
+.\" Date: 12/14/2015
.\" Manual: LIBSOLV
.\" Source: libsolv
.\" Language: English
.\"
-.TH "LIBSOLV\-CONSTANTIDS" "3" "08/26/2015" "libsolv" "LIBSOLV"
+.TH "LIBSOLV\-CONSTANTIDS" "3" "12/14/2015" "libsolv" "LIBSOLV"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.PP
\fBSOLVABLE_RECOMMENDS "solvable:recommends"\fR
.RS 4
-Stores an array of dependency Ids that describe the capabilities that also should be installed when this package is installed\&. It\(cqs not an error if not all capabilites can be met\&.
+Stores an array of dependency Ids that describe the capabilities that also should be installed when this package is installed\&. It\(cqs not an error if not all capabilities can be met\&.
.RE
.PP
\fBSOLVABLE_SUGGESTS "solvable:suggests"\fR
.PP
\fBPRODUCT_SHORTLABEL "product:shortlabel"\fR
.RS 4
-A identification string of the product\&.
+An identification string of the product\&.
.RE
.PP
\fBPRODUCT_DISTPRODUCT "product:distproduct"\fR
.PP
\fBPRODUCT_REGISTER_TARGET "product:regtarget"\fR
.RS 4
-A target for prouduct registering\&.
+A target for product registering\&.
.RE
.PP
\fBPRODUCT_REGISTER_RELEASE "product:regrelease"\fR
.RS 4
-A release string for proudct registering\&.
+A release string for product registering\&.
.RE
.PP
\fBPUBKEY_KEYID "pubkey:keyid"\fR
.PP
\fBNAMESPACE_SPLITPROVIDES "namespace:splitprovides"\fR
.RS 4
-The dependency is a special splitprovides dependency used to implement updates that include a package split\&. A splitprovoide dependency contains a filename and a package name, it is matched if a package with the provided package name is installed that contains the filename\&. This namespace is implemented in libsolv, so you do not need a callback\&.
+The dependency is a special splitprovides dependency used to implement updates that include a package split\&. A splitprovides dependency contains a filename and a package name, it is matched if a package with the provided package name is installed that contains the filename\&. This namespace is implemented in libsolv, so you do not need a callback\&.
.RE
.PP
\fBNAMESPACE_LANGUAGE "namespace:language"\fR
.PP
\fBREPOKEY_TYPE_DIRSTRARRAY "repokey:type:dirstrarray"\fR
.RS 4
-The data is an tuple consisting of a directory Id and a basename\&. Used to store file names\&.
+The data is a tuple consisting of a directory Id and a basename\&. Used to store file names\&.
.RE
.PP
\fBREPOKEY_TYPE_DIRNUMNUMARRAY "repokey:type:dirnumnumarray"\fR
.RS 4
-The data is an triple consisting of a directory Id and two 32bit unsigned integers\&. Used to store disk usage information\&.
+The data is a triple consisting of a directory Id and two 32bit unsigned integers\&. Used to store disk usage information\&.
.RE
.PP
\fBREPOKEY_TYPE_MD5 "repokey:type:md5"\fR
.PP
\fBDELTA_BASE_EVR "delta:baseevr"\fR
.RS 4
-The version of the package the delta was build against\&.
+The version of the package the delta was built against\&.
.RE
.PP
\fBDELTA_SEQ_NAME "delta:seqname"\fR
*SOLVABLE_RECOMMENDS "solvable:recommends"*::
Stores an array of dependency Ids that describe the capabilities that
also should be installed when this package is installed. It's not an
- error if not all capabilites can be met.
+ error if not all capabilities can be met.
*SOLVABLE_SUGGESTS "solvable:suggests"*::
Stores an array of dependency Ids that describe the capabilities that
The basename of the product file in the package.
*PRODUCT_SHORTLABEL "product:shortlabel"*::
- A identification string of the product.
+ An identification string of the product.
*PRODUCT_DISTPRODUCT "product:distproduct"*::
Obsolete, do not use. Was a SUSE Code-10 product name.
A product line string used for product registering.
*PRODUCT_REGISTER_TARGET "product:regtarget"*::
- A target for prouduct registering.
+ A target for product registering.
*PRODUCT_REGISTER_RELEASE "product:regrelease"*::
- A release string for proudct registering.
+ A release string for product registering.
*PUBKEY_KEYID "pubkey:keyid"*::
The keyid of a pubkey, consisting of 8 bytes in hex.
*NAMESPACE_SPLITPROVIDES "namespace:splitprovides"*::
The dependency is a special splitprovides dependency used to implement
- updates that include a package split. A splitprovoide dependency contains
+ updates that include a package split. A splitprovides dependency contains
a filename and a package name, it is matched if a package with the
provided package name is installed that contains the filename.
This namespace is implemented in libsolv, so you do not need a callback.
space.
*REPOKEY_TYPE_DIRSTRARRAY "repokey:type:dirstrarray"*::
- The data is an tuple consisting of a directory Id and a basename.
+ The data is a tuple consisting of a directory Id and a basename.
Used to store file names.
*REPOKEY_TYPE_DIRNUMNUMARRAY "repokey:type:dirnumnumarray"*::
- The data is an triple consisting of a directory Id and two 32bit
+ The data is a triple consisting of a directory Id and two 32bit
unsigned integers. Used to store disk usage information.
*REPOKEY_TYPE_MD5 "repokey:type:md5"*::
The checksum of the delta rpm file.
*DELTA_BASE_EVR "delta:baseevr"*::
- The version of the package the delta was build against.
+ The version of the package the delta was built against.
*DELTA_SEQ_NAME "delta:seqname"*::
The first part of the delta sequence, the base package name.
.\" Title: Libsolv-Pool
.\" Author: [see the "Author" section]
.\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/>
-.\" Date: 08/26/2015
+.\" Date: 12/14/2015
.\" Manual: LIBSOLV
.\" Source: libsolv
.\" Language: English
.\"
-.TH "LIBSOLV\-POOL" "3" "08/26/2015" "libsolv" "LIBSOLV"
+.TH "LIBSOLV\-POOL" "3" "12/14/2015" "libsolv" "LIBSOLV"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.PP
\fBDISTTYPE_RPM\fR
.RS 4
-Used for systems with use rpm as low level package manager\&.
+Used for systems which use rpm as low level package manager\&.
.RE
.PP
\fBDISTTYPE_DEB\fR
.RS 4
-Used for systems with use dpkg as low level package manager\&.
+Used for systems which use dpkg as low level package manager\&.
.RE
.PP
\fBDISTTYPE_ARCH\fR
.RS 4
-Used for systems with use the arch linux package manager\&.
+Used for systems which use the arch linux package manager\&.
.RE
.PP
\fBDISTTYPE_HAIKU\fR
.RS 4
-Used for systems with use haiku packages\&.
+Used for systems which use haiku packages\&.
.RE
.PP
\fBPOOL_FLAG_PROMOTEEPOCH\fR
.PP
\fBPOOL_FLAG_ADDFILEPROVIDESFILTERED\fR
.RS 4
-Make the addfileprovides method only add files from the standard locations (i\&.e\&. the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only few packages that use non\-standard file dependencies, but you still wand the fast speed that addfileprovides() generates\&.
+Make the addfileprovides method only add files from the standard locations (i\&.e\&. the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only a few packages that use non\-standard file dependencies, but you still want the fast speed that addfileprovides() generates\&.
.RE
.SS "Functions"
.sp
.PP
\fBREL_WITH\fR
.RS 4
-Like REL_AND, but packages mast match both dependencies simultaneously\&. See the section about boolean dependencies about more information\&.
+Like REL_AND, but packages must match both dependencies simultaneously\&. See the section about boolean dependencies for more information\&.
.RE
.PP
\fBREL_NAMESPACE\fR
.PP
\fBREL_ARCH\fR
.RS 4
-A architecture filter dependency\&. The \(lqname\(rq part of the relation is a sub\-dependency, the \(lqevr\(rq part is the Id of an architecture that the matching packages must have (note that this is an exact match ignoring architecture policies)\&.
+An architecture filter dependency\&. The \(lqname\(rq part of the relation is a sub\-dependency, the \(lqevr\(rq part is the Id of an architecture that the matching packages must have (note that this is an exact match ignoring architecture policies)\&.
.RE
.PP
\fBREL_FILECONFLICT\fR
.RE
.\}
.sp
-Compare two version Ids, return \-1 if the first version is less then the second version, 0 if they are identical, and 1 if the first version is bigger than the second one\&.
+Compare two version Ids, return \-1 if the first version is less than the second version, 0 if they are identical, and 1 if the first version is bigger than the second one\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Create a index that maps dependency Ids to sets of packages that provide the dependency\&.
+Create an index that maps dependency Ids to sets of packages that provide the dependency\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependency for file names and than scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&.
+Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependencies for file names and then scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&.
.sp
.if n \{\
.RS 4
.RE
.\}
.sp
-Like pool_tmpjoin(), but if the first argument is the last allocated space in the pool\(cqs temporary space area, it will be replaced with the result of the join and no new temporary space slot will be used\&. Thus you can join more then three strings by a combination of one pool_tmpjoin() and multiple pool_tmpappend() calls\&. Note that the \fIstr1\fR pointer is no longer usable after the call\&.
+Like pool_tmpjoin(), but if the first argument is the last allocated space in the pool\(cqs temporary space area, it will be replaced with the result of the join and no new temporary space slot will be used\&. Thus you can join more than three strings by a combination of one pool_tmpjoin() and multiple pool_tmpappend() calls\&. Note that the \fIstr1\fR pointer is no longer usable after the call\&.
.SH "DATA LOOKUP"
.SS "Constants"
.PP
=== Constants ===
*DISTTYPE_RPM*::
-Used for systems with use rpm as low level package manager.
+Used for systems which use rpm as low level package manager.
*DISTTYPE_DEB*::
-Used for systems with use dpkg as low level package manager.
+Used for systems which use dpkg as low level package manager.
*DISTTYPE_ARCH*::
-Used for systems with use the arch linux package manager.
+Used for systems which use the arch linux package manager.
*DISTTYPE_HAIKU*::
-Used for systems with use haiku packages.
+Used for systems which use haiku packages.
*POOL_FLAG_PROMOTEEPOCH*::
Promote the epoch of the providing dependency to the requesting
Make the addfileprovides method only add files from the standard
locations (i.e. the ``bin'' and ``etc'' directories). This is
useful if you have only few packages that use non-standard file
-dependencies, but you still wand the fast speed that addfileprovides()
+dependencies, but you still want the fast speed that addfileprovides()
generates.
be two sub-dependencies. Packages can match any part of the dependency.
*REL_WITH*::
-Like REL_AND, but packages mast match both dependencies simultaneously. See
+Like REL_AND, but packages must match both dependencies simultaneously. See
the section about boolean dependencies about more information.
*REL_NAMESPACE*::
for more information.
*REL_ARCH*::
-A architecture filter dependency. The ``name'' part of the relation is a
+An architecture filter dependency. The ``name'' part of the relation is a
sub-dependency, the ``evr'' part is the Id of an architecture that the
matching packages must have (note that this is an exact match ignoring
architecture policies).
=== Functions ===
int pool_evrcmp(const Pool *pool, Id evr1id, Id evr2id, int mode);
-Compare two version Ids, return -1 if the first version is less then the
+Compare two version Ids, return -1 if the first version is less than the
second version, 0 if they are identical, and 1 if the first version is
bigger than the second one.
------------------
void pool_createwhatprovides(Pool *pool);
-Create a index that maps dependency Ids to sets of packages that provide the
+Create an index that maps dependency Ids to sets of packages that provide the
dependency.
void pool_freewhatprovides(Pool *pool);
other packages. To allow libsolv to deal with those dependencies in an
efficient way, you need to call the addfileprovides method after creating
and reading all repositories. This method will scan all dependencies for file
-names and than scan all packages for matching files. If a filename has been
+names and then scan all packages for matching files. If a filename has been
matched, it will be added to the provides list of the corresponding
package.
Like pool_tmpjoin(), but if the first argument is the last allocated space
in the pool's temporary space area, it will be replaced with the result of
the join and no new temporary space slot will be used. Thus you can join
-more then three strings by a combination of one pool_tmpjoin() and multiple
+more than three strings by a combination of one pool_tmpjoin() and multiple
pool_tmpappend() calls. Note that the _str1_ pointer is no longer usable
after the call.
Repo *installed = pool->installed;
Id p;
int obsoleteusescolors = pool_get_flag(pool, POOL_FLAG_OBSOLETEUSESCOLORS);
+ int hdrfetches;
queue_empty(conflicts);
if (!pkgs->count)
/* first pass: scan dirs */
if (!cbdata.aliases)
{
+ hdrfetches = 0;
cflmapn = (cutoff + 3) * 64;
while ((cflmapn & (cflmapn - 1)) != 0)
cflmapn = cflmapn & (cflmapn - 1);
handle = (*handle_cb)(pool, p, handle_cbdata);
if (!handle)
continue;
+ hdrfetches++;
rpm_iterate_filelist(handle, RPM_ITERATE_FILELIST_ONLYDIRS, finddirs_cb, &cbdata);
if (MAPTST(&cbdata.idxmap, i))
idxmapset++;
}
POOL_DEBUG(SOLV_DEBUG_STATS, "dirmap size: %d, used %d\n", cbdata.dirmapn + 1, cbdata.dirmapused);
POOL_DEBUG(SOLV_DEBUG_STATS, "dirmap memory usage: %d K\n", (cbdata.dirmapn + 1) * 2 * (int)sizeof(Id) / 1024);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches);
POOL_DEBUG(SOLV_DEBUG_STATS, "dirmap creation took %d ms\n", solv_timems(now));
POOL_DEBUG(SOLV_DEBUG_STATS, "dir conflicts found: %d, idxmap %d of %d\n", cbdata.dirconflicts, idxmapset, pkgs->count);
}
cbdata.cflmap = solv_calloc(cflmapn, 2 * sizeof(Id));
cbdata.cflmapn = cflmapn - 1; /* make it a mask */
cbdata.create = 1;
+ hdrfetches = 0;
for (i = 0; i < pkgs->count; i++)
{
if (i == cutoff)
handle = (*handle_cb)(pool, p, handle_cbdata);
if (!handle)
continue;
+ hdrfetches++;
cbdata.lastdiridx = -1;
rpm_iterate_filelist(handle, RPM_ITERATE_FILELIST_NOGHOSTS, cbdata.aliases ? findfileconflicts_basename_cb : findfileconflicts_cb, &cbdata);
}
POOL_DEBUG(SOLV_DEBUG_STATS, "filemap size: %d, used %d\n", cbdata.cflmapn + 1, cbdata.cflmapused);
POOL_DEBUG(SOLV_DEBUG_STATS, "filemap memory usage: %d K\n", (cbdata.cflmapn + 1) * 2 * (int)sizeof(Id) / 1024);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches);
POOL_DEBUG(SOLV_DEBUG_STATS, "filemap creation took %d ms\n", solv_timems(now));
POOL_DEBUG(SOLV_DEBUG_STATS, "lookat_dir size: %d\n", cbdata.lookat_dir.count);
queue_free(&cbdata.lookat_dir);
cbdata.statmapn = cflmapn - 1; /* make it a mask */
}
cbdata.create = 0;
+ hdrfetches = 0;
for (i = 0; i < pkgs->count; i++)
{
if (!MAPTST(&cbdata.idxmap, i))
handle = (*handle_cb)(pool, p, handle_cbdata);
if (!handle)
continue;
+ hdrfetches++;
cbdata.lastdiridx = -1;
rpm_iterate_filelist(handle, RPM_ITERATE_FILELIST_NOGHOSTS, findfileconflicts_alias_cb, &cbdata);
}
POOL_DEBUG(SOLV_DEBUG_STATS, "normap size: %d, used %d\n", cbdata.normapn + 1, cbdata.normapused);
POOL_DEBUG(SOLV_DEBUG_STATS, "normap memory usage: %d K\n", (cbdata.normapn + 1) * 2 * (int)sizeof(Id) / 1024);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches);
POOL_DEBUG(SOLV_DEBUG_STATS, "stats made: %d\n", cbdata.statsmade);
if (cbdata.usestat)
{
cbdata.cflmapn = 0;
cbdata.cflmapused = 0;
- now = solv_timems(0);
-
map_free(&cbdata.idxmap);
/* sort and unify/prune */
+ now = solv_timems(0);
POOL_DEBUG(SOLV_DEBUG_STATS, "raw candidates: %d, pruning\n", cbdata.lookat.count / 4);
solv_sort(cbdata.lookat.elements, cbdata.lookat.count / 4, sizeof(Id) * 4, &lookat_hx_cmp, pool);
for (i = j = 0; i < cbdata.lookat.count; )
}
queue_truncate(&cbdata.lookat, j);
POOL_DEBUG(SOLV_DEBUG_STATS, "candidates now: %d\n", cbdata.lookat.count / 4);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "pruning took %d ms\n", solv_timems(now));
/* third pass: collect file info for all files that match a hx */
+ now = solv_timems(0);
solv_sort(cbdata.lookat.elements, cbdata.lookat.count / 4, sizeof(Id) * 4, &lookat_idx_cmp, pool);
queue_init(&cbdata.files);
+ hdrfetches = 0;
for (i = 0; i < cbdata.lookat.count; i += 4)
{
Id idx = cbdata.lookat.elements[i + 1];
iterflags |= RPM_ITERATE_FILELIST_WITHCOL;
p = pkgs->elements[idx];
handle = (*handle_cb)(pool, p, handle_cbdata);
+ if (handle)
+ hdrfetches++;
for (;; i += 4)
{
int fstart = cbdata.files.count;
break;
}
}
+ POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches);
+ POOL_DEBUG(SOLV_DEBUG_STATS, "file info fetching took %d ms\n", solv_timems(now));
cbdata.normap = solv_free(cbdata.normap);
cbdata.normapn = 0;
/* forth pass: for each hx we have, compare all matching files against all other matching files */
+ now = solv_timems(0);
solv_sort(cbdata.lookat.elements, cbdata.lookat.count / 4, sizeof(Id) * 4, &lookat_hx_cmp, pool);
for (i = 0; i < cbdata.lookat.count - 4; i += 4)
{
STATE_GROUP,
STATE_KEYWORDS,
STATE_KEYWORD,
+ STATE_EXTENDS,
NUMSTATES
};
{ STATE_APPLICATION, "url", STATE_URL, 1 },
{ STATE_APPLICATION, "project_group", STATE_GROUP, 1 },
{ STATE_APPLICATION, "keywords", STATE_KEYWORDS, 0 },
+ { STATE_APPLICATION, "extends", STATE_EXTENDS, 1 },
{ STATE_DESCRIPTION, "p", STATE_P, 1 },
{ STATE_DESCRIPTION, "ul", STATE_UL, 0 },
{ STATE_DESCRIPTION, "ol", STATE_OL, 0 },
int flags;
char *desktop_file;
int havesummary;
+ const char *filename;
+ Queue *owners;
};
Pool *pool = pd->pool;
Solvable *s = pd->solvable;
struct stateswitch *sw;
+ const char *type;
#if 0
fprintf(stderr, "start: [%d]%s\n", pd->state, name);
s = pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->repo));
pd->handle = s - pool->solvables;
pd->havesummary = 0;
+ type = find_attr("type", atts);
+ if (!type || !*type)
+ type = "desktop";
+ repodata_set_poolstr(pd->data, pd->handle, SOLVABLE_CATEGORY, type);
break;
case STATE_DESCRIPTION:
pd->description = solv_free(pd->description);
fclose(fp);
}
+/* Guess the appdata/metainfo file name belonging to an application id.
+ * A ".desktop" id suffix maps to ".appdata.xml"; the ".ttf", ".otf",
+ * ".xml" and ".db" suffixes map to ".metainfo.xml". Returns a string in
+ * the pool's temporary space, or 0 if the id has no known suffix. */
+static char *
+guess_filename_from_id(Pool *pool, const char *id)
+{
+  int l = strlen(id);
+  /* join with the longest possible suffix so that every strcpy below
+   * fits into the allocated temporary space */
+  char *r = pool_tmpjoin(pool, id, ".metainfo.xml", 0);
+  if (l > 8 && !strcmp(".desktop", id + l - 8))
+    strcpy(r + l - 8, ".appdata.xml");
+  else if (l > 4 && !strcmp(".ttf", id + l - 4))
+    strcpy(r + l - 4, ".metainfo.xml");
+  else if (l > 4 && !strcmp(".otf", id + l - 4))
+    strcpy(r + l - 4, ".metainfo.xml");
+  else if (l > 4 && !strcmp(".xml", id + l - 4))
+    strcpy(r + l - 4, ".metainfo.xml");
+  else if (l > 3 && !strcmp(".db", id + l - 3))
+    strcpy(r + l - 3, ".metainfo.xml");
+  else
+    return 0;	/* unknown suffix, cannot guess */
+  return r;
+}
+
static void XMLCALL
endElement(void *userData, const char *name)
{
l -= 8;
s->name = pool_strn2id(pool, name, l, 1);
}
+ if (!s->requires && pd->owners)
+ {
+ int i;
+ Id id;
+ for (i = 0; i < pd->owners->count; i++)
+ {
+ Solvable *os = pd->pool->solvables + pd->owners->elements[i];
+ s->requires = repo_addid_dep(pd->repo, s->requires, os->name, 0);
+ id = pool_str2id(pd->pool, pool_tmpjoin(pd->pool, "application-appdata(", pool_id2str(pd->pool, os->name), ")"), 1);
+ s->provides = repo_addid_dep(pd->repo, s->provides, id, 0);
+ }
+ }
+ if (!s->requires && (pd->desktop_file || pd->filename))
+ {
+ /* add appdata() link requires/provides */
+ const char *filename = pd->filename;
+ if (!filename)
+ filename = guess_filename_from_id(pool, pd->desktop_file);
+ if (filename)
+ {
+ filename = pool_tmpjoin(pool, "application-appdata(", filename, ")");
+ s->requires = repo_addid_dep(pd->repo, s->requires, pool_str2id(pd->pool, filename + 12, 1), 0);
+ s->provides = repo_addid_dep(pd->repo, s->provides, pool_str2id(pd->pool, filename, 1), 0);
+ }
+ }
if (s->name && s->arch != ARCH_SRC && s->arch != ARCH_NOSRC)
s->provides = repo_addid_dep(pd->repo, s->provides, pool_rel2id(pd->pool, s->name, s->evr, REL_EQ, 1), 0);
pd->solvable = 0;
break;
case STATE_ID:
pd->desktop_file = solv_strdup(pd->content);
- /* guess the appdata.xml file name from the id element */
- if (pd->lcontent > 8 && !strcmp(".desktop", pd->content + pd->lcontent - 8))
- pd->content[pd->lcontent - 8] = 0;
- else if (pd->lcontent > 4 && !strcmp(".ttf", pd->content + pd->lcontent - 4))
- pd->content[pd->lcontent - 4] = 0;
- else if (pd->lcontent > 4 && !strcmp(".otf", pd->content + pd->lcontent - 4))
- pd->content[pd->lcontent - 4] = 0;
- else if (pd->lcontent > 4 && !strcmp(".xml", pd->content + pd->lcontent - 4))
- pd->content[pd->lcontent - 4] = 0;
- else if (pd->lcontent > 3 && !strcmp(".db", pd->content + pd->lcontent - 3))
- pd->content[pd->lcontent - 3] = 0;
- id = pool_str2id(pd->pool, pool_tmpjoin(pool, "appdata(", pd->content, ".appdata.xml)"), 1);
- s->requires = repo_addid_dep(pd->repo, s->requires, id, 0);
- id = pool_str2id(pd->pool, pool_tmpjoin(pool, "application-appdata(", pd->content, ".appdata.xml)"), 1);
- s->provides = repo_addid_dep(pd->repo, s->provides, id, 0);
break;
case STATE_NAME:
s->name = pool_str2id(pd->pool, pool_tmpjoin(pool, "application:", pd->content, 0), 1);
case STATE_GROUP:
repodata_add_poolstr_array(pd->data, pd->handle, SOLVABLE_GROUP, pd->content);
break;
+ case STATE_EXTENDS:
+ repodata_add_poolstr_array(pd->data, pd->handle, SOLVABLE_EXTENDS, pd->content);
+ break;
case STATE_DESCRIPTION:
if (pd->description)
{
case STATE_PKGNAME:
id = pool_str2id(pd->pool, pd->content, 1);
s->requires = repo_addid_dep(pd->repo, s->requires, id, 0);
+ id = pool_str2id(pd->pool, pool_tmpjoin(pd->pool, "application-appdata(", pd->content, ")"), 1);
+ s->provides = repo_addid_dep(pd->repo, s->provides, id, 0);
break;
case STATE_KEYWORD:
repodata_add_poolstr_array(pd->data, pd->handle, SOLVABLE_KEYWORDS, pd->content);
#define BUFF_SIZE 8192
-int
-repo_add_appdata(Repo *repo, FILE *fp, int flags)
+static int
+repo_add_appdata_fn(Repo *repo, FILE *fp, int flags, const char *filename, Queue *owners)
{
Pool *pool = repo->pool;
struct parsedata pd;
pd.pool = repo->pool;
pd.data = data;
pd.flags = flags;
+ pd.filename = filename;
+ pd.owners = owners;
pd.content = malloc(256);
pd.acontent = 256;
return ret;
}
+int
+repo_add_appdata(Repo *repo, FILE *fp, int flags)
+{
+ return repo_add_appdata_fn(repo, fp, flags, 0, 0);
+}
+
+/* Scan all uninternalized filelist data of 'repo' for files in directory
+ * 'dir' that are named like appdata/metainfo files. For every match the
+ * pair (solvable id, file name id) is pushed onto 'res' so that the
+ * caller can map appdata files back to their owning packages. */
+static void
+search_uninternalized_filelist(Repo *repo, const char *dir, Queue *res)
+{
+  Pool *pool = repo->pool;
+  Id rdid, p;
+  Id iter, did, idid;
+
+  for (rdid = 1; rdid < repo->nrepodata; rdid++)
+    {
+      Repodata *data = repo_id2repodata(repo, rdid);
+      if (!data)
+        continue;
+      if (data->state == REPODATA_STUB)
+        continue;	/* not loaded, nothing to scan */
+      if (!repodata_has_keyname(data, SOLVABLE_FILELIST))
+        continue;
+      did = repodata_str2dir(data, dir, 0);
+      if (!did)
+        continue;	/* this data does not know the directory at all */
+      for (p = data->start; p < data->end; p++)
+        {
+          if (p >= pool->nsolvables)
+            continue;
+          if (pool->solvables[p].repo != repo)
+            continue;
+          iter = 0;	/* restart filelist iteration for this solvable */
+          for (;;)
+            {
+              const char *str;
+              int l;
+              Id id;
+              idid = did;	/* restrict the lookup to the appdata dir */
+              str = repodata_lookup_dirstrarray_uninternalized(data, p, SOLVABLE_FILELIST, &idid, &iter);
+              if (!iter)
+                break;
+              /* only record *.appdata.xml and *.metainfo.xml files;
+               * the checks must test for a matching suffix (!strncmp) */
+              l = strlen(str);
+              if (l > 12 && !strncmp(str + l - 12, ".appdata.xml", 12))
+                id = pool_str2id(pool, str, 1);
+              else if (l > 13 && !strncmp(str + l - 13, ".metainfo.xml", 13))
+                id = pool_str2id(pool, str, 1);
+              else
+                continue;
+              queue_push2(res, p, id);
+            }
+        }
+    }
+}
+
/* add all files ending in .appdata.xml */
int
repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags)
DIR *dir;
char *dirpath;
Repodata *data;
+ Queue flq;
+ Queue oq;
+ queue_init(&flq);
+ queue_init(&oq);
+ if (flags & APPDATA_SEARCH_UNINTERNALIZED_FILELIST)
+ search_uninternalized_filelist(repo, appdatadir, &flq);
data = repo_add_repodata(repo, flags);
if (flags & REPO_USE_ROOTDIR)
dirpath = pool_prepend_rootdir(repo->pool, appdatadir);
const char *n;
FILE *fp;
int len = strlen(entry->d_name);
- if (len <= 12 || strcmp(entry->d_name + len - 12, ".appdata.xml") != 0)
- continue;
if (entry->d_name[0] == '.')
continue;
+ if (!(len > 12 && !strcmp(entry->d_name + len - 12, ".appdata.xml")) &&
+ !(len > 13 && !strcmp(entry->d_name + len - 13, ".metainfo.xml")))
+ continue;
n = pool_tmpjoin(repo->pool, dirpath, "/", entry->d_name);
fp = fopen(n, "r");
if (!fp)
pool_error(repo->pool, 0, "%s: %s", n, strerror(errno));
continue;
}
- repo_add_appdata(repo, fp, flags | REPO_NO_INTERNALIZE | REPO_REUSE_REPODATA | APPDATA_CHECK_DESKTOP_FILE);
+ if (flags & APPDATA_SEARCH_UNINTERNALIZED_FILELIST)
+ {
+ Id id = pool_str2id(repo->pool, entry->d_name, 0);
+ queue_empty(&oq);
+ if (id)
+ {
+ int i;
+ for (i = 0; i < flq.count; i += 2)
+ if (flq.elements[i + 1] == id)
+ queue_push(&oq, flq.elements[i]);
+ }
+ }
+ repo_add_appdata_fn(repo, fp, flags | REPO_NO_INTERNALIZE | REPO_REUSE_REPODATA | APPDATA_CHECK_DESKTOP_FILE, entry->d_name, oq.count ? &oq : 0);
fclose(fp);
}
closedir(dir);
solv_free(dirpath);
if (!(flags & REPO_NO_INTERNALIZE))
repodata_internalize(data);
+ queue_free(&oq);
+ queue_free(&flq);
return 0;
}
int repo_add_appdata(Repo *repo, FILE *fp, int flags);
int repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags);
+#define APPDATA_SEARCH_UNINTERNALIZED_FILELIST (1 << 8)
#define APPDATA_CHECK_DESKTOP_FILE (1 << 30) /* internal */
if (!rel || !evr)
{
- pool_debug(pool, SOLV_FATAL, "repo_content: bad relation '%s %s'\n", name, rel);
+ pool_debug(pool, SOLV_ERROR, "repo_content: bad relation '%s %s'\n", name, rel);
continue;
}
for (flags = 0; flags < 6; flags++)
break;
if (flags == 6)
{
- pool_debug(pool, SOLV_FATAL, "repo_content: unknown relation '%s'\n", rel);
+ pool_debug(pool, SOLV_ERROR, "repo_content: unknown relation '%s'\n", rel);
continue;
}
id = pool_rel2id(pool, id, pool_str2id(pool, evr, 1), flags + 1, 1);
if (s && !s->name)
{
- pool_debug(pool, SOLV_FATAL, "repo_content: 'content' incomplete, no product solvable created!\n");
+ pool_debug(pool, SOLV_ERROR, "repo_content: 'content' incomplete, no product solvable created!\n");
repo_free_solvable(repo, s - pool->solvables, 1);
s = 0;
}
s->evr = evr2id(pool, pd,
pd->epoch ? pd->evrspace + pd->epoch : 0,
pd->version ? pd->evrspace + pd->version : 0,
- pd->release ? pd->evrspace + pd->release : "");
+ pd->release ? pd->evrspace + pd->release : 0);
/* ensure self-provides */
if (s->name && s->arch != ARCH_SRC && s->arch != ARCH_NOSRC)
s->provides = repo_addid_dep(pd->repo, s->provides, pool_rel2id(pool, s->name, s->evr, REL_EQ, 1), 0);
};
static const char *features[] = {
-#ifdef ENABLE_LINKED_PACKAGES
+#ifdef ENABLE_LINKED_PKGS
"linked_packages",
#endif
#ifdef ENABLE_COMPLEX_DEPS
-------------------------------------------------------------------
+Mon Dec 14 15:48:01 CET 2015 - mls@suse.de
+
+- change product links to also look at timestamps [bnc#956443]
+- rework multiversion orphaned handling [bnc#957606]
+- support key type changes in repodata_internalize()
+- allow serialization of REPOKEY_TYPE_DELETED
+- improve appdata handling of installed packages
+- improve performance when run under xen
+- bump version to 0.6.15
+
+-------------------------------------------------------------------
Mon Oct 5 13:27:25 CEST 2015 - mls@suse.de
- fix bug in recommends handling [bnc#948482]
repodata_localize_id;
repodata_lookup_bin_checksum;
repodata_lookup_binary;
+ repodata_lookup_dirstrarray_uninternalized;
repodata_lookup_id;
repodata_lookup_id_uninternalized;
repodata_lookup_idarray;
solv_depmarker;
solv_dupappend;
solv_dupjoin;
+ solv_extend_realloc;
solv_free;
solv_hex2bin;
solv_latin1toutf8;
*
* product:
* created from product data in the repository (which is generated from files
- * in /etc/products.d. In the future we may switch to using product()
+ * in /etc/products.d). In the future we may switch to using product()
* provides of packages.
*
* pattern:
#include "pool.h"
#include "repo.h"
+#include "evr.h"
#include "linkedpkg.h"
#ifdef ENABLE_LINKED_PKGS
Id req = 0;
Id prv = 0;
Id p, pp;
- Id pkgname = 0;
+ Id pkgname = 0, appdataid = 0;
/* find appdata requires */
if (s->requires)
{
- Id appdataid = 0;
Id *reqp = s->repo->idarraydata + s->requires;
while ((req = *reqp++) != 0) /* go through all requires */
{
else
pkgname = req;
}
- req = appdataid;
}
+ req = appdataid ? appdataid : pkgname;
if (!req)
return;
/* find application-appdata provides */
if (s->provides)
{
Id *prvp = s->repo->idarraydata + s->provides;
+ const char *reqs = pool_id2str(pool, req);
+ const char *prvs;
while ((prv = *prvp++) != 0) /* go through all provides */
{
if (ISRELDEP(prv))
continue;
- if (strncmp("application-appdata(", pool_id2str(pool, prv), 20))
+ prvs = pool_id2str(pool, prv);
+ if (strncmp("application-appdata(", prvs, 20))
continue;
- if (!strcmp(pool_id2str(pool, prv) + 12, pool_id2str(pool, req)))
- break;
+ if (appdataid)
+ {
+ if (!strcmp(prvs + 12, reqs))
+ break;
+ }
+ else
+ {
+ int reqsl = strlen(reqs);
+ if (!strncmp(prvs + 20, reqs, reqsl) && !strcmp(prvs + 20 + reqsl, ")"))
+ break;
+ }
}
}
if (!prv)
if (pool->solvables[p].repo == s->repo)
if (!pkgname || pool->solvables[p].name == pkgname)
queue_push(qr, p);
- if (!qr->count && pkgname)
+ if (!qr->count && pkgname && appdataid)
{
/* huh, no matching package? try without pkgname filter */
FOR_PROVIDES(p, pp, req)
{
Id p, pp, namerelid;
char *str;
+ unsigned int sbt = 0;
/* search for project requires */
namerelid = 0;
continue;
queue_push(qr, p);
}
+ if (qr->count > 1)
+ {
+ /* multiple providers. try buildtime filter */
+ sbt = solvable_lookup_num(s, SOLVABLE_BUILDTIME, 0);
+ if (sbt)
+ {
+ unsigned int bt;
+ int i, j;
+ int filterqp = 1;
+ for (i = j = 0; i < qr->count; i++)
+ {
+ bt = solvable_lookup_num(pool->solvables + qr->elements[i], SOLVABLE_BUILDTIME, 0);
+ if (!bt)
+ filterqp = 0; /* can't filter */
+ if (!bt || bt == sbt)
+ qr->elements[j++] = qr->elements[i];
+ }
+ if (j)
+ qr->count = j;
+ if (!j || !filterqp)
+ sbt = 0; /* filter failed */
+ }
+ }
if (!qr->count && s->repo == pool->installed)
{
/* oh no! Look up reference file */
Solvable *ps = pool->solvables + p;
if (s->name != ps->name || ps->repo != s->repo || ps->arch != s->arch || s->evr != ps->evr)
continue;
+ if (sbt && solvable_lookup_num(ps, SOLVABLE_BUILDTIME, 0) != sbt)
+ continue;
queue_push(qp, p);
}
}
find_product_link(pool, s, reqidp, qr, prvidp, qp);
}
+/* Look up the packages linked to by solvable 's' and return their common
+ * name plus the minimum and maximum evr among them in *namep, *minp and
+ * *maxp. Returns 0 (outputs untouched) if there are no linked packages
+ * or the linked packages do not all share the same name. */
+static int
+name_min_max(Pool *pool, Solvable *s, Id *namep, Id *minp, Id *maxp)
+{
+  Queue q;
+  Id qbuf[4];
+  Id name, min, max;
+  int i;
+
+  queue_init_buffer(&q, qbuf, sizeof(qbuf)/sizeof(*qbuf));
+  find_package_link(pool, s, 0, &q, 0, 0);
+  if (!q.count)
+    {
+      queue_free(&q);
+      return 0;
+    }
+  /* seed name and evr interval from the first linked package */
+  s = pool->solvables + q.elements[0];
+  name = s->name;
+  min = max = s->evr;
+  for (i = 1; i < q.count; i++)
+    {
+      s = pool->solvables + q.elements[i];
+      if (s->name != name)
+        {
+          queue_free(&q);
+          return 0;	/* mixed names, cannot compare */
+        }
+      if (s->evr == min || s->evr == max)
+        continue;	/* identical evr id, interval unchanged */
+      if (pool_evrcmp(pool, min, s->evr, EVRCMP_COMPARE) >= 0)
+        min = s->evr;
+      else if (min == max || pool_evrcmp(pool, max, s->evr, EVRCMP_COMPARE) <= 0)
+        max = s->evr;
+    }
+  queue_free(&q);
+  *namep = name;
+  *minp = min;
+  *maxp = max;
+  return 1;
+}
+
+/* Compare two link solvables of the same name by comparing the evr
+ * intervals of the packages they link to. Returns >0 if s1 is newer,
+ * <0 if s2 is newer, and 0 if the intervals overlap or the solvables
+ * cannot be compared (different names, no/ambiguous link targets). */
+int
+pool_link_evrcmp(Pool *pool, Solvable *s1, Solvable *s2)
+{
+  Id name1, evrmin1, evrmax1;
+  Id name2, evrmin2, evrmax2;
+
+  if (s1->name != s2->name)
+    return 0;	/* can't compare */
+  if (!name_min_max(pool, s1, &name1, &evrmin1, &evrmax1))
+    return 0;
+  if (!name_min_max(pool, s2, &name2, &evrmin2, &evrmax2))
+    return 0;
+  /* compare linked names */
+  if (name1 != name2)
+    return 0;
+  if (evrmin1 == evrmin2 && evrmax1 == evrmax2)
+    return 0;	/* identical intervals */
+  /* now compare evr intervals */
+  if (evrmin1 == evrmax1 && evrmin2 == evrmax2)
+    return pool_evrcmp(pool, evrmin1, evrmax2, EVRCMP_COMPARE);
+  /* only a clear decision when one interval lies entirely beyond the other */
+  if (evrmin1 != evrmax2 && pool_evrcmp(pool, evrmin1, evrmax2, EVRCMP_COMPARE) > 0)
+    return 1;
+  if (evrmax1 != evrmin2 && pool_evrcmp(pool, evrmax1, evrmin2, EVRCMP_COMPARE) < 0)
+    return -1;
+  return 0;
+}
+
+
#endif
/* generic */
extern void find_package_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Queue *qp);
+extern int pool_link_evrcmp(Pool *pool, Solvable *s1, Solvable *s2);
#endif
#include "policy.h"
#include "poolvendor.h"
#include "poolarch.h"
+#include "linkedpkg.h"
#include "cplxdeps.h"
+
/*-----------------------------------------------------------------*/
/*
void
prune_to_best_version(Pool *pool, Queue *plist)
{
- int i, j;
+ int i, j, r;
Solvable *s, *best;
if (plist->count < 2) /* no need to prune for a single entry */
best = s; /* take current as new best */
continue;
}
-
- if (best->evr != s->evr) /* compare evr */
- {
- if (pool_evrcmp(pool, best->evr, s->evr, EVRCMP_COMPARE) < 0)
- best = s;
- }
+ r = best->evr != s->evr ? pool_evrcmp(pool, best->evr, s->evr, EVRCMP_COMPARE) : 0;
+#ifdef ENABLE_LINKED_PKGS
+ if (r == 0 && has_package_link(pool, s))
+ r = pool_link_evrcmp(pool, best, s);
+#endif
+ if (r < 0)
+ best = s;
}
plist->elements[j++] = best - pool->solvables; /* finish last group */
plist->count = j;
s->evr = ID_EMPTY;
pool->debugmask = SOLV_DEBUG_RESULT; /* FIXME */
-#ifdef FEDORA
+#if defined(FEDORA) || defined(MAGEIA)
pool->implicitobsoleteusescolors = 1;
#endif
#ifdef RPM5
#include "util.h"
static const char *archpolicies[] = {
-#ifdef FEDORA
+#if defined(FEDORA) || defined(MAGEIA)
"x86_64", "x86_64:athlon:i686:i586:i486:i386",
#else
"x86_64", "x86_64:i686:i586:i486:i386",
"mips64", "mips64",
"mips64el", "mips64el",
"m68k", "m68k",
-#ifdef FEDORA
+#if defined(FEDORA) || defined(MAGEIA)
"ia32e", "ia32e:x86_64:athlon:i686:i586:i486:i386",
"athlon", "athlon:i686:i586:i486:i386",
"amd64", "amd64:x86_64:athlon:i686:i586:i486:i386",
return 0;
}
+/* Iterate over an uninternalized DIRSTRARRAY attribute 'keyname' of
+ * solvable 'solvid'. *iterp must be 0 on the first call and is advanced
+ * on every call; when *didp is nonzero on entry only entries with that
+ * dir id are returned, and *didp is set to the dir id of the returned
+ * entry. Returns the string part of the entry, or 0 (with *iterp reset
+ * to 0) when the iteration is exhausted or the key is not present. */
+const char *
+repodata_lookup_dirstrarray_uninternalized(Repodata *data, Id solvid, Id keyname, Id *didp, Id *iterp)
+{
+  Id *ap, did;
+  Id iter = *iterp;
+  if (iter == 0)	/* find key data */
+    {
+      if (!data->attrs)
+        return 0;
+      ap = data->attrs[solvid - data->start];
+      if (!ap)
+        return 0;
+      /* attrs are (keyid, value) pairs; find our dirstrarray key */
+      for (; *ap; ap += 2)
+        if (data->keys[*ap].name == keyname && data->keys[*ap].type == REPOKEY_TYPE_DIRSTRARRAY)
+          break;
+      if (!*ap)
+        return 0;
+      iter = ap[1];	/* offset of the array in attriddata */
+    }
+  did = *didp;
+  /* attriddata holds (dirid, attrdata offset) pairs, 0-terminated */
+  for (ap = data->attriddata + iter; *ap; ap += 2)
+    {
+      if (did && ap[0] != did)
+        continue;	/* filtered out by the requested dir id */
+      *didp = ap[0];
+      *iterp = ap - data->attriddata + 2;	/* resume after this entry */
+      return (const char *)data->attrdata + ap[1];
+    }
+  *iterp = 0;
+  return 0;
+}
/************************************************************************
* data search
case REPOKEY_TYPE_VOID:
case REPOKEY_TYPE_CONSTANT:
case REPOKEY_TYPE_CONSTANTID:
+ case REPOKEY_TYPE_DELETED:
break;
case REPOKEY_TYPE_STR:
data_addblob(xd, data->attrdata + val, strlen((char *)(data->attrdata + val)) + 1);
sp = schema;
kp = data->xattrs[-*ida];
if (!kp)
- continue;
+ continue; /* ignore empty elements */
num++;
- for (;*kp; kp += 2)
+ for (; *kp; kp += 2)
*sp++ = *kp;
*sp = 0;
if (!schemaid)
schemaid = repodata_schema2id(data, schema, 1);
else if (schemaid != repodata_schema2id(data, schema, 0))
{
- pool_debug(data->repo->pool, SOLV_FATAL, "fixarray substructs with different schemas\n");
- exit(1);
+ pool_debug(data->repo->pool, SOLV_ERROR, "repodata_serialize_key: fixarray substructs with different schemas\n");
+ num = 0;
+ break;
}
}
+ data_addid(xd, num);
if (!num)
break;
- data_addid(xd, num);
data_addid(xd, schemaid);
for (ida = data->attriddata + val; *ida; ida++)
{
Id *kp = data->xattrs[-*ida];
if (!kp)
continue;
- for (;*kp; kp += 2)
+ for (; *kp; kp += 2)
repodata_serialize_key(data, newincore, newvincore, schema, data->keys + *kp, kp[1]);
}
break;
break;
}
default:
- pool_debug(data->repo->pool, SOLV_FATAL, "don't know how to handle type %d\n", key->type);
+ pool_debug(data->repo->pool, SOLV_FATAL, "repodata_serialize_key: don't know how to handle type %d\n", key->type);
exit(1);
}
if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
}
}
+/* create a circular linked list of all keys that share
+ * the same keyname. The returned array is indexed by key id; an entry
+ * of 0 means no other key has the same name. Caller must solv_free()
+ * the result. */
+static Id *
+calculate_keylink(Repodata *data)
+{
+  int i, j;
+  Id *link;
+  Id maxkeyname = 0, *keytable = 0;
+  link = solv_calloc(data->nkeys, sizeof(Id));
+  if (data->nkeys <= 2)
+    return link;	/* at most one real key, no links possible */
+  for (i = 1; i < data->nkeys; i++)
+    {
+      Id n = data->keys[i].name;
+      /* grow the name -> last-key table on demand */
+      if (n >= maxkeyname)
+        {
+          keytable = solv_realloc2(keytable, n + 128, sizeof(Id));
+          memset(keytable + maxkeyname, 0, (n + 128 - maxkeyname) * sizeof(Id));
+          maxkeyname = n + 128;
+        }
+      j = keytable[n];	/* previous key with this name, if any */
+      if (j)
+        link[i] = link[j];
+      else
+        j = i;
+      link[j] = i;	/* close the circle through the new key */
+      keytable[n] = i;
+    }
+  /* remove links that just point to themselves */
+  for (i = 1; i < data->nkeys; i++)
+    if (link[i] == i)
+      link[i] = 0;
+  solv_free(keytable);
+  return link;
+}
+
void
repodata_internalize(Repodata *data)
{
Repokey *key, solvkey;
Id entry, nentry;
- Id schemaid, keyid, *schema, *sp, oldschema, *keyp, *seen;
+ Id schemaid, keyid, *schema, *sp, oldschemaid, *keyp, *seen;
+ Offset *oldincoreoffs = 0;
int schemaidx;
unsigned char *dp, *ndp;
- int newschema, oldcount;
+ int neednewschema;
struct extdata newincore;
struct extdata newvincore;
Id solvkeyid;
+ Id *keylink;
+ int haveoldkl;
if (!data->attrs && !data->xattrs)
return;
data->mainschema = 0;
data->mainschemaoffsets = solv_free(data->mainschemaoffsets);
+ keylink = calculate_keylink(data);
/* join entry data */
/* we start with the meta data, entry -1 */
for (entry = -1; entry < nentry; entry++)
{
- memset(seen, 0, data->nkeys * sizeof(Id));
- oldschema = 0;
+ oldschemaid = 0;
dp = data->incoredata;
if (dp)
{
dp += entry >= 0 ? data->incoreoffset[entry] : 1;
- dp = data_read_id(dp, &oldschema);
+ dp = data_read_id(dp, &oldschemaid);
}
+ memset(seen, 0, data->nkeys * sizeof(Id));
#if 0
-fprintf(stderr, "oldschema %d\n", oldschema);
-fprintf(stderr, "schemata %d\n", data->schemata[oldschema]);
+fprintf(stderr, "oldschemaid %d\n", oldschemaid);
+fprintf(stderr, "schemata %d\n", data->schemata[oldschemaid]);
fprintf(stderr, "schemadata %p\n", data->schemadata);
#endif
- /* seen: -1: old data 0: skipped >0: id + 1 */
- newschema = 0;
- oldcount = 0;
+
+ /* seen: -1: old data, 0: skipped, >0: id + 1 */
+ neednewschema = 0;
sp = schema;
- for (keyp = data->schemadata + data->schemata[oldschema]; *keyp; keyp++)
+ haveoldkl = 0;
+ for (keyp = data->schemadata + data->schemata[oldschemaid]; *keyp; keyp++)
{
if (seen[*keyp])
{
- pool_debug(data->repo->pool, SOLV_FATAL, "Inconsistent old data (key occured twice).\n");
- exit(1);
+ /* oops, should not happen */
+ neednewschema = 1;
+ continue;
}
- seen[*keyp] = -1;
+ seen[*keyp] = -1; /* use old marker */
*sp++ = *keyp;
- oldcount++;
+ if (keylink[*keyp])
+ haveoldkl = 1; /* potential keylink conflict */
}
- if (entry >= 0)
- keyp = data->attrs ? data->attrs[entry] : 0;
- else
+
+ /* strip solvables key */
+ if (entry < 0 && solvkeyid && seen[solvkeyid])
{
- /* strip solvables key */
*sp = 0;
for (sp = keyp = schema; *sp; sp++)
if (*sp != solvkeyid)
*keyp++ = *sp;
- else
- oldcount--;
sp = keyp;
seen[solvkeyid] = 0;
- keyp = data->xattrs ? data->xattrs[1] : 0;
+ neednewschema = 1;
}
+
+ /* add new entries */
+ if (entry >= 0)
+ keyp = data->attrs ? data->attrs[entry] : 0;
+ else
+ keyp = data->xattrs ? data->xattrs[1] : 0;
if (keyp)
for (; *keyp; keyp += 2)
{
if (!seen[*keyp])
{
- newschema = 1;
+ neednewschema = 1;
*sp++ = *keyp;
+ if (haveoldkl && keylink[*keyp]) /* this should be pretty rare */
+ {
+ Id kl;
+ for (kl = keylink[*keyp]; kl != *keyp; kl = keylink[kl])
+ if (seen[kl] == -1)
+ {
+ /* replacing old key kl, remove from schema and seen */
+ Id *osp;
+ for (osp = schema; osp < sp; osp++)
+ if (*osp == kl)
+ {
+ memmove(osp, osp + 1, (sp - osp) * sizeof(Id));
+ sp--;
+ seen[kl] = 0;
+ break;
+ }
+ }
+ }
}
seen[*keyp] = keyp[1] + 1;
}
+
+ /* add solvables key if needed */
if (entry < 0 && data->end != data->start)
{
- *sp++ = solvkeyid;
- newschema = 1;
+ *sp++ = solvkeyid; /* always last in schema */
+ neednewschema = 1;
}
+
+ /* commit schema */
*sp = 0;
- if (newschema)
+ if (neednewschema)
/* Ideally we'd like to sort the new schema here, to ensure
- schema equality independend of the ordering. We can't do that
- yet. For once see below (old ids need to come before new ids).
- An additional difficulty is that we also need to move
- the values with the keys. */
+ schema equality independend of the ordering. */
schemaid = repodata_schema2id(data, schema, 1);
else
- schemaid = oldschema;
+ schemaid = oldschemaid;
+
+ if (entry < 0)
+ {
+ data->mainschemaoffsets = solv_calloc(sp - schema, sizeof(Id));
+ data->mainschema = schemaid;
+ }
+
+ /* find offsets in old incore data */
+ if (oldschemaid)
+ {
+ Id *lastneeded = 0;
+ for (sp = data->schemadata + data->schemata[oldschemaid]; *sp; sp++)
+ if (seen[*sp] == -1)
+ lastneeded = sp + 1;
+ if (lastneeded)
+ {
+ if (!oldincoreoffs)
+ oldincoreoffs = solv_malloc2(data->nkeys, 2 * sizeof(Offset));
+ for (sp = data->schemadata + data->schemata[oldschemaid]; sp != lastneeded; sp++)
+ {
+ /* Skip the data associated with this old key. */
+ key = data->keys + *sp;
+ ndp = dp;
+ if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ ndp = data_skip(ndp, REPOKEY_TYPE_ID);
+ ndp = data_skip(ndp, REPOKEY_TYPE_ID);
+ }
+ else if (key->storage == KEY_STORAGE_INCORE)
+ ndp = data_skip_key(data, ndp, key);
+ oldincoreoffs[*sp * 2] = dp - data->incoredata;
+ oldincoreoffs[*sp * 2 + 1] = ndp - dp;
+ dp = ndp;
+ }
+ }
+ }
+ /* just copy over the complete old entry (including the schemaid) if there was no new data */
+ if (entry >= 0 && !neednewschema && oldschemaid && (!data->attrs || !data->attrs[entry]) && dp)
+ {
+ ndp = data->incoredata + data->incoreoffset[entry];
+ data->incoreoffset[entry] = newincore.len;
+ data_addblob(&newincore, ndp, dp - ndp);
+ goto entrydone;
+ }
/* Now create data blob. We walk through the (possibly new) schema
and either copy over old data, or insert the new. */
- /* XXX Here we rely on the fact that the (new) schema has the form
- o1 o2 o3 o4 ... | n1 n2 n3 ...
- (oX being the old keyids (possibly overwritten), and nX being
- the new keyids). This rules out sorting the keyids in order
- to ensure a small schema count. */
if (entry >= 0)
data->incoreoffset[entry] = newincore.len;
data_addid(&newincore, schemaid);
- if (entry == -1)
- {
- data->mainschema = schemaid;
- data->mainschemaoffsets = solv_calloc(sp - schema, sizeof(Id));
- }
+
/* we don't use a pointer to the schemadata here as repodata_serialize_key
* may call repodata_schema2id() which might realloc our schemadata */
for (schemaidx = data->schemata[schemaid]; (keyid = data->schemadata[schemaidx]) != 0; schemaidx++)
{
- if (entry == -1)
- data->mainschemaoffsets[schemaidx - data->schemata[schemaid]] = newincore.len;
- if (keyid == solvkeyid)
+ if (entry < 0)
{
- /* add flexarray entry count */
- data_addid(&newincore, data->end - data->start);
- break;
- }
- key = data->keys + keyid;
-#if 0
- fprintf(stderr, "internalize %d(%d):%s:%s\n", entry, entry + data->start, pool_id2str(data->repo->pool, key->name), pool_id2str(data->repo->pool, key->type));
-#endif
- ndp = dp;
- if (oldcount)
- {
- /* Skip the data associated with this old key. */
- if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
+ data->mainschemaoffsets[schemaidx - data->schemata[schemaid]] = newincore.len;
+ if (keyid == solvkeyid)
{
- ndp = data_skip(dp, REPOKEY_TYPE_ID);
- ndp = data_skip(ndp, REPOKEY_TYPE_ID);
+ /* add flexarray entry count */
+ data_addid(&newincore, data->end - data->start);
+ break; /* always the last entry */
}
- else if (key->storage == KEY_STORAGE_INCORE)
- ndp = data_skip_key(data, dp, key);
- oldcount--;
}
if (seen[keyid] == -1)
{
- /* If this key was an old one _and_ was not overwritten with
- a different value copy over the old value (we skipped it
- above). */
- if (dp != ndp)
- data_addblob(&newincore, dp, ndp - dp);
- seen[keyid] = 0;
+ if (oldincoreoffs[keyid * 2 + 1])
+ data_addblob(&newincore, data->incoredata + oldincoreoffs[keyid * 2], oldincoreoffs[keyid * 2 + 1]);
}
else if (seen[keyid])
- {
- /* Otherwise we have a new value. Parse it into the internal form. */
- repodata_serialize_key(data, &newincore, &newvincore, schema, key, seen[keyid] - 1);
- }
- dp = ndp;
+ repodata_serialize_key(data, &newincore, &newvincore, schema, data->keys + keyid, seen[keyid] - 1);
}
+
+entrydone:
+ /* free memory */
if (entry >= 0 && data->attrs)
{
if (data->attrs[entry])
data->lastdatalen = 0;
solv_free(schema);
solv_free(seen);
+ solv_free(keylink);
+ solv_free(oldincoreoffs);
repodata_free_schemahash(data);
solv_free(data->incoredata);
void repodata_set_deltalocation(Repodata *data, Id handle, int medianr, const char *dir, const char *file);
void repodata_set_sourcepkg(Repodata *data, Id solvid, const char *sourcepkg);
Id repodata_lookup_id_uninternalized(Repodata *data, Id solvid, Id keyname, Id voidid);
+const char *repodata_lookup_dirstrarray_uninternalized(Repodata *data, Id solvid, Id keyname, Id *didp, Id *iterp);
/* stats */
unsigned int repodata_memused(Repodata *data);
if (!qs->count)
{
if (allow_all)
- return 0; /* orphaned, don't create feature rule */
+ return 0; /* orphaned, don't create feature rule */
/* check if this is an orphaned package */
policy_findupdatepackages(solv, s, qs, 1);
if (!qs->count)
- return 0; /* orphaned, don't create update rule */
+ return 0; /* orphaned, don't create update rule */
qs->count = 0;
return -SYSTEMSOLVABLE; /* supported but not installable */
}
if (allow_all)
return s - pool->solvables;
/* check if it is ok to keep the installed package */
+ if (solv->dupmap.size && MAPTST(&solv->dupmap, s - pool->solvables))
+ return s - pool->solvables;
for (i = 0; i < qs->count; i++)
{
Solvable *ns = pool->solvables + qs->elements[i];
return -SYSTEMSOLVABLE;
}
+#if 0
/* add packages from the dup repositories to the update candidates
* this isn't needed for the global dup mode as all packages are
* from dup repos in that case */
}
queue_free(&dupqs);
}
+#endif
/*-------------------------------------------------------------------
*
Id p, d;
Queue qs;
Id qsbuf[64];
+ int isorphaned = 0;
queue_init_buffer(&qs, qsbuf, sizeof(qsbuf)/sizeof(*qsbuf));
p = s - pool->solvables;
/* find update candidates for 's' */
- if (solv->dupmap_all)
+ if (solv->dupmap_all || (solv->dupinvolvedmap.size && MAPTST(&solv->dupinvolvedmap, p)))
p = finddistupgradepackages(solv, s, &qs, allow_all);
else
- {
- policy_findupdatepackages(solv, s, &qs, allow_all);
- if (!allow_all && solv->dupinvolvedmap.size && MAPTST(&solv->dupinvolvedmap, p))
- addduppackages(solv, s, &qs);
- }
+ policy_findupdatepackages(solv, s, &qs, allow_all);
#ifdef ENABLE_LINKED_PKGS
if (solv->instbuddy && solv->instbuddy[s - pool->solvables - solv->installed->start])
const char *name = pool_id2str(pool, s->name);
if (strncmp(name, "pattern:", 8) == 0 || strncmp(name, "application:", 12) == 0)
{
- /* a linked pseudo package. As it is linked, we do not need an update rule */
+ /* a linked pseudo package. As it is linked, we do not need an update/feature rule */
/* nevertheless we set specialupdaters so we can update */
solver_addrule(solv, 0, 0, 0);
if (!allow_all && qs.count)
}
#endif
- if (!allow_all && !p && solv->dupmap_all)
+ if (!allow_all && !p) /* !p implies qs.count == 0 */
{
queue_push(&solv->orphaned, s - pool->solvables); /* an orphaned package */
if (solv->keep_orphans && !(solv->droporphanedmap_all || (solv->droporphanedmap.size && MAPTST(&solv->droporphanedmap, s - pool->solvables - solv->installed->start))))
p = s - pool->solvables; /* keep this orphaned package installed */
+ queue_free(&qs);
+ solver_addrule(solv, p, 0, 0);
+ return;
}
if (!allow_all && qs.count && solv->multiversion.size)
if (i < qs.count)
{
/* filter out all multiversion packages as they don't update */
- d = pool_queuetowhatprovides(pool, &qs);
+ d = pool_queuetowhatprovides(pool, &qs); /* save qs away */
for (j = i; i < qs.count; i++)
{
if (MAPTST(&solv->multiversion, qs.elements[i]))
}
qs.elements[j++] = qs.elements[i];
}
- if (j < qs.count)
+ if (j < qs.count) /* filtered at least one package? */
{
- if (d && solv->installed && s->repo == solv->installed &&
- (solv->updatemap_all || (solv->updatemap.size && MAPTST(&solv->updatemap, s - pool->solvables - solv->installed->start))))
+ if (j == 0 && p == -SYSTEMSOLVABLE)
{
+ /* this is a multiversion orphan */
+ queue_push(&solv->orphaned, s - pool->solvables);
if (!solv->specialupdaters)
solv->specialupdaters = solv_calloc(solv->installed->end - solv->installed->start, sizeof(Id));
solv->specialupdaters[s - pool->solvables - solv->installed->start] = d;
- }
- if (j == 0 && p == -SYSTEMSOLVABLE && solv->dupmap_all)
- {
- queue_push(&solv->orphaned, s - pool->solvables); /* also treat as orphaned */
- j = qs.count;
+ if (solv->keep_orphans && !(solv->droporphanedmap_all || (solv->droporphanedmap.size && MAPTST(&solv->droporphanedmap, s - pool->solvables - solv->installed->start))))
+ {
+ /* we need to keep the orphan */
+ queue_free(&qs);
+ solver_addrule(solv, s - pool->solvables, 0, 0);
+ return;
+ }
+ /* we can drop it as long as we update */
+ isorphaned = 1;
+ j = qs.count; /* force the update */
}
qs.count = j;
}
{
/* could fallthrough, but then we would do pool_queuetowhatprovides twice */
queue_free(&qs);
- solver_addrule(solv, p, 0, d); /* allow update of s */
+ solver_addrule(solv, s - pool->solvables, 0, d); /* allow update of s */
return;
}
}
}
+ if (!isorphaned && p == -SYSTEMSOLVABLE && solv->dupmap.size)
+ p = s - pool->solvables; /* let the dup rules sort it out */
if (qs.count && p == -SYSTEMSOLVABLE)
p = queue_shift(&qs);
if (qs.count > 1)
queue_pushunique(solv->cleandeps_updatepkgs, p);
}
-static inline void
+static void
solver_addtodupmaps(Solver *solv, Id p, Id how, int targeted)
{
Pool *pool = solv->pool;
solver_addduprules(Solver *solv, Map *addedmap)
{
Pool *pool = solv->pool;
+ Repo *installed = solv->installed;
Id p, pp;
Solvable *s, *ps;
int first, i;
+ Rule *r;
solv->duprules = solv->nrules;
for (i = 1; i < pool->nsolvables; i++)
break;
if (!MAPTST(&solv->dupinvolvedmap, p))
continue;
- if (solv->installed && ps->repo == solv->installed)
+ if (installed && ps->repo == installed)
{
if (!solv->updatemap.size)
- map_grow(&solv->updatemap, solv->installed->end - solv->installed->start);
- MAPSET(&solv->updatemap, p - solv->installed->start);
+ map_grow(&solv->updatemap, installed->end - installed->start);
+ MAPSET(&solv->updatemap, p - installed->start);
if (!MAPTST(&solv->dupmap, p))
{
Id ip, ipp;
if (is->evr == ps->evr && solvable_identical(ps, is))
break;
}
- if (!ip)
- solver_addrule(solv, -p, 0, 0); /* no match, sorry */
- else
- MAPSET(&solv->dupmap, p); /* for best rules processing */
+ if (ip)
+ {
+ /* ok, found a good one. we may keep this package. */
+ MAPSET(&solv->dupmap, p); /* for best rules processing */
+ continue;
+ }
+ r = solv->rules + solv->updaterules + (p - installed->start);
+ if (!r->p)
+ r = solv->rules + solv->featurerules + (p - installed->start);
+ if (r->p && solv->specialupdaters && solv->specialupdaters[p - installed->start])
+ {
+ /* this is a multiversion orphan, we're good if an update is installed */
+ solver_addrule(solv, -p, 0, solv->specialupdaters[p - installed->start]);
+ continue;
+ }
+ solver_addrule(solv, -p, 0, 0); /* no match, sorry */
}
}
else if (!MAPTST(&solv->dupmap, p))
/* check if the newest versions of pi still provides the dependency we're looking for */
static int
-solver_choicerulecheck(Solver *solv, Id pi, Rule *r, Map *m)
+solver_choicerulecheck(Solver *solv, Id pi, Rule *r, Map *m, Queue *q)
{
Pool *pool = solv->pool;
Rule *ur;
- Queue q;
- Id p, pp, qbuf[32];
+ Id p, pp;
int i;
- ur = solv->rules + solv->updaterules + (pi - pool->installed->start);
- if (!ur->p)
- ur = solv->rules + solv->featurerules + (pi - pool->installed->start);
- if (!ur->p)
- return 0;
- queue_init_buffer(&q, qbuf, sizeof(qbuf)/sizeof(*qbuf));
- FOR_RULELITERALS(p, pp, ur)
- if (p > 0)
- queue_push(&q, p);
- if (q.count > 1)
- policy_filter_unwanted(solv, &q, POLICY_MODE_CHOOSE);
- for (i = 0; i < q.count; i++)
- if (MAPTST(m, q.elements[i]))
- break;
- /* 1: none of the newest versions provide it */
- i = i == q.count ? 1 : 0;
- queue_free(&q);
- return i;
+ if (!q->count || q->elements[0] != pi)
+ {
+ if (q->count)
+ queue_empty(q);
+ ur = solv->rules + solv->updaterules + (pi - pool->installed->start);
+ if (!ur->p)
+ ur = solv->rules + solv->featurerules + (pi - pool->installed->start);
+ if (!ur->p)
+ return 0;
+ queue_push2(q, pi, 0);
+ FOR_RULELITERALS(p, pp, ur)
+ if (p > 0)
+ queue_push(q, p);
+ }
+ if (q->count == 2)
+ return 1;
+ if (q->count == 3)
+ {
+ p = q->elements[2];
+ return MAPTST(m, p) ? 0 : 1;
+ }
+ if (!q->elements[1])
+ {
+ for (i = 2; i < q->count; i++)
+ if (!MAPTST(m, q->elements[i]))
+ break;
+ if (i == q->count)
+ return 0; /* all provide it, no need to filter */
+ /* some don't provide it, have to filter */
+ queue_deleten(q, 0, 2);
+ policy_filter_unwanted(solv, q, POLICY_MODE_CHOOSE);
+ queue_unshift(q, 1); /* filter mark */
+ queue_unshift(q, pi);
+ }
+ for (i = 2; i < q->count; i++)
+ if (MAPTST(m, q->elements[i]))
+ return 0; /* at least one provides it */
+ return 1; /* none of the new packages provided it */
}
static inline void
Pool *pool = solv->pool;
Map m, mneg;
Rule *r;
- Queue q, qi;
+ Queue q, qi, qcheck;
int i, j, rid, havechoice;
Id p, d, pp;
Id p2, pp2;
solv->choicerules_ref = solv_calloc(solv->pkgrules_end, sizeof(Id));
queue_init(&q);
queue_init(&qi);
+ queue_init(&qcheck);
map_init(&m, pool->nsolvables);
map_init(&mneg, pool->nsolvables);
/* set up negative assertion map from infarch and dup rules */
p2 = qi.elements[i];
if (!p2)
continue;
- if (solver_choicerulecheck(solv, p2, r, &m))
+ if (solver_choicerulecheck(solv, p2, r, &m, &qcheck))
{
/* oops, remove element p from q */
queue_removeelement(&q, qi.elements[i + 1]);
qi.elements[j++] = p2;
}
queue_truncate(&qi, j);
+
if (!q.count || !qi.count)
{
FOR_RULELITERALS(p, pp, r)
}
queue_free(&q);
queue_free(&qi);
+ queue_free(&qcheck);
map_free(&m);
map_free(&mneg);
solv->choicerules_end = solv->nrules;
Rule *r;
if (m && !MAPTST(m, v - solv->updaterules))
continue;
- /* check if identical to feature rule, we don't like that */
+ /* check if identical to feature rule, we don't like that (except for orphans) */
r = solv->rules + solv->featurerules + (v - solv->updaterules);
if (!r->p)
{
/* update rule == feature rule */
if (v > lastfeature)
lastfeature = v;
+ /* prefer orphaned packages in dup mode */
+ if (solv->keep_orphans)
+ {
+ r = solv->rules + v;
+ if (!r->d && r->p == (solv->installed->start + (v - solv->updaterules)))
+ {
+ lastfeature = v;
+ lastupdate = 0;
+ break;
+ }
+ }
continue;
}
if (v > lastupdate)
if (!solv->decisioncnt_orphan)
solv->decisioncnt_orphan = solv->decisionq.count;
- if (solv->dupmap_all && solv->installed)
+ if (solv->installed && (solv->orphaned.count || solv->brokenorphanrules))
{
int installedone = 0;
Solvable *s;
Rule *r;
int now, solve_start;
- int hasdupjob = 0;
+ int needduprules = 0;
int hasbestinstalljob = 0;
solve_start = solv_timems(0);
MAPSET(&solv->droporphanedmap, p - installed->start);
}
break;
+ case SOLVER_ALLOWUNINSTALL:
+ if (select == SOLVER_SOLVABLE_ALL || (select == SOLVER_SOLVABLE_REPO && installed && what == installed->repoid))
+ solv->allowuninstall_all = 1;
+ FOR_JOB_SELECT(p, pp, select, what)
+ {
+ s = pool->solvables + p;
+ if (s->repo != installed)
+ continue;
+ if (!solv->allowuninstallmap.size)
+ map_grow(&solv->allowuninstallmap, installed->end - installed->start);
+ MAPSET(&solv->allowuninstallmap, p - installed->start);
+ }
+ break;
default:
break;
}
if (how & SOLVER_FORCEBEST)
solv->bestupdatemap_all = 1;
}
- if (!solv->dupmap_all || solv->allowuninstall)
- hasdupjob = 1;
+ if ((how & SOLVER_TARGETED) != 0)
+ needduprules = 1;
+ if (!solv->dupmap_all || solv->allowuninstall || solv->allowuninstall_all || solv->allowuninstallmap.size || solv->keep_orphans)
+ needduprules = 1;
break;
default:
break;
/* create dup maps if needed. We need the maps early to create our
* update rules */
- if (hasdupjob)
+ if (needduprules)
solver_createdupmaps(solv);
/*
* check for and remove duplicate
*/
r = solv->rules + solv->nrules - 1; /* r: update rule */
- if (!r->p)
- continue;
sr = r - (installed->end - installed->start); /* sr: feature rule */
+ if (!r->p)
+ {
+ if (sr->p)
+ memset(sr, 0, sizeof(*sr)); /* no feature rules without update rules */
+ continue;
+ }
/* it's also orphaned if the feature rule consists just of the installed package */
if (!solv->dupmap_all && sr->p == i && !sr->d && !sr->w2)
queue_push(&solv->orphaned, i);
break;
case SOLVER_ALLOWUNINSTALL:
POOL_DEBUG(SOLV_DEBUG_JOB, "job: allowuninstall %s\n", solver_select2str(pool, select, what));
- if (select == SOLVER_SOLVABLE_ALL || (select == SOLVER_SOLVABLE_REPO && installed && what == installed->repoid))
- solv->allowuninstall_all = 1;
- FOR_JOB_SELECT(p, pp, select, what)
- {
- s = pool->solvables + p;
- if (s->repo != installed)
- continue;
- if (!solv->allowuninstallmap.size)
- map_grow(&solv->allowuninstallmap, installed->end - installed->start);
- MAPSET(&solv->allowuninstallmap, p - installed->start);
- }
break;
default:
POOL_DEBUG(SOLV_DEBUG_JOB, "job: unknown job\n");
else
solv->infarchrules = solv->infarchrules_end = solv->nrules;
- if (hasdupjob)
+ if (needduprules)
solver_addduprules(solv, &addedmap);
else
solv->duprules = solv->duprules_end = solv->nrules;
else
solv->bestrules = solv->bestrules_end = solv->nrules;
- if (hasdupjob)
+ if (needduprules)
solver_freedupmaps(solv); /* no longer needed */
if (solv->do_yum_obsoletes)
return r;
}
+/* this was solv_realloc2(old, len, size), but for huge len values
+ * we now overshoot the block-rounded size to reduce the number of reallocations */
+void *
+solv_extend_realloc(void *old, size_t len, size_t size, size_t block)
+{
+ size_t xblock = (block + 1) << 5;
+ len = (len + block) & ~block;
+ if (len >= xblock && xblock)
+ {
+ xblock <<= 1;
+ while (len >= xblock && xblock)
+ xblock <<= 1;
+ if (xblock)
+ {
+ size_t nlen;
+ xblock = (xblock >> 5) - 1;
+ nlen = (len + xblock) & ~xblock;
+ if (nlen > len)
+ len = nlen;
+ }
+ }
+ return solv_realloc2(old, len, size);
+}
+
void *
solv_free(void *mem)
{
extern void *solv_calloc(size_t, size_t);
extern void *solv_realloc(void *, size_t);
extern void *solv_realloc2(void *, size_t, size_t);
+extern void *solv_extend_realloc(void *, size_t, size_t, size_t);
extern void *solv_free(void *);
extern char *solv_strdup(const char *);
extern void solv_oom(size_t, size_t);
if (nmemb == 1)
{
if ((len & block) == 0)
- buf = solv_realloc2(buf, len + (1 + block), size);
+ buf = solv_extend_realloc(buf, len + 1, size, block);
}
else
{
if (((len - 1) | block) != ((len + nmemb - 1) | block))
- buf = solv_realloc2(buf, (len + (nmemb + block)) & ~block, size);
+ buf = solv_extend_realloc(buf, len + nmemb, size, block);
}
return buf;
}
static inline void *solv_extend_resize(void *buf, size_t len, size_t size, size_t block)
{
if (len)
- buf = solv_realloc2(buf, (len + block) & ~block, size);
+ buf = solv_extend_realloc(buf, len, size, block);
return buf;
}
void *buf;
if (!len)
return 0;
- buf = solv_malloc2((len + block) & ~block, size);
+ buf = solv_extend_realloc((void *)0, len, size, block);
memset(buf, 0, ((len + block) & ~block) * size);
return buf;
}
--- /dev/null
+# test dup with multiversion packages
+#
+# part 1: simple update
+repo system 0 testtags <inline>
+#>=Pkg: a 1 1 i686
+repo available 0 testtags <inline>
+#>=Pkg: a 2 1 i686
+system i686 * system
+
+job multiversion name a
+job distupgrade all packages
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+nextjob
+
+job multiversion name a
+job distupgrade repo available
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+
+### same with keeporphans
+
+nextjob
+
+solverflags keeporphans
+job multiversion name a
+job distupgrade all packages
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+
+nextjob
+
+solverflags keeporphans
+job multiversion name a
+job distupgrade repo available
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+
+### same with allowuninstall
+
+nextjob
+
+solverflags allowuninstall
+job multiversion name a
+job distupgrade all packages
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+
+nextjob
+
+solverflags allowuninstall
+job multiversion name a
+job distupgrade repo available
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+
+### same with allowuninstall and keeporphans
+
+nextjob
+
+solverflags allowuninstall keeporphans
+job multiversion name a
+job distupgrade all packages
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+
+nextjob
+
+solverflags allowuninstall keeporphans
+job multiversion name a
+job distupgrade repo available
+# a-1-1 is treated as orphaned and stays behind
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+
+
+
--- /dev/null
+# test dup with multiversion packages
+# same as with dup_multiversion1, but we can't keep the orphan
+
+#
+# part 1: simple update
+repo system 0 testtags <inline>
+#>=Pkg: a 1 1 i686
+#>=Pkg: b 1 1 i686
+repo available 0 testtags <inline>
+#>=Pkg: a 2 1 i686
+#>=Pkg: b 2 1 i686
+#>=Con: a = 1-1
+system i686 * system
+
+job multiversion name a
+job distupgrade all packages
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+#>install a-2-1.i686@available
+#>upgrade b-1-1.i686@system b-2-1.i686@available
+
+nextjob
+
+job multiversion name a
+job distupgrade repo available
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+#>install a-2-1.i686@available
+#>upgrade b-1-1.i686@system b-2-1.i686@available
+
+
+### same with keeporphans, this will result in problems as we cannot keep the orphan
+
+nextjob
+
+solverflags keeporphans
+job multiversion name a
+job distupgrade all packages
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+#>problem 4d4de423 info package b-2-1.i686 conflicts with a = 1-1 provided by a-1-1.i686
+#>problem 4d4de423 solution 2cf4745c erase a-1-1.i686@system
+#>problem 4d4de423 solution 2cf4745c replace a-1-1.i686@system a-2-1.i686@available
+#>problem 4d4de423 solution 5a433aff allow b-1-1.i686@system
+#>problem 4d4de423 solution ce4305f2 erase b-1-1.i686@system
+
+nextjob
+
+solverflags keeporphans
+job multiversion name a
+job distupgrade repo available
+result transaction,problems <inline>
+#>install a-2-1.i686@available
+#>problem 4d4de423 info package b-2-1.i686 conflicts with a = 1-1 provided by a-1-1.i686
+#>problem 4d4de423 solution 2cf4745c erase a-1-1.i686@system
+#>problem 4d4de423 solution 2cf4745c replace a-1-1.i686@system a-2-1.i686@available
+#>problem 4d4de423 solution 5a433aff allow b-1-1.i686@system
+#>problem 4d4de423 solution ce4305f2 erase b-1-1.i686@system
+
+### same with allowuninstall
+
+nextjob
+
+solverflags allowuninstall
+job multiversion name a
+job distupgrade all packages
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+#>install a-2-1.i686@available
+#>upgrade b-1-1.i686@system b-2-1.i686@available
+
+nextjob
+
+solverflags allowuninstall
+job multiversion name a
+job distupgrade repo available
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+#>install a-2-1.i686@available
+#>upgrade b-1-1.i686@system b-2-1.i686@available
+
+
+### same with allowuninstall and keeporphans
+
+nextjob
+
+solverflags allowuninstall keeporphans
+job multiversion name a
+job distupgrade all packages
+# a-1-1 is treated as orphaned and stays behind; b-1-1 gets erased
+result transaction,problems <inline>
+#>erase b-1-1.i686@system
+#>install a-2-1.i686@available
+
+
+nextjob
+
+solverflags allowuninstall keeporphans
+job multiversion name a
+job distupgrade repo available
+# a-1-1 is treated as orphaned and stays behind; b-1-1 gets erased
+result transaction,problems <inline>
+#>erase b-1-1.i686@system
+#>install a-2-1.i686@available
+
+
--- /dev/null
+# test dup with multiversion packages where we cannot install the
+# target. Should give problems except for allowuninstall.
+#
+# part 1: simple update
+repo system 0 testtags <inline>
+#>=Pkg: a 1 1 i686
+repo available 0 testtags <inline>
+#>=Pkg: a 2 1 i686
+#>=Req: c
+system i686 * system
+
+job multiversion name a
+job distupgrade all packages
+result transaction,problems <inline>
+#>problem 251f1f35 info nothing provides c needed by a-2-1.i686
+#>problem 251f1f35 solution 2f2d254c allow a-1-1.i686@system
+
+nextjob
+
+job multiversion name a
+job distupgrade repo available
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+#>problem 251f1f35 info nothing provides c needed by a-2-1.i686
+#>problem 251f1f35 solution 2f2d254c allow a-1-1.i686@system
+
+### same with keeporphans
+
+nextjob
+
+solverflags keeporphans
+job multiversion name a
+job distupgrade all packages
+result transaction,problems <inline>
+#>problem 771581fd info nothing provides c needed by a-2-1.i686
+#>problem 771581fd solution 179b72ed allow a-1-1.i686@system
+#>problem 771581fd solution 2cf4745c erase a-1-1.i686@system
+
+nextjob
+
+solverflags keeporphans
+job multiversion name a
+job distupgrade repo available
+result transaction,problems <inline>
+#>problem 771581fd info nothing provides c needed by a-2-1.i686
+#>problem 771581fd solution 179b72ed allow a-1-1.i686@system
+#>problem 771581fd solution 2cf4745c erase a-1-1.i686@system
+
+### same with allowuninstall
+
+nextjob
+
+solverflags allowuninstall
+job multiversion name a
+job distupgrade all packages
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+
+
+nextjob
+
+solverflags allowuninstall
+job multiversion name a
+job distupgrade repo available
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+
+
+### same with allowuninstall and keeporphans
+
+nextjob
+
+solverflags allowuninstall keeporphans
+job multiversion name a
+job distupgrade all packages
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+
+
+nextjob
+
+solverflags allowuninstall keeporphans
+job multiversion name a
+job distupgrade repo available
+result transaction,problems <inline>
+#>erase a-1-1.i686@system
+
+
#ifdef ENABLE_APPDATA
if (add_appdata)
- repo_add_appdata_dir(repo, "/usr/share/appdata", REPO_USE_ROOTDIR | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE);
+ repo_add_appdata_dir(repo, "/usr/share/appdata", REPO_USE_ROOTDIR | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE | APPDATA_SEARCH_UNINTERNALIZED_FILELIST);
#endif
repodata_internalize(data);