#include "zypp/base/Logger.h"
#include "zypp/media/MediaMultiCurl.h"
#include "zypp/media/MetaLinkParser.h"
-#include "zypp/ManagedFile.h"
using namespace std;
using namespace zypp::base;
worker->evaluateCurlCode(Pathname(), cc, false);
}
}
-
- if ( _filesize > 0 && _fetchedgoodsize > _filesize ) {
- ZYPP_THROW(MediaFileSizeExceededException(_baseurl, _filesize));
- }
}
// send report
if (_report)
{
int percent = _totalsize ? (100 * (_fetchedgoodsize + _fetchedsize)) / (_totalsize + _fetchedsize) : 0;
-
double avg = 0;
if (now > _starttime)
avg = _fetchedsize / (now - _starttime);
}
// here we try to suppress all progress coming from a metalink download
-// bsc#1021291: Nevertheless send alive trigger (without stats), so UIs
-// are able to abort a hanging metalink download via callback response.
int MediaMultiCurl::progressCallback( void *clientp, double dltotal, double dlnow, double ultotal, double ulnow)
{
CURL *_curl = MediaCurl::progressCallback_getcurl(clientp);
if (!_curl)
- return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
-
- // bsc#408814: Don't report any sizes before we don't have data on disk. Data reported
- // due to redirection etc. are not interesting, but may disturb filesize checks.
- FILE *fp = 0;
- if ( curl_easy_getinfo( _curl, CURLINFO_PRIVATE, &fp ) != CURLE_OK || !fp )
- return MediaCurl::aliveCallback( clientp, dltotal, dlnow, ultotal, ulnow );
- if ( ftell( fp ) == 0 )
- return MediaCurl::aliveCallback( clientp, dltotal, 0.0, ultotal, ulnow );
+ return 0;
- // (no longer needed due to the filesize check above?)
// work around curl bug that gives us old data
long httpReturnCode = 0;
if (curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &httpReturnCode ) != CURLE_OK || httpReturnCode == 0)
- return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
+ return 0;
char *ptr = NULL;
bool ismetalink = false;
}
if (!ismetalink && dlnow < 256)
{
- // can't tell yet, ...
- return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
+ // can't tell yet, suppress callback
+ return 0;
}
if (!ismetalink)
{
+ FILE *fp = 0;
+ if (curl_easy_getinfo(_curl, CURLINFO_PRIVATE, &fp) != CURLE_OK)
+ return 0;
+ if (!fp)
+ return 0; /* hmm */
fflush(fp);
ismetalink = looks_like_metalink_fd(fileno(fp));
DBG << "looks_like_metalink_fd: " << ismetalink << endl;
}
if (ismetalink)
{
- // this is a metalink file change the expected filesize
- MediaCurl::resetExpectedFileSize( clientp, ByteCount( 2, ByteCount::MB) );
- // we're downloading the metalink file. Just trigger aliveCallbacks
- curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &MediaCurl::aliveCallback);
- return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
+ // we're downloading the metalink file. no progress please.
+ curl_easy_setopt(_curl, CURLOPT_NOPROGRESS, 1L);
+ return 0;
}
curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &MediaCurl::progressCallback);
return MediaCurl::progressCallback(clientp, dltotal, dlnow, ultotal, ulnow);
}
-void MediaMultiCurl::doGetFileCopy( const Pathname & filename , const Pathname & target, callback::SendReport<DownloadProgressReport> & report, const ByteCount &expectedFileSize_r, RequestOptions options ) const
+void MediaMultiCurl::doGetFileCopy( const Pathname & filename , const Pathname & target, callback::SendReport<DownloadProgressReport> & report, RequestOptions options ) const
{
Pathname dest = target.absolutename();
if( assert_dir( dest.dirname() ) )
{
DBG << "assert_dir " << dest.dirname() << " failed" << endl;
- ZYPP_THROW( MediaSystemException(getFileUrl(filename), "System error on " + dest.dirname().asString()) );
+ Url url(getFileUrl(filename));
+ ZYPP_THROW( MediaSystemException(url, "System error on " + dest.dirname().asString()) );
}
-
- ManagedFile destNew { target.extend( ".new.zypp.XXXXXX" ) };
- AutoFILE file;
+ string destNew = target.asString() + ".new.zypp.XXXXXX";
+ char *buf = ::strdup( destNew.c_str());
+ if( !buf)
{
- AutoFREE<char> buf { ::strdup( (*destNew).c_str() ) };
- if( ! buf )
- {
- ERR << "out of memory for temp file name" << endl;
- ZYPP_THROW(MediaSystemException(getFileUrl(filename), "out of memory for temp file name"));
- }
-
- AutoFD tmp_fd { ::mkostemp( buf, O_CLOEXEC ) };
- if( tmp_fd == -1 )
- {
- ERR << "mkstemp failed for file '" << destNew << "'" << endl;
- ZYPP_THROW(MediaWriteException(destNew));
- }
- destNew = ManagedFile( (*buf), filesystem::unlink );
-
- file = ::fdopen( tmp_fd, "we" );
- if ( ! file )
- {
- ERR << "fopen failed for file '" << destNew << "'" << endl;
- ZYPP_THROW(MediaWriteException(destNew));
- }
- tmp_fd.resetDispose(); // don't close it here! ::fdopen moved ownership to file
+ ERR << "out of memory for temp file name" << endl;
+ Url url(getFileUrl(filename));
+ ZYPP_THROW(MediaSystemException(url, "out of memory for temp file name"));
}
+ int tmp_fd = ::mkostemp( buf, O_CLOEXEC );
+ if( tmp_fd == -1)
+ {
+ free( buf);
+ ERR << "mkstemp failed for file '" << destNew << "'" << endl;
+ ZYPP_THROW(MediaWriteException(destNew));
+ }
+ destNew = buf;
+ free( buf);
+
+ FILE *file = ::fdopen( tmp_fd, "we" );
+ if ( !file ) {
+ ::close( tmp_fd);
+ filesystem::unlink( destNew );
+ ERR << "fopen failed for file '" << destNew << "'" << endl;
+ ZYPP_THROW(MediaWriteException(destNew));
+ }
DBG << "dest: " << dest << endl;
DBG << "temp: " << destNew << endl;
curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeadersMetalink);
// change to our own progress funcion
curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &progressCallback);
- curl_easy_setopt(_curl, CURLOPT_PRIVATE, (*file) ); // important to pass the FILE* explicitly (passing through varargs)
+ curl_easy_setopt(_curl, CURLOPT_PRIVATE, file);
try
{
- MediaCurl::doGetFileCopyFile(filename, dest, file, report, expectedFileSize_r, options);
+ MediaCurl::doGetFileCopyFile(filename, dest, file, report, options);
}
catch (Exception &ex)
{
+ ::fclose(file);
+ filesystem::unlink(destNew);
curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeaders);
// some proxies do not store the content type, so also look at the file to find
// out if we received a metalink (bnc#649925)
fflush(file);
- if (looks_like_metalink(destNew))
+ if (looks_like_metalink(Pathname(destNew)))
ismetalink = true;
}
if (ismetalink)
{
bool userabort = false;
+ fclose(file);
+ file = NULL;
Pathname failedFile = ZConfig::instance().repoCachePath() / "MultiCurl.failed";
- file = nullptr; // explicitly close destNew before the parser reads it.
try
{
MetaLinkParser mlp;
- mlp.parse(destNew);
+ mlp.parse(Pathname(destNew));
MediaBlockList bl = mlp.getBlockList();
vector<Url> urls = mlp.getUrls();
XXX << bl << endl;
- file = fopen((*destNew).c_str(), "w+e");
+ file = fopen(destNew.c_str(), "w+e");
if (!file)
ZYPP_THROW(MediaWriteException(destNew));
if (PathInfo(target).isExist())
}
try
{
- multifetch(filename, file, &urls, &report, &bl, expectedFileSize_r);
+ multifetch(filename, file, &urls, &report, &bl);
}
catch (MediaCurlException &ex)
{
ZYPP_RETHROW(ex);
}
}
- catch (MediaFileSizeExceededException &ex) {
- ZYPP_RETHROW(ex);
- }
catch (Exception &ex)
{
// something went wrong. fall back to normal download
- file = nullptr; // explicitly close destNew before moving it
+ if (file)
+ fclose(file);
+ file = NULL;
if (PathInfo(destNew).size() >= 63336)
{
::unlink(failedFile.asString().c_str());
}
if (userabort)
{
+ filesystem::unlink(destNew);
ZYPP_RETHROW(ex);
}
- file = fopen((*destNew).c_str(), "w+e");
+ file = fopen(destNew.c_str(), "w+e");
if (!file)
ZYPP_THROW(MediaWriteException(destNew));
- MediaCurl::doGetFileCopyFile(filename, dest, file, report, expectedFileSize_r, options | OPTION_NO_REPORT_START);
+ MediaCurl::doGetFileCopyFile(filename, dest, file, report, options | OPTION_NO_REPORT_START);
}
}
{
ERR << "Failed to chmod file " << destNew << endl;
}
-
- file.resetDispose(); // we're going to close it manually here
if (::fclose(file))
{
filesystem::unlink(destNew);
ERR << "Fclose failed for file '" << destNew << "'" << endl;
ZYPP_THROW(MediaWriteException(destNew));
}
-
if ( rename( destNew, dest ) != 0 )
{
ERR << "Rename failed" << endl;
ZYPP_THROW(MediaWriteException(dest));
}
- destNew.resetDispose(); // no more need to unlink it
-
DBG << "done: " << PathInfo(dest) << endl;
}
+///////////////////////////////////////////////////////////////////
+namespace {
+  // bsc#933839: propagate proxy settings passed in the repo URL
+  //
+  // Copies the proxy-related query parameters ("proxy", "proxyport",
+  // "proxyuser", "proxypass") from \a template_r onto \a url_r, skipping
+  // any parameter that is empty/unset in the template. \a url_r is taken
+  // by value, so the caller's Url object is never modified; the augmented
+  // copy is returned.
+  inline Url propagateQueryParams( Url url_r, const Url & template_r )
+  {
+    for ( std::string param : { "proxy", "proxyport", "proxyuser", "proxypass"} )
+    {
+      // const& may bind to a temporary returned by getQueryParam();
+      // lifetime extension keeps it valid for the duration of the check.
+      const std::string & value( template_r.getQueryParam( param ) );
+      if ( ! value.empty() )
+        url_r.setQueryParam( param, value );
+    }
+    return url_r;
+  }
+}
+///////////////////////////////////////////////////////////////////
+
void MediaMultiCurl::multifetch(const Pathname & filename, FILE *fp, std::vector<Url> *urllist, callback::SendReport<DownloadProgressReport> *report, MediaBlockList *blklist, off_t filesize) const
{
Url baseurl(getFileUrl(filename));
if (!_multi)
ZYPP_THROW(MediaCurlInitException(baseurl));
}
-
multifetchrequest req(this, filename, baseurl, _multi, fp, report, blklist, filesize);
req._timeout = _settings.timeout();
req._connect_timeout = _settings.connectTimeout();
if (scheme == "http" || scheme == "https" || scheme == "ftp" || scheme == "tftp")
{
checkProtocol(*urliter);
- myurllist.push_back(*urliter);
+ myurllist.push_back(propagateQueryParams(*urliter, _url));
}
}
catch (...)