using namespace std;
using namespace zypp::base;
+#undef CURLVERSION_AT_LEAST
+#define CURLVERSION_AT_LEAST(M,N,O) LIBCURL_VERSION_NUM >= ((((M)<<8)+(N))<<8)+(O)
+
namespace zypp {
namespace media {
double _timeout;
double _connect_timeout;
double _maxspeed;
+ int _maxworkers;
};
#define BLKSIZE 131072
-#define MAXWORKERS 5
#define MAXURLS 10
return size;
}
+ if (_blkstart && _off == _blkstart)
+ {
+ // make sure that the server replied with "partial content"
+ // for http requests
+ char *effurl;
+ (void)curl_easy_getinfo(_curl, CURLINFO_EFFECTIVE_URL, &effurl);
+ if (effurl && !strncasecmp(effurl, "http", 4))
+ {
+ long statuscode = 0;
+ (void)curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &statuscode);
+ if (statuscode != 206)
+ return size ? 0 : 1;
+ }
+ }
+
_blkreceived += len;
_received += len;
_request->_lastprogress = now;
-
+
if (_state == WORKER_DISCARD || !_request->_fp)
{
// block is no longer needed
string line(p + 9, l - 9);
if (line[l - 10] == '\r')
line.erase(l - 10, 1);
- DBG << "#" << _workerno << ": redirecting to" << line << endl;
+ XXX << "#" << _workerno << ": redirecting to" << line << endl;
return size;
}
if (l <= 14 || l >= 128 || strncasecmp(p, "Content-Range:", 14) != 0)
return size;
- p += 14;
- l -= 14;
+ p += 14;
+ l -= 14;
while (l && (*p == ' ' || *p == '\t'))
p++, l--;
- if (l < 6 || strncasecmp(p, "bytes", 5))
+ if (l < 6 || strncasecmp(p, "bytes", 5))
return size;
p += 5;
l -= 5;
char buf[128];
- memcpy(buf, p, l);
+ memcpy(buf, p, l);
buf[l] = 0;
unsigned long long start, off, filesize;
if (sscanf(buf, "%llu-%llu/%llu", &start, &off, &filesize) != 3)
}
if (_request->_filesize != (off_t)filesize)
{
- DBG << "#" << _workerno << ": filesize mismatch" << endl;
+ XXX << "#" << _workerno << ": filesize mismatch" << endl;
_state = WORKER_BROKEN;
strncpy(_curlError, "filesize mismatch", CURL_ERROR_SIZE);
}
_urlbuf = curlUrl.asString();
_curl = _request->_context->fromEasyPool(_url.getHost());
if (_curl)
- DBG << "reused worker from pool" << endl;
+ XXX << "reused worker from pool" << endl;
if (!_curl && !(_curl = curl_easy_init()))
{
_state = WORKER_BROKEN;
use_auth = "digest,basic"; // our default
long auth = CurlAuthData::auth_type_str2long(use_auth);
if( auth != CURLAUTH_NONE)
- {
- DBG << "#" << _workerno << ": Enabling HTTP authentication methods: " << use_auth
+ {
+ XXX << "#" << _workerno << ": Enabling HTTP authentication methods: " << use_auth
<< " (CURLOPT_HTTPAUTH=" << auth << ")" << std::endl;
curl_easy_setopt(_curl, CURLOPT_HTTPAUTH, auth);
}
curl_multi_remove_handle(_request->_multi, _curl);
if (_state == WORKER_DONE || _state == WORKER_SLEEP)
{
+#if CURLVERSION_AT_LEAST(7,15,5)
curl_easy_setopt(_curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)0);
+#endif
curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
curl_easy_setopt(_curl, CURLOPT_WRITEFUNCTION, (void *)0);
curl_easy_setopt(_curl, CURLOPT_WRITEDATA, (void *)0);
return;
}
- DBG << "checking DNS lookup of " << host << endl;
+ XXX << "checking DNS lookup of " << host << endl;
int pipefds[2];
if (pipe(pipefds))
{
struct addrinfo *ai, aihints;
memset(&aihints, 0, sizeof(aihints));
aihints.ai_family = PF_UNSPEC;
- int tstsock = socket(PF_INET6, SOCK_DGRAM, 0);
+ int tstsock = socket(PF_INET6, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (tstsock == -1)
aihints.ai_family = PF_INET;
else
void
multifetchworker::dnsevent(fd_set &rset)
{
-
+
if (_state != WORKER_LOOKUP || !FD_ISSET(_dnspipe, &rset))
return;
int status;
return;
}
int exitcode = WEXITSTATUS(status);
- DBG << "#" << _workerno << ": DNS lookup returned " << exitcode << endl;
+ XXX << "#" << _workerno << ": DNS lookup returned " << exitcode << endl;
if (exitcode != 0)
{
_state = WORKER_BROKEN;
bool
multifetchworker::checkChecksum()
{
- // DBG << "checkChecksum block " << _blkno << endl;
+ // XXX << "checkChecksum block " << _blkno << endl;
if (!_blksize || !_request->_blklist)
return true;
return _request->_blklist->verifyDigest(_blkno, _dig);
bool
multifetchworker::recheckChecksum()
{
- // DBG << "recheckChecksum block " << _blkno << endl;
+ // XXX << "recheckChecksum block " << _blkno << endl;
if (!_request->_fp || !_blksize || !_request->_blklist)
return true;
if (fseeko(_request->_fp, _blkstart, SEEK_SET))
{
if (!_request->_stealing)
{
- DBG << "start stealing!" << endl;
+ XXX << "start stealing!" << endl;
_request->_stealing = true;
}
multifetchworker *best = 0;
}
// lets see if we should sleep a bit
- DBG << "me #" << _workerno << ": " << _avgspeed << ", size " << best->_blksize << endl;
- DBG << "best #" << best->_workerno << ": " << best->_avgspeed << ", size " << (best->_blksize - best->_blkreceived) << endl;
- if (_avgspeed && best->_avgspeed && (best->_blksize - best->_blkreceived) * _avgspeed < best->_blksize * best->_avgspeed)
+ XXX << "me #" << _workerno << ": " << _avgspeed << ", size " << best->_blksize << endl;
+ XXX << "best #" << best->_workerno << ": " << best->_avgspeed << ", size " << (best->_blksize - best->_blkreceived) << endl;
+ if (_avgspeed && best->_avgspeed && best->_blksize - best->_blkreceived > 0 &&
+ (best->_blksize - best->_blkreceived) * _avgspeed < best->_blksize * best->_avgspeed)
{
if (!now)
now = currentTime();
double sl = (best->_blksize - best->_blkreceived) / best->_avgspeed * 2;
if (sl > 1)
sl = 1;
- DBG << "#" << _workerno << ": going to sleep for " << sl * 1000 << " ms" << endl;
+ XXX << "#" << _workerno << ": going to sleep for " << sl * 1000 << " ms" << endl;
_sleepuntil = now + sl;
_state = WORKER_SLEEP;
_request->_sleepworkers++;
stealjob();
return;
}
-
+
MediaBlockList *blklist = _request->_blklist;
if (!blklist)
{
else
{
MediaBlock blk = blklist->getBlock(_request->_blkno);
- while (_request->_blkoff >= blk.off + blk.size)
+ while (_request->_blkoff >= (off_t)(blk.off + blk.size))
{
if (++_request->_blkno == blklist->numBlocks())
{
sprintf(rangebuf, "%llu-", (unsigned long long)_blkstart);
else
sprintf(rangebuf, "%llu-%llu", (unsigned long long)_blkstart, (unsigned long long)_blkstart + _blksize - 1);
- DBG << "#" << _workerno << ": BLK " << _blkno << ":" << rangebuf << " " << _url << endl;
+ XXX << "#" << _workerno << ": BLK " << _blkno << ":" << rangebuf << " " << _url << endl;
if (curl_easy_setopt(_curl, CURLOPT_RANGE, !_noendrange || _blkstart != 0 ? rangebuf : (char *)0) != CURLE_OK)
{
_request->_activeworkers--;
if (_request->_blklist)
_request->_blklist->createDigest(_dig); // resets digest
_state = WORKER_FETCH;
-
+
double now = currentTime();
_blkstarttime = now;
_blkreceived = 0;
_timeout = 0;
_connect_timeout = 0;
_maxspeed = 0;
+ _maxworkers = 0;
if (blklist)
{
for (size_t blkno = 0; blkno < blklist->numBlocks(); blkno++)
if (_finished)
{
- DBG << "finished!" << endl;
+ XXX << "finished!" << endl;
break;
}
- if (_activeworkers < MAXWORKERS && urliter != urllist.end() && _workers.size() < MAXURLS)
+ if ((int)_activeworkers < _maxworkers && urliter != urllist.end() && _workers.size() < MAXURLS)
{
// spawn another worker!
multifetchworker *worker = new multifetchworker(workerno++, *this, *urliter);
continue;
if (_minsleepuntil == worker->_sleepuntil)
_minsleepuntil = 0;
- DBG << "#" << worker->_workerno << ": sleep done, wake up" << endl;
+ XXX << "#" << worker->_workerno << ": sleep done, wake up" << endl;
_sleepworkers--;
// nextjob chnages the state
worker->nextjob();
// collect all curl results, reschedule new jobs
CURLMsg *msg;
while ((msg = curl_multi_info_read(_multi, &nqueue)) != 0)
- {
- if (msg->msg != CURLMSG_DONE)
- continue;
+ {
+ if (msg->msg != CURLMSG_DONE)
+ continue;
+ CURL *easy = msg->easy_handle;
+ CURLcode cc = msg->data.result;
multifetchworker *worker;
- if (curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, &worker) != CURLE_OK)
+ if (curl_easy_getinfo(easy, CURLINFO_PRIVATE, &worker) != CURLE_OK)
ZYPP_THROW(MediaCurlException(_baseurl, "curl_easy_getinfo", "unknown error"));
- CURLcode cc = msg->data.result;
if (worker->_blkreceived && now > worker->_blkstarttime)
{
if (worker->_avgspeed)
- worker->_avgspeed = (worker->_avgspeed + worker->_blkreceived / (now - worker->_blkstarttime)) / 2;
+ worker->_avgspeed = (worker->_avgspeed + worker->_blkreceived / (now - worker->_blkstarttime)) / 2;
else
- worker->_avgspeed = worker->_blkreceived / (now - worker->_blkstarttime);
+ worker->_avgspeed = worker->_blkreceived / (now - worker->_blkstarttime);
}
- DBG << "#" << worker->_workerno << ": BLK " << worker->_blkno << " done code " << cc << " speed " << worker->_avgspeed << endl;
- curl_multi_remove_handle(_multi, msg->easy_handle);
+ XXX << "#" << worker->_workerno << ": BLK " << worker->_blkno << " done code " << cc << " speed " << worker->_avgspeed << endl;
+ curl_multi_remove_handle(_multi, easy);
if (cc == CURLE_HTTP_RETURNED_ERROR)
- {
- long statuscode = 0;
- (void)curl_easy_getinfo(msg->easy_handle, CURLINFO_RESPONSE_CODE, &statuscode);
- DBG << "HTTP status " << statuscode << endl;
+ {
+ long statuscode = 0;
+ (void)curl_easy_getinfo(easy, CURLINFO_RESPONSE_CODE, &statuscode);
+ XXX << "HTTP status " << statuscode << endl;
if (statuscode == 416 && !_blklist) /* Range error */
{
if (_filesize == off_t(-1))
{
if (!worker->_noendrange)
{
- DBG << "#" << worker->_workerno << ": retrying with no end range" << endl;
+ XXX << "#" << worker->_workerno << ": retrying with no end range" << endl;
worker->_noendrange = true;
worker->run();
continue;
continue;
}
}
- }
+ }
if (cc == 0)
{
if (!worker->checkChecksum())
// with something broken. Thus we have to re-check the block.
if (!worker->recheckChecksum())
{
- DBG << "#" << worker->_workerno << ": recheck checksum error, refetch block" << endl;
+ XXX << "#" << worker->_workerno << ": recheck checksum error, refetch block" << endl;
// re-fetch! No need to worry about the bad workers,
// they will now be set to DISCARD. At the end of their block
// they will notice that they wrote bad data and go into BROKEN.
int maxworkerno = 0;
int numbetter = 0;
for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
- {
+ {
multifetchworker *oworker = *workeriter;
if (oworker->_state == WORKER_BROKEN)
continue;
ratio = ratio * ratio;
if (ratio > .01)
{
- DBG << "#" << worker->_workerno << ": too slow ("<< ratio << ", " << worker->_avgspeed << ", #" << maxworkerno << ": " << maxavg << "), going to sleep for " << ratio * 1000 << " ms" << endl;
+ XXX << "#" << worker->_workerno << ": too slow ("<< ratio << ", " << worker->_avgspeed << ", #" << maxworkerno << ": " << maxavg << "), going to sleep for " << ratio * 1000 << " ms" << endl;
worker->_sleepuntil = now + ratio;
worker->_state = WORKER_SLEEP;
_sleepworkers++;
{
double avg = _fetchedsize / (now - _starttime);
avg = worker->_maxspeed * _maxspeed / avg;
- if (avg < _maxspeed / MAXWORKERS)
- avg = _maxspeed / MAXWORKERS;
+ if (avg < _maxspeed / _maxworkers)
+ avg = _maxspeed / _maxworkers;
if (avg > _maxspeed)
avg = _maxspeed;
if (avg < 1024)
avg = 1024;
worker->_maxspeed = avg;
+#if CURLVERSION_AT_LEAST(7,15,5)
curl_easy_setopt(worker->_curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)(avg));
+#endif
}
worker->nextjob();
_customHeadersMetalink = curl_slist_append(_customHeadersMetalink, "Accept: */*, application/metalink+xml, application/metalink4+xml");
}
+static bool looks_like_metalink_fd(int fd) // sniff: does the data at the start of fd look like a metalink XML document?
+{
+ char buf[256], *p;
+ int l;
+ while ((l = pread(fd, buf, sizeof(buf) - 1, (off_t)0)) == -1 && errno == EINTR) // peek at offset 0 without moving the file offset; retry on EINTR
+ ;
+ if (l == -1)
+ return 0; // read error: treat as "not a metalink"
+ buf[l] = 0; // NUL-terminate so the str* calls below are safe
+ p = buf;
+ while (*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n') // skip leading whitespace
+ p++;
+ if (!strncasecmp(p, "<?xml", 5)) // skip an optional "<?xml ...?>" declaration
+ {
+ while (*p && *p != '>')
+ p++;
+ if (*p == '>')
+ p++;
+ while (*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n')
+ p++;
+ }
+ bool ret = !strncasecmp(p, "<metalink", 9) ? true : false; // metalink iff the root element is <metalink ...>
+ return ret;
+}
+
+static bool looks_like_metalink(const Pathname & file) // convenience wrapper: open file read-only and sniff it via looks_like_metalink_fd
+{
+ int fd;
+ if ((fd = open(file.asString().c_str(), O_RDONLY|O_CLOEXEC)) == -1)
+ return false; // cannot open: not a metalink
+ bool ret = looks_like_metalink_fd(fd);
+ close(fd);
+ DBG << "looks_like_metalink(" << file << "): " << ret << endl;
+ return ret;
+}
+
+// here we try to suppress all progress coming from a metalink download
+int MediaMultiCurl::progressCallback( void *clientp, double dltotal, double dlnow, double ultotal, double ulnow)
+{
+ CURL *_curl = MediaCurl::progressCallback_getcurl(clientp); // recover the easy handle for this transfer from the callback payload
+ if (!_curl)
+ return 0;
+
+ // work around curl bug that gives us old data
+ long httpReturnCode = 0;
+ if (curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &httpReturnCode ) != CURLE_OK || httpReturnCode == 0)
+ return 0;
+
+ char *ptr = NULL;
+ bool ismetalink = false;
+ if (curl_easy_getinfo(_curl, CURLINFO_CONTENT_TYPE, &ptr) == CURLE_OK && ptr) // first try the server-reported content type
+ {
+ string ct = string(ptr);
+ if (ct.find("application/metalink+xml") == 0 || ct.find("application/metalink4+xml") == 0)
+ ismetalink = true;
+ }
+ if (!ismetalink && dlnow < 256)
+ {
+ // can't tell yet, suppress callback
+ return 0;
+ }
+ if (!ismetalink)
+ {
+ FILE *fp = 0;
+ if (curl_easy_getinfo(_curl, CURLINFO_PRIVATE, &fp) != CURLE_OK) // CURLOPT_PRIVATE holds the target FILE* (set in doGetFileCopy)
+ return 0;
+ if (!fp)
+ return 0; /* hmm */
+ fflush(fp); // make buffered data visible to the pread() inside looks_like_metalink_fd
+ ismetalink = looks_like_metalink_fd(fileno(fp));
+ DBG << "looks_like_metalink_fd: " << ismetalink << endl;
+ }
+ if (ismetalink)
+ {
+ // we're downloading the metalink file. no progress please.
+ curl_easy_setopt(_curl, CURLOPT_NOPROGRESS, 1L);
+ return 0;
+ }
+ curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &MediaCurl::progressCallback); // payload download confirmed: hand back to the normal progress callback
+ return MediaCurl::progressCallback(clientp, dltotal, dlnow, ultotal, ulnow);
+}
void MediaMultiCurl::doGetFileCopy( const Pathname & filename , const Pathname & target, callback::SendReport<DownloadProgressReport> & report, RequestOptions options ) const
{
ZYPP_THROW(MediaSystemException(url, "out of memory for temp file name"));
}
- int tmp_fd = ::mkstemp( buf );
+ int tmp_fd = ::mkostemp( buf, O_CLOEXEC );
if( tmp_fd == -1)
{
free( buf);
destNew = buf;
free( buf);
- FILE *file = ::fdopen( tmp_fd, "w" );
+ FILE *file = ::fdopen( tmp_fd, "we" );
if ( !file ) {
::close( tmp_fd);
filesystem::unlink( destNew );
}
// change header to include Accept: metalink
curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeadersMetalink);
+ // change to our own progress function
+ curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &progressCallback);
+ curl_easy_setopt(_curl, CURLOPT_PRIVATE, file);
try
{
MediaCurl::doGetFileCopyFile(filename, dest, file, report, options);
curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeaders);
+ curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
ZYPP_RETHROW(ex);
}
curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeaders);
+ curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
long httpReturnCode = 0;
CURLcode infoRet = curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &httpReturnCode);
if (infoRet == CURLE_OK)
{
WAR << "Could not get the reponse code." << endl;
}
+
+ bool ismetalink = false;
+
char *ptr = NULL;
if (curl_easy_getinfo(_curl, CURLINFO_CONTENT_TYPE, &ptr) == CURLE_OK && ptr)
{
string ct = string(ptr);
if (ct.find("application/metalink+xml") == 0 || ct.find("application/metalink4+xml") == 0)
+ ismetalink = true;
+ }
+
+ if (!ismetalink)
+ {
+ // some proxies do not store the content type, so also look at the file to find
+ // out if we received a metalink (bnc#649925)
+ fflush(file);
+ if (looks_like_metalink(Pathname(destNew)))
+ ismetalink = true;
+ }
+
+ if (ismetalink)
+ {
+ bool userabort = false;
+ fclose(file);
+ file = NULL;
+ Pathname failedFile = ZConfig::instance().repoCachePath() / "MultiCurl.failed";
+ try
{
- bool userabort = false;
- fclose(file);
- file = NULL;
- Pathname failedFile = ZConfig::instance().repoCachePath() / "MultiCurl.failed";
+ MetaLinkParser mlp;
+ mlp.parse(Pathname(destNew));
+ MediaBlockList bl = mlp.getBlockList();
+ vector<Url> urls = mlp.getUrls();
+ XXX << bl << endl;
+ file = fopen(destNew.c_str(), "w+e");
+ if (!file)
+ ZYPP_THROW(MediaWriteException(destNew));
+ if (PathInfo(target).isExist())
+ {
+ XXX << "reusing blocks from file " << target << endl;
+ bl.reuseBlocks(file, target.asString());
+ XXX << bl << endl;
+ }
+ if (bl.haveChecksum(1) && PathInfo(failedFile).isExist())
+ {
+ XXX << "reusing blocks from file " << failedFile << endl;
+ bl.reuseBlocks(file, failedFile.asString());
+ XXX << bl << endl;
+ filesystem::unlink(failedFile);
+ }
+ Pathname df = deltafile();
+ if (!df.empty())
+ {
+ XXX << "reusing blocks from file " << df << endl;
+ bl.reuseBlocks(file, df.asString());
+ XXX << bl << endl;
+ }
try
{
- MetaLinkParser mlp;
- mlp.parse(Pathname(destNew));
- MediaBlockList bl = mlp.getBlockList();
- vector<Url> urls = mlp.getUrls();
- DBG << bl << endl;
- file = fopen(destNew.c_str(), "w+");
- if (!file)
- ZYPP_THROW(MediaWriteException(destNew));
- if (PathInfo(target).isExist())
- {
- DBG << "reusing blocks from file " << target << endl;
- bl.reuseBlocks(file, target.asString());
- DBG << bl << endl;
- }
- if (bl.haveChecksum(1) && PathInfo(failedFile).isExist())
- {
- DBG << "reusing blocks from file " << failedFile << endl;
- bl.reuseBlocks(file, failedFile.asString());
- DBG << bl << endl;
- filesystem::unlink(failedFile);
- }
- Pathname df = deltafile();
- if (!df.empty())
- {
- DBG << "reusing blocks from file " << df << endl;
- bl.reuseBlocks(file, df.asString());
- DBG << bl << endl;
- }
- try
- {
- multifetch(filename, file, &urls, &report, &bl);
- }
- catch (MediaCurlException &ex)
- {
- userabort = ex.errstr() == "User abort";
- ZYPP_RETHROW(ex);
- }
+ multifetch(filename, file, &urls, &report, &bl);
}
- catch (Exception &ex)
+ catch (MediaCurlException &ex)
{
- // something went wrong. fall back to normal download
- if (file)
- fclose(file);
- file = NULL;
- if (PathInfo(destNew).size() >= 63336)
- {
- ::unlink(failedFile.asString().c_str());
- filesystem::hardlinkCopy(destNew, failedFile);
- }
- if (userabort)
- {
- filesystem::unlink(destNew);
- ZYPP_RETHROW(ex);
- }
- file = fopen(destNew.c_str(), "w+");
- if (!file)
- ZYPP_THROW(MediaWriteException(destNew));
- MediaCurl::doGetFileCopyFile(filename, dest, file, report, options | OPTION_NO_REPORT_START);
+ userabort = ex.errstr() == "User abort";
+ ZYPP_RETHROW(ex);
+ }
+ }
+ catch (Exception &ex)
+ {
+ // something went wrong. fall back to normal download
+ if (file)
+ fclose(file);
+ file = NULL;
+ if (PathInfo(destNew).size() >= 63336)
+ {
+ ::unlink(failedFile.asString().c_str());
+ filesystem::hardlinkCopy(destNew, failedFile);
}
+ if (userabort)
+ {
+ filesystem::unlink(destNew);
+ ZYPP_RETHROW(ex);
+ }
+ file = fopen(destNew.c_str(), "w+e");
+ if (!file)
+ ZYPP_THROW(MediaWriteException(destNew));
+ MediaCurl::doGetFileCopyFile(filename, dest, file, report, options | OPTION_NO_REPORT_START);
}
}
+
if (::fchmod( ::fileno(file), filesystem::applyUmaskTo( 0644 )))
{
ERR << "Failed to chmod file " << destNew << endl;
req._timeout = _settings.timeout();
req._connect_timeout = _settings.connectTimeout();
req._maxspeed = _settings.maxDownloadSpeed();
+ req._maxworkers = _settings.maxConcurrentConnections();
+ if (req._maxworkers > MAXURLS)
+ req._maxworkers = MAXURLS;
+ if (req._maxworkers <= 0)
+ req._maxworkers = 1;
std::vector<Url> myurllist;
for (std::vector<Url>::iterator urliter = urllist->begin(); urliter != urllist->end(); ++urliter)
{
try
{
string scheme = urliter->getScheme();
- if (scheme == "http" || scheme == "https" || scheme == "ftp")
+ if (scheme == "http" || scheme == "https" || scheme == "ftp" || scheme == "tftp")
{
checkProtocol(*urliter);
myurllist.push_back(*urliter);