libzypp  12.16.5
MediaMultiCurl.cc
/*---------------------------------------------------------------------\
|                          ____ _   __ __ ___                          |
|                         |__  / \ / / . \ . \                         |
|                           / / \ V /|  _/  _/                         |
|                          / /__ | | | | | |                           |
|                         /_____||_| |_| |_|                           |
|                                                                      |
\---------------------------------------------------------------------*/
/** \file zypp/media/MediaMultiCurl.cc
 *
*/
#include <ctype.h>
#include <sys/types.h>
#include <signal.h>
#include <sys/wait.h>
#include <netdb.h>
#include <arpa/inet.h>

#include <vector>
#include <iostream>
#include <algorithm>


#include "zypp/ZConfig.h"
#include "zypp/base/Logger.h"
#include "zypp/media/MediaMultiCurl.h"
#include "zypp/media/MetaLinkParser.h"

using namespace std;
using namespace zypp::base;

#undef CURLVERSION_AT_LEAST
#define CURLVERSION_AT_LEAST(M,N,O) LIBCURL_VERSION_NUM >= ((((M)<<8)+(N))<<8)+(O)

namespace zypp {
  namespace media {


class multifetchrequest;

// Hack: we derive from MediaCurl just to get the storage space for
// settings, url, curlerrors and the like

class multifetchworker : MediaCurl {
  friend class multifetchrequest;

public:
  multifetchworker(int no, multifetchrequest &request, const Url &url);
  ~multifetchworker();
  void nextjob();
  void run();
  bool checkChecksum();
  bool recheckChecksum();
  void disableCompetition();

  void checkdns();
  void adddnsfd(fd_set &rset, int &maxfd);
  void dnsevent(fd_set &rset);

  int _workerno;

  int _state;
  bool _competing;

  size_t _blkno;
  off_t _blkstart;
  size_t _blksize;
  bool _noendrange;

  double _blkstarttime;
  size_t _blkreceived;
  off_t _received;

  double _avgspeed;
  double _maxspeed;

  double _sleepuntil;

private:
  void stealjob();

  size_t writefunction(void *ptr, size_t size);
  static size_t _writefunction(void *ptr, size_t size, size_t nmemb, void *stream);

  size_t headerfunction(char *ptr, size_t size);
  static size_t _headerfunction(void *ptr, size_t size, size_t nmemb, void *stream);

  multifetchrequest *_request;
  int _pass;
  string _urlbuf;
  off_t _off;
  size_t _size;
  Digest _dig;

  pid_t _pid;
  int _dnspipe;
};

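// Worker life cycle: a worker starts in WORKER_STARTING, goes through
// WORKER_LOOKUP while its forked DNS check runs, downloads blocks in
// WORKER_FETCH, switches to WORKER_DISCARD when a competing worker already
// delivered the block, naps in WORKER_SLEEP when it is too slow, and ends
// up in WORKER_DONE or, after an error, WORKER_BROKEN.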
#define WORKER_STARTING 0
#define WORKER_LOOKUP 1
#define WORKER_FETCH 2
#define WORKER_DISCARD 3
#define WORKER_DONE 4
#define WORKER_SLEEP 5
#define WORKER_BROKEN 6


class multifetchrequest {
public:
  multifetchrequest(const MediaMultiCurl *context, const Pathname &filename, const Url &baseurl, CURLM *multi, FILE *fp, callback::SendReport<DownloadProgressReport> *report, MediaBlockList *blklist, off_t filesize);
  ~multifetchrequest();

  void run(std::vector<Url> &urllist);

protected:
  friend class multifetchworker;

  const MediaMultiCurl *_context;
  const Pathname _filename;
  Url _baseurl;

  FILE *_fp;
  callback::SendReport<DownloadProgressReport> *_report;
  MediaBlockList *_blklist;
  off_t _filesize;

  CURLM *_multi;

  std::list<multifetchworker *> _workers;
  bool _stealing;
  bool _havenewjob;

  size_t _blkno;
  off_t _blkoff;

  size_t _activeworkers;
  size_t _lookupworkers;
  size_t _sleepworkers;
  double _minsleepuntil;
  bool _finished;
  off_t _totalsize;
  off_t _fetchedsize;
  off_t _fetchedgoodsize;

  double _starttime;
  double _lastprogress;

  double _lastperiodstart;
  double _lastperiodfetched;
  double _periodavg;

public:
  double _timeout;
  double _connect_timeout;
  double _maxspeed;
  int _maxworkers;
};

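// Fetch granularity and mirror limit: blocks are requested in chunks of at
// most 128 kB, and at most 10 mirror URLs are used at the same time.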
#define BLKSIZE 131072
#define MAXURLS 10



static double
currentTime()
{
  struct timeval tv;
  if (gettimeofday(&tv, NULL))
    return 0;
  return tv.tv_sec + tv.tv_usec / 1000000.;
}

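// libcurl write callback: verify that ranged requests really got a 206
// reply, write the data at the block's offset in the target file, feed the
// block digest, and signal an error to libcurl by returning a short count.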
size_t
multifetchworker::writefunction(void *ptr, size_t size)
{
  size_t len, cnt;
  if (_state == WORKER_BROKEN)
    return size ? 0 : 1;

  double now = currentTime();

  len = size > _size ? _size : size;
  if (!len)
    {
      // kill this job?
      return size;
    }

  if (_blkstart && _off == _blkstart)
    {
      // make sure that the server replied with "partial content"
      // for http requests
      char *effurl;
      (void)curl_easy_getinfo(_curl, CURLINFO_EFFECTIVE_URL, &effurl);
      if (effurl && !strncasecmp(effurl, "http", 4))
        {
          long statuscode = 0;
          (void)curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &statuscode);
          if (statuscode != 206)
            return size ? 0 : 1;
        }
    }

  _blkreceived += len;
  _received += len;

  _request->_lastprogress = now;

  if (_state == WORKER_DISCARD || !_request->_fp)
    {
      // block is no longer needed
      // still calculate the checksum so that we can throw out bad servers
      if (_request->_blklist)
        _dig.update((const char *)ptr, len);
      _off += len;
      _size -= len;
      return size;
    }
  if (fseeko(_request->_fp, _off, SEEK_SET))
    return size ? 0 : 1;
  cnt = fwrite(ptr, 1, len, _request->_fp);
  if (cnt > 0)
    {
      _request->_fetchedsize += cnt;
      if (_request->_blklist)
        _dig.update((const char *)ptr, cnt);
      _off += cnt;
      _size -= cnt;
      if (cnt == len)
        return size;
    }
  return cnt;
}

size_t
multifetchworker::_writefunction(void *ptr, size_t size, size_t nmemb, void *stream)
{
  multifetchworker *me = reinterpret_cast<multifetchworker *>(stream);
  return me->writefunction(ptr, size * nmemb);
}

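// libcurl header callback: log redirects and parse the Content-Range reply
// to learn the total file size (and drop workers that disagree about it).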
size_t
multifetchworker::headerfunction(char *p, size_t size)
{
  size_t l = size;
  if (l > 9 && !strncasecmp(p, "Location:", 9))
    {
      string line(p + 9, l - 9);
      if (line[l - 10] == '\r')
        line.erase(l - 10, 1);
      XXX << "#" << _workerno << ": redirecting to " << line << endl;
      return size;
    }
  if (l <= 14 || l >= 128 || strncasecmp(p, "Content-Range:", 14) != 0)
    return size;
  p += 14;
  l -= 14;
  while (l && (*p == ' ' || *p == '\t'))
    p++, l--;
  if (l < 6 || strncasecmp(p, "bytes", 5))
    return size;
  p += 5;
  l -= 5;
  char buf[128];
  memcpy(buf, p, l);
  buf[l] = 0;
  unsigned long long start, off, filesize;
  if (sscanf(buf, "%llu-%llu/%llu", &start, &off, &filesize) != 3)
    return size;
  if (_request->_filesize == (off_t)-1)
    {
      WAR << "#" << _workerno << ": setting request filesize to " << filesize << endl;
      _request->_filesize = filesize;
      if (_request->_totalsize == 0 && !_request->_blklist)
        _request->_totalsize = filesize;
    }
  if (_request->_filesize != (off_t)filesize)
    {
      XXX << "#" << _workerno << ": filesize mismatch" << endl;
      _state = WORKER_BROKEN;
      strncpy(_curlError, "filesize mismatch", CURL_ERROR_SIZE);
    }
  return size;
}

size_t
multifetchworker::_headerfunction(void *ptr, size_t size, size_t nmemb, void *stream)
{
  multifetchworker *me = reinterpret_cast<multifetchworker *>(stream);
  return me->headerfunction((char *)ptr, size * nmemb);
}

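// Set up one worker for the given mirror: reuse a pooled easy handle if
// possible, install the callbacks, copy credentials when the mirror is the
// request's own host, and start the asynchronous DNS check.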
multifetchworker::multifetchworker(int no, multifetchrequest &request, const Url &url)
: MediaCurl(url, Pathname())
{
  _workerno = no;
  _request = &request;
  _state = WORKER_STARTING;
  _competing = false;
  _off = _blkstart = 0;
  _size = _blksize = 0;
  _pass = 0;
  _blkno = 0;
  _pid = 0;
  _dnspipe = -1;
  _blkreceived = 0;
  _received = 0;
  _blkstarttime = 0;
  _avgspeed = 0;
  _sleepuntil = 0;
  _maxspeed = request._maxspeed;
  _noendrange = false;

  Url curlUrl( clearQueryString(url) );
  _urlbuf = curlUrl.asString();
  _curl = _request->_context->fromEasyPool(_url.getHost());
  if (_curl)
    XXX << "reused worker from pool" << endl;
  if (!_curl && !(_curl = curl_easy_init()))
    {
      _state = WORKER_BROKEN;
      strncpy(_curlError, "curl_easy_init failed", CURL_ERROR_SIZE);
      return;
    }
  try
    {
      setupEasy();
    }
  catch (Exception &ex)
    {
      curl_easy_cleanup(_curl);
      _curl = 0;
      _state = WORKER_BROKEN;
      strncpy(_curlError, "curl_easy_setopt failed", CURL_ERROR_SIZE);
      return;
    }
  curl_easy_setopt(_curl, CURLOPT_PRIVATE, this);
  curl_easy_setopt(_curl, CURLOPT_URL, _urlbuf.c_str());
  curl_easy_setopt(_curl, CURLOPT_WRITEFUNCTION, &_writefunction);
  curl_easy_setopt(_curl, CURLOPT_WRITEDATA, this);
  if (_request->_filesize == off_t(-1) || !_request->_blklist || !_request->_blklist->haveChecksum(0))
    {
      curl_easy_setopt(_curl, CURLOPT_HEADERFUNCTION, &_headerfunction);
      curl_easy_setopt(_curl, CURLOPT_HEADERDATA, this);
    }
  // if this is the same host copy authorization
  // (the host check is also what curl does when doing a redirect)
  // (note also that unauthorized exceptions are thrown with the request host)
  if (url.getHost() == _request->_context->_url.getHost())
    {
      _settings.setUsername(_request->_context->_settings.username());
      _settings.setPassword(_request->_context->_settings.password());
      _settings.setAuthType(_request->_context->_settings.authType());
      if ( _settings.userPassword().size() )
        {
          curl_easy_setopt(_curl, CURLOPT_USERPWD, _settings.userPassword().c_str());
          string use_auth = _settings.authType();
          if (use_auth.empty())
            use_auth = "digest,basic";  // our default
          long auth = CurlAuthData::auth_type_str2long(use_auth);
          if( auth != CURLAUTH_NONE)
            {
              XXX << "#" << _workerno << ": Enabling HTTP authentication methods: " << use_auth
                  << " (CURLOPT_HTTPAUTH=" << auth << ")" << std::endl;
              curl_easy_setopt(_curl, CURLOPT_HTTPAUTH, auth);
            }
        }
    }
  checkdns();
}

multifetchworker::~multifetchworker()
{
  if (_curl)
    {
      if (_state == WORKER_FETCH || _state == WORKER_DISCARD)
        curl_multi_remove_handle(_request->_multi, _curl);
      if (_state == WORKER_DONE || _state == WORKER_SLEEP)
        {
#if CURLVERSION_AT_LEAST(7,15,5)
          curl_easy_setopt(_curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)0);
#endif
          curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
          curl_easy_setopt(_curl, CURLOPT_WRITEFUNCTION, (void *)0);
          curl_easy_setopt(_curl, CURLOPT_WRITEDATA, (void *)0);
          curl_easy_setopt(_curl, CURLOPT_HEADERFUNCTION, (void *)0);
          curl_easy_setopt(_curl, CURLOPT_HEADERDATA, (void *)0);
          _request->_context->toEasyPool(_url.getHost(), _curl);
        }
      else
        curl_easy_cleanup(_curl);
      _curl = 0;
    }
  if (_pid)
    {
      kill(_pid, SIGKILL);
      int status;
      while (waitpid(_pid, &status, 0) == -1)
        if (errno != EINTR)
          break;
      _pid = 0;
    }
  if (_dnspipe != -1)
    {
      close(_dnspipe);
      _dnspipe = -1;
    }
  // the destructor in MediaCurl doesn't call disconnect() if
  // the media is not attached, so we do it here manually
  disconnectFrom();
}

static inline bool env_isset(string name)
{
  const char *s = getenv(name.c_str());
  return s && *s ? true : false;
}

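// Resolve the worker's host in a forked child so that a stuck name lookup
// cannot stall the download; the parent polls the pipe and reaps the child
// in dnsevent(). Numeric addresses, proxied hosts and hosts already
// verified are skipped.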
void
multifetchworker::checkdns()
{
  string host = _url.getHost();

  if (host.empty())
    return;

  if (_request->_context->isDNSok(host))
    return;

  // no need to do dns checking for numeric hosts
  char addrbuf[128];
  if (inet_pton(AF_INET, host.c_str(), addrbuf) == 1)
    return;
  if (inet_pton(AF_INET6, host.c_str(), addrbuf) == 1)
    return;

  // no need to do dns checking if we use a proxy
  if (!_settings.proxy().empty())
    return;
  if (env_isset("all_proxy") || env_isset("ALL_PROXY"))
    return;
  string schemeproxy = _url.getScheme() + "_proxy";
  if (env_isset(schemeproxy))
    return;
  if (schemeproxy != "http_proxy")
    {
      std::transform(schemeproxy.begin(), schemeproxy.end(), schemeproxy.begin(), ::toupper);
      if (env_isset(schemeproxy))
        return;
    }

  XXX << "checking DNS lookup of " << host << endl;
  int pipefds[2];
  if (pipe(pipefds))
    {
      _state = WORKER_BROKEN;
      strncpy(_curlError, "DNS pipe creation failed", CURL_ERROR_SIZE);
      return;
    }
  _pid = fork();
  if (_pid == pid_t(-1))
    {
      close(pipefds[0]);
      close(pipefds[1]);
      _pid = 0;
      _state = WORKER_BROKEN;
      strncpy(_curlError, "DNS checker fork failed", CURL_ERROR_SIZE);
      return;
    }
  else if (_pid == 0)
    {
      close(pipefds[0]);
      // XXX: close all other file descriptors
      struct addrinfo *ai, aihints;
      memset(&aihints, 0, sizeof(aihints));
      aihints.ai_family = PF_UNSPEC;
      int tstsock = socket(PF_INET6, SOCK_DGRAM | SOCK_CLOEXEC, 0);
      if (tstsock == -1)
        aihints.ai_family = PF_INET;
      else
        close(tstsock);
      aihints.ai_socktype = SOCK_STREAM;
      aihints.ai_flags = AI_CANONNAME;
      unsigned int connecttimeout = _request->_connect_timeout;
      if (connecttimeout)
        alarm(connecttimeout);
      signal(SIGALRM, SIG_DFL);
      if (getaddrinfo(host.c_str(), NULL, &aihints, &ai))
        _exit(1);
      _exit(0);
    }
  close(pipefds[1]);
  _dnspipe = pipefds[0];
  _state = WORKER_LOOKUP;
}

void
multifetchworker::adddnsfd(fd_set &rset, int &maxfd)
{
  if (_state != WORKER_LOOKUP)
    return;
  FD_SET(_dnspipe, &rset);
  if (maxfd < _dnspipe)
    maxfd = _dnspipe;
}

void
multifetchworker::dnsevent(fd_set &rset)
{
  if (_state != WORKER_LOOKUP || !FD_ISSET(_dnspipe, &rset))
    return;
  int status;
  while (waitpid(_pid, &status, 0) == -1)
    {
      if (errno != EINTR)
        return;
    }
  _pid = 0;
  if (_dnspipe != -1)
    {
      close(_dnspipe);
      _dnspipe = -1;
    }
  if (!WIFEXITED(status))
    {
      _state = WORKER_BROKEN;
      strncpy(_curlError, "DNS lookup failed", CURL_ERROR_SIZE);
      _request->_activeworkers--;
      return;
    }
  int exitcode = WEXITSTATUS(status);
  XXX << "#" << _workerno << ": DNS lookup returned " << exitcode << endl;
  if (exitcode != 0)
    {
      _state = WORKER_BROKEN;
      strncpy(_curlError, "DNS lookup failed", CURL_ERROR_SIZE);
      _request->_activeworkers--;
      return;
    }
  _request->_context->setDNSok(_url.getHost());
  nextjob();
}

bool
multifetchworker::checkChecksum()
{
  // XXX << "checkChecksum block " << _blkno << endl;
  if (!_blksize || !_request->_blklist)
    return true;
  return _request->_blklist->verifyDigest(_blkno, _dig);
}

bool
multifetchworker::recheckChecksum()
{
  // XXX << "recheckChecksum block " << _blkno << endl;
  if (!_request->_fp || !_blksize || !_request->_blklist)
    return true;
  if (fseeko(_request->_fp, _blkstart, SEEK_SET))
    return false;
  char buf[4096];
  size_t l = _blksize;
  _request->_blklist->createDigest(_dig);       // resets digest
  while (l)
    {
      size_t cnt = l > sizeof(buf) ? sizeof(buf) : l;
      if (fread(buf, cnt, 1, _request->_fp) != 1)
        return false;
      _dig.update(buf, cnt);
      l -= cnt;
    }
  return _request->_blklist->verifyDigest(_blkno, _dig);
}

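// Once all blocks are assigned, an idle worker "steals" the block of the
// least promising competitor and fetches it in parallel; whoever finishes
// first wins. A worker that cannot beat the current owner is put to sleep
// for a while instead of wasting bandwidth.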
void
multifetchworker::stealjob()
{
  if (!_request->_stealing)
    {
      XXX << "start stealing!" << endl;
      _request->_stealing = true;
    }
  multifetchworker *best = 0;
  std::list<multifetchworker *>::iterator workeriter = _request->_workers.begin();
  double now = 0;
  for (; workeriter != _request->_workers.end(); ++workeriter)
    {
      multifetchworker *worker = *workeriter;
      if (worker == this)
        continue;
      if (worker->_pass == -1)
        continue;       // do not steal!
      if (worker->_state == WORKER_DISCARD || worker->_state == WORKER_DONE || worker->_state == WORKER_SLEEP || !worker->_blksize)
        continue;       // do not steal finished jobs
      if (!worker->_avgspeed && worker->_blkreceived)
        {
          if (!now)
            now = currentTime();
          if (now > worker->_blkstarttime)
            worker->_avgspeed = worker->_blkreceived / (now - worker->_blkstarttime);
        }
      if (!best || best->_pass > worker->_pass)
        {
          best = worker;
          continue;
        }
      if (best->_pass < worker->_pass)
        continue;
      // if it is the same block, we want to know the best worker, otherwise the worst
      if (worker->_blkstart == best->_blkstart)
        {
          if ((worker->_blksize - worker->_blkreceived) * best->_avgspeed < (best->_blksize - best->_blkreceived) * worker->_avgspeed)
            best = worker;
        }
      else
        {
          if ((worker->_blksize - worker->_blkreceived) * best->_avgspeed > (best->_blksize - best->_blkreceived) * worker->_avgspeed)
            best = worker;
        }
    }
  if (!best)
    {
      _state = WORKER_DONE;
      _request->_activeworkers--;
      _request->_finished = true;
      return;
    }
  // do not sleep twice
  if (_state != WORKER_SLEEP)
    {
      if (!_avgspeed && _blkreceived)
        {
          if (!now)
            now = currentTime();
          if (now > _blkstarttime)
            _avgspeed = _blkreceived / (now - _blkstarttime);
        }

      // lets see if we should sleep a bit
      XXX << "me #" << _workerno << ": " << _avgspeed << ", size " << best->_blksize << endl;
      XXX << "best #" << best->_workerno << ": " << best->_avgspeed << ", size " << (best->_blksize - best->_blkreceived) << endl;
      if (_avgspeed && best->_avgspeed && best->_blksize - best->_blkreceived > 0 &&
          (best->_blksize - best->_blkreceived) * _avgspeed < best->_blksize * best->_avgspeed)
        {
          if (!now)
            now = currentTime();
          double sl = (best->_blksize - best->_blkreceived) / best->_avgspeed * 2;
          if (sl > 1)
            sl = 1;
          XXX << "#" << _workerno << ": going to sleep for " << sl * 1000 << " ms" << endl;
          _sleepuntil = now + sl;
          _state = WORKER_SLEEP;
          _request->_sleepworkers++;
          return;
        }
    }

  _competing = true;
  best->_competing = true;
  _blkstart = best->_blkstart;
  _blksize = best->_blksize;
  best->_pass++;
  _pass = best->_pass;
  _blkno = best->_blkno;
  run();
}

void
multifetchworker::disableCompetition()
{
  std::list<multifetchworker *>::iterator workeriter = _request->_workers.begin();
  for (; workeriter != _request->_workers.end(); ++workeriter)
    {
      multifetchworker *worker = *workeriter;
      if (worker == this)
        continue;
      if (worker->_blkstart == _blkstart)
        {
          if (worker->_state == WORKER_FETCH)
            worker->_state = WORKER_DISCARD;
          worker->_pass = -1;   /* do not steal this one, we already have it */
        }
    }
}

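// Assign the next unfetched block (or the next BLKSIZE byte range when
// there is no block list) to this worker and advance the request's block
// cursor; when the file is exhausted, turn to stealing.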
void
multifetchworker::nextjob()
{
  _noendrange = false;
  if (_request->_stealing)
    {
      stealjob();
      return;
    }

  MediaBlockList *blklist = _request->_blklist;
  if (!blklist)
    {
      _blksize = BLKSIZE;
      if (_request->_filesize != off_t(-1))
        {
          if (_request->_blkoff >= _request->_filesize)
            {
              stealjob();
              return;
            }
          _blksize = _request->_filesize - _request->_blkoff;
          if (_blksize > BLKSIZE)
            _blksize = BLKSIZE;
        }
    }
  else
    {
      MediaBlock blk = blklist->getBlock(_request->_blkno);
      while (_request->_blkoff >= (off_t)(blk.off + blk.size))
        {
          if (++_request->_blkno == blklist->numBlocks())
            {
              stealjob();
              return;
            }
          blk = blklist->getBlock(_request->_blkno);
          _request->_blkoff = blk.off;
        }
      _blksize = blk.off + blk.size - _request->_blkoff;
      if (_blksize > BLKSIZE && !blklist->haveChecksum(_request->_blkno))
        _blksize = BLKSIZE;
    }
  _blkstart = _request->_blkoff;
  _blkno = _request->_blkno;
  _request->_blkoff += _blksize;
  run();
}

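// Program the easy handle with the Range for the current block, reset the
// block digest and hand the handle to the multi stack.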
void
multifetchworker::run()
{
  char rangebuf[128];

  if (_state == WORKER_BROKEN || _state == WORKER_DONE)
    return;     // just in case...
  if (_noendrange)
    sprintf(rangebuf, "%llu-", (unsigned long long)_blkstart);
  else
    sprintf(rangebuf, "%llu-%llu", (unsigned long long)_blkstart, (unsigned long long)_blkstart + _blksize - 1);
  XXX << "#" << _workerno << ": BLK " << _blkno << ":" << rangebuf << " " << _url << endl;
  if (curl_easy_setopt(_curl, CURLOPT_RANGE, !_noendrange || _blkstart != 0 ? rangebuf : (char *)0) != CURLE_OK)
    {
      _request->_activeworkers--;
      _state = WORKER_BROKEN;
      strncpy(_curlError, "curl_easy_setopt range failed", CURL_ERROR_SIZE);
      return;
    }
  if (curl_multi_add_handle(_request->_multi, _curl) != CURLM_OK)
    {
      _request->_activeworkers--;
      _state = WORKER_BROKEN;
      strncpy(_curlError, "curl_multi_add_handle failed", CURL_ERROR_SIZE);
      return;
    }
  _request->_havenewjob = true;
  _off = _blkstart;
  _size = _blksize;
  if (_request->_blklist)
    _request->_blklist->createDigest(_dig);     // resets digest
  _state = WORKER_FETCH;

  double now = currentTime();
  _blkstarttime = now;
  _blkreceived = 0;
}


multifetchrequest::multifetchrequest(const MediaMultiCurl *context, const Pathname &filename, const Url &baseurl, CURLM *multi, FILE *fp, callback::SendReport<DownloadProgressReport> *report, MediaBlockList *blklist, off_t filesize) : _context(context), _filename(filename), _baseurl(baseurl)
{
  _fp = fp;
  _report = report;
  _blklist = blklist;
  _filesize = filesize;
  _multi = multi;
  _stealing = false;
  _havenewjob = false;
  _blkno = 0;
  if (_blklist)
    _blkoff = _blklist->getBlock(0).off;
  else
    _blkoff = 0;
  _activeworkers = 0;
  _lookupworkers = 0;
  _sleepworkers = 0;
  _minsleepuntil = 0;
  _finished = false;
  _fetchedsize = 0;
  _fetchedgoodsize = 0;
  _totalsize = 0;
  _lastperiodstart = _lastprogress = _starttime = currentTime();
  _lastperiodfetched = 0;
  _periodavg = 0;
  _timeout = 0;
  _connect_timeout = 0;
  _maxspeed = 0;
  _maxworkers = 0;
  if (blklist)
    {
      for (size_t blkno = 0; blkno < blklist->numBlocks(); blkno++)
        {
          MediaBlock blk = blklist->getBlock(blkno);
          _totalsize += blk.size;
        }
    }
  else if (filesize != off_t(-1))
    _totalsize = filesize;
}

multifetchrequest::~multifetchrequest()
{
  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
    {
      multifetchworker *worker = *workeriter;
      *workeriter = NULL;
      delete worker;
    }
  _workers.clear();
}

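// Main transfer loop: spawn one worker per mirror (up to _maxworkers),
// multiplex them via select() and curl_multi_perform(), wake sleeping
// workers, verify finished blocks, put slow mirrors to sleep and apply the
// optional download rate limit.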
void
multifetchrequest::run(std::vector<Url> &urllist)
{
  int workerno = 0;
  std::vector<Url>::iterator urliter = urllist.begin();
  for (;;)
    {
      fd_set rset, wset, xset;
      int maxfd, nqueue;

      if (_finished)
        {
          XXX << "finished!" << endl;
          break;
        }

      if ((int)_activeworkers < _maxworkers && urliter != urllist.end() && _workers.size() < MAXURLS)
        {
          // spawn another worker!
          multifetchworker *worker = new multifetchworker(workerno++, *this, *urliter);
          _workers.push_back(worker);
          if (worker->_state != WORKER_BROKEN)
            {
              _activeworkers++;
              if (worker->_state != WORKER_LOOKUP)
                {
                  worker->nextjob();
                }
              else
                _lookupworkers++;
            }
          ++urliter;
          continue;
        }
      if (!_activeworkers)
        {
          WAR << "No more active workers!" << endl;
          // show the first worker error we find
          for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
            {
              if ((*workeriter)->_state != WORKER_BROKEN)
                continue;
              ZYPP_THROW(MediaCurlException(_baseurl, "Server error", (*workeriter)->_curlError));
            }
          break;
        }

      FD_ZERO(&rset);
      FD_ZERO(&wset);
      FD_ZERO(&xset);

      curl_multi_fdset(_multi, &rset, &wset, &xset, &maxfd);

      if (_lookupworkers)
        for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
          (*workeriter)->adddnsfd(rset, maxfd);

      timeval tv;
      // if we added a new job we have to call multi_perform once
      // to make it show up in the fd set. do not sleep in this case.
      tv.tv_sec = 0;
      tv.tv_usec = _havenewjob ? 0 : 200000;
      if (_sleepworkers && !_havenewjob)
        {
          if (_minsleepuntil == 0)
            {
              for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
                {
                  multifetchworker *worker = *workeriter;
                  if (worker->_state != WORKER_SLEEP)
                    continue;
                  if (!_minsleepuntil || _minsleepuntil > worker->_sleepuntil)
                    _minsleepuntil = worker->_sleepuntil;
                }
            }
          double sl = _minsleepuntil - currentTime();
          if (sl < 0)
            {
              sl = 0;
              _minsleepuntil = 0;
            }
          if (sl < .2)
            tv.tv_usec = sl * 1000000;
        }
      int r = select(maxfd + 1, &rset, &wset, &xset, &tv);
      if (r == -1 && errno != EINTR)
        ZYPP_THROW(MediaCurlException(_baseurl, "select() failed", "unknown error"));
      if (r != 0 && _lookupworkers)
        for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
          {
            multifetchworker *worker = *workeriter;
            if (worker->_state != WORKER_LOOKUP)
              continue;
            (*workeriter)->dnsevent(rset);
            if (worker->_state != WORKER_LOOKUP)
              _lookupworkers--;
          }
      _havenewjob = false;

      // run curl
      for (;;)
        {
          CURLMcode mcode;
          int tasks;
          mcode = curl_multi_perform(_multi, &tasks);
          if (mcode == CURLM_CALL_MULTI_PERFORM)
            continue;
          if (mcode != CURLM_OK)
            ZYPP_THROW(MediaCurlException(_baseurl, "curl_multi_perform", "unknown error"));
          break;
        }

      double now = currentTime();

      // update periodavg
      if (now > _lastperiodstart + .5)
        {
          if (!_periodavg)
            _periodavg = (_fetchedsize - _lastperiodfetched) / (now - _lastperiodstart);
          else
            _periodavg = (_periodavg + (_fetchedsize - _lastperiodfetched) / (now - _lastperiodstart)) / 2;
          _lastperiodfetched = _fetchedsize;
          _lastperiodstart = now;
        }

      // wake up sleepers
      if (_sleepworkers)
        {
          for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
            {
              multifetchworker *worker = *workeriter;
              if (worker->_state != WORKER_SLEEP)
                continue;
              if (worker->_sleepuntil > now)
                continue;
              if (_minsleepuntil == worker->_sleepuntil)
                _minsleepuntil = 0;
              XXX << "#" << worker->_workerno << ": sleep done, wake up" << endl;
              _sleepworkers--;
              // nextjob changes the state
              worker->nextjob();
            }
        }

      // collect all curl results, reschedule new jobs
      CURLMsg *msg;
      while ((msg = curl_multi_info_read(_multi, &nqueue)) != 0)
        {
          if (msg->msg != CURLMSG_DONE)
            continue;
          CURL *easy = msg->easy_handle;
          CURLcode cc = msg->data.result;
          multifetchworker *worker;
          if (curl_easy_getinfo(easy, CURLINFO_PRIVATE, &worker) != CURLE_OK)
            ZYPP_THROW(MediaCurlException(_baseurl, "curl_easy_getinfo", "unknown error"));
          if (worker->_blkreceived && now > worker->_blkstarttime)
            {
              if (worker->_avgspeed)
                worker->_avgspeed = (worker->_avgspeed + worker->_blkreceived / (now - worker->_blkstarttime)) / 2;
              else
                worker->_avgspeed = worker->_blkreceived / (now - worker->_blkstarttime);
            }
          XXX << "#" << worker->_workerno << ": BLK " << worker->_blkno << " done code " << cc << " speed " << worker->_avgspeed << endl;
          curl_multi_remove_handle(_multi, easy);
          if (cc == CURLE_HTTP_RETURNED_ERROR)
            {
              long statuscode = 0;
              (void)curl_easy_getinfo(easy, CURLINFO_RESPONSE_CODE, &statuscode);
              XXX << "HTTP status " << statuscode << endl;
              if (statuscode == 416 && !_blklist)       /* Range error */
                {
                  if (_filesize == off_t(-1))
                    {
                      if (!worker->_noendrange)
                        {
                          XXX << "#" << worker->_workerno << ": retrying with no end range" << endl;
                          worker->_noendrange = true;
                          worker->run();
                          continue;
                        }
                      worker->_noendrange = false;
                      worker->stealjob();
                      continue;
                    }
                  if (worker->_blkstart >= _filesize)
                    {
                      worker->nextjob();
                      continue;
                    }
                }
            }
          if (cc == 0)
            {
              if (!worker->checkChecksum())
                {
                  WAR << "#" << worker->_workerno << ": checksum error, disable worker" << endl;
                  worker->_state = WORKER_BROKEN;
                  strncpy(worker->_curlError, "checksum error", CURL_ERROR_SIZE);
                  _activeworkers--;
                  continue;
                }
              if (worker->_state == WORKER_FETCH)
                {
                  if (worker->_competing)
                    {
                      worker->disableCompetition();
                      // multiple workers wrote into this block. We already know that our
                      // data was correct, but maybe some other worker overwrote our data
                      // with something broken. Thus we have to re-check the block.
                      if (!worker->recheckChecksum())
                        {
                          XXX << "#" << worker->_workerno << ": recheck checksum error, refetch block" << endl;
                          // re-fetch! No need to worry about the bad workers,
                          // they will now be set to DISCARD. At the end of their block
                          // they will notice that they wrote bad data and go into BROKEN.
                          worker->run();
                          continue;
                        }
                    }
                  _fetchedgoodsize += worker->_blksize;
                }

              // make bad workers sleep a little
              double maxavg = 0;
              int maxworkerno = 0;
              int numbetter = 0;
              for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
                {
                  multifetchworker *oworker = *workeriter;
                  if (oworker->_state == WORKER_BROKEN)
                    continue;
                  if (oworker->_avgspeed > maxavg)
                    {
                      maxavg = oworker->_avgspeed;
                      maxworkerno = oworker->_workerno;
                    }
                  if (oworker->_avgspeed > worker->_avgspeed)
                    numbetter++;
                }
              if (maxavg && !_stealing)
                {
                  double ratio = worker->_avgspeed / maxavg;
                  ratio = 1 - ratio;
                  if (numbetter < 3)    // don't sleep that much if we're in the top two
                    ratio = ratio * ratio;
                  if (ratio > .01)
                    {
                      XXX << "#" << worker->_workerno << ": too slow ("<< ratio << ", " << worker->_avgspeed << ", #" << maxworkerno << ": " << maxavg << "), going to sleep for " << ratio * 1000 << " ms" << endl;
                      worker->_sleepuntil = now + ratio;
                      worker->_state = WORKER_SLEEP;
                      _sleepworkers++;
                      continue;
                    }
                }

              // do rate control (if requested)
              // should use periodavg, but that's not what libcurl does
              if (_maxspeed && now > _starttime)
                {
                  double avg = _fetchedsize / (now - _starttime);
                  avg = worker->_maxspeed * _maxspeed / avg;
                  if (avg < _maxspeed / _maxworkers)
                    avg = _maxspeed / _maxworkers;
                  if (avg > _maxspeed)
                    avg = _maxspeed;
                  if (avg < 1024)
                    avg = 1024;
                  worker->_maxspeed = avg;
#if CURLVERSION_AT_LEAST(7,15,5)
                  curl_easy_setopt(worker->_curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)(avg));
#endif
                }

              worker->nextjob();
            }
          else
            {
              worker->_state = WORKER_BROKEN;
              _activeworkers--;
              if (!_activeworkers && !(urliter != urllist.end() && _workers.size() < MAXURLS))
                {
                  // end of workers reached! goodbye!
                  worker->evaluateCurlCode(Pathname(), cc, false);
                }
            }
        }

      // send report
      if (_report)
        {
          int percent = _totalsize ? (100 * (_fetchedgoodsize + _fetchedsize)) / (_totalsize + _fetchedsize) : 0;
          double avg = 0;
          if (now > _starttime)
            avg = _fetchedsize / (now - _starttime);
          if (!(*(_report))->progress(percent, _baseurl, avg, _lastperiodstart == _starttime ? avg : _periodavg))
            ZYPP_THROW(MediaCurlException(_baseurl, "User abort", "cancelled"));
        }

      if (_timeout && now - _lastprogress > _timeout)
        break;
    }

  if (!_finished)
    ZYPP_THROW(MediaTimeoutException(_baseurl));

  // print some download stats
  WAR << "overall result" << endl;
  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
    {
      multifetchworker *worker = *workeriter;
      WAR << "#" << worker->_workerno << ": state: " << worker->_state << " received: " << worker->_received << " url: " << worker->_url << endl;
    }
}



MediaMultiCurl::MediaMultiCurl(const Url &url_r, const Pathname & attach_point_hint_r)
    : MediaCurl(url_r, attach_point_hint_r)
{
  MIL << "MediaMultiCurl::MediaMultiCurl(" << url_r << ", " << attach_point_hint_r << ")" << endl;
  _multi = 0;
  _customHeadersMetalink = 0;
}

MediaMultiCurl::~MediaMultiCurl()
{
  if (_customHeadersMetalink)
    {
      curl_slist_free_all(_customHeadersMetalink);
      _customHeadersMetalink = 0;
    }
  if (_multi)
    {
      curl_multi_cleanup(_multi);
      _multi = 0;
    }
  std::map<std::string, CURL *>::iterator it;
  for (it = _easypool.begin(); it != _easypool.end(); it++)
    {
      CURL *easy = it->second;
      if (easy)
        {
          curl_easy_cleanup(easy);
          it->second = NULL;
        }
    }
}

void MediaMultiCurl::setupEasy()
{
  MediaCurl::setupEasy();

  if (_customHeadersMetalink)
    {
      curl_slist_free_all(_customHeadersMetalink);
      _customHeadersMetalink = 0;
    }
  struct curl_slist *sl = _customHeaders;
  for (; sl; sl = sl->next)
    _customHeadersMetalink = curl_slist_append(_customHeadersMetalink, sl->data);
  _customHeadersMetalink = curl_slist_append(_customHeadersMetalink, "Accept: */*, application/metalink+xml, application/metalink4+xml");
}

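// Sniff the first bytes of a file: skip an optional <?xml ...?> declaration
// plus whitespace and test for a <metalink root element.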
static bool looks_like_metalink_fd(int fd)
{
  char buf[256], *p;
  int l;
  while ((l = pread(fd, buf, sizeof(buf) - 1, (off_t)0)) == -1 && errno == EINTR)
    ;
  if (l == -1)
    return false;
  buf[l] = 0;
  p = buf;
  while (*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n')
    p++;
  if (!strncasecmp(p, "<?xml", 5))
    {
      while (*p && *p != '>')
        p++;
      if (*p == '>')
        p++;
      while (*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n')
        p++;
    }
  bool ret = !strncasecmp(p, "<metalink", 9) ? true : false;
  return ret;
}

static bool looks_like_metalink(const Pathname & file)
{
  int fd;
  if ((fd = open(file.asString().c_str(), O_RDONLY|O_CLOEXEC)) == -1)
    return false;
  bool ret = looks_like_metalink_fd(fd);
  close(fd);
  DBG << "looks_like_metalink(" << file << "): " << ret << endl;
  return ret;
}

// here we try to suppress all progress coming from a metalink download
int MediaMultiCurl::progressCallback( void *clientp, double dltotal, double dlnow, double ultotal, double ulnow)
{
  CURL *_curl = MediaCurl::progressCallback_getcurl(clientp);
  if (!_curl)
    return 0;

  // work around curl bug that gives us old data
  long httpReturnCode = 0;
  if (curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &httpReturnCode) != CURLE_OK || httpReturnCode == 0)
    return 0;

  char *ptr = NULL;
  bool ismetalink = false;
  if (curl_easy_getinfo(_curl, CURLINFO_CONTENT_TYPE, &ptr) == CURLE_OK && ptr)
    {
      string ct = string(ptr);
      if (ct.find("application/metalink+xml") == 0 || ct.find("application/metalink4+xml") == 0)
        ismetalink = true;
    }
  if (!ismetalink && dlnow < 256)
    {
      // can't tell yet, suppress callback
      return 0;
    }
  if (!ismetalink)
    {
      FILE *fp = 0;
      if (curl_easy_getinfo(_curl, CURLINFO_PRIVATE, &fp) != CURLE_OK)
        return 0;
      if (!fp)
        return 0;       /* hmm */
      fflush(fp);
      ismetalink = looks_like_metalink_fd(fileno(fp));
      DBG << "looks_like_metalink_fd: " << ismetalink << endl;
    }
  if (ismetalink)
    {
      // we're downloading the metalink file. no progress please.
      curl_easy_setopt(_curl, CURLOPT_NOPROGRESS, 1L);
      return 0;
    }
  curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &MediaCurl::progressCallback);
  return MediaCurl::progressCallback(clientp, dltotal, dlnow, ultotal, ulnow);
}

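// Download a file, advertising metalink support; if the server answers with
// a metalink description instead of the payload, parse it and restart the
// transfer as a segmented multi-mirror download, reusing blocks from older
// local copies where possible.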
void MediaMultiCurl::doGetFileCopy( const Pathname & filename , const Pathname & target, callback::SendReport<DownloadProgressReport> & report, RequestOptions options ) const
{
  Pathname dest = target.absolutename();
  if( assert_dir( dest.dirname() ) )
    {
      DBG << "assert_dir " << dest.dirname() << " failed" << endl;
      Url url(getFileUrl(filename));
      ZYPP_THROW( MediaSystemException(url, "System error on " + dest.dirname().asString()) );
    }
  string destNew = target.asString() + ".new.zypp.XXXXXX";
  char *buf = ::strdup( destNew.c_str());
  if( !buf)
    {
      ERR << "out of memory for temp file name" << endl;
      Url url(getFileUrl(filename));
      ZYPP_THROW(MediaSystemException(url, "out of memory for temp file name"));
    }

  int tmp_fd = ::mkostemp( buf, O_CLOEXEC );
  if( tmp_fd == -1)
    {
      free( buf);
      ERR << "mkostemp failed for file '" << destNew << "'" << endl;
      ZYPP_THROW(MediaWriteException(destNew));
    }
  destNew = buf;
  free( buf);

  FILE *file = ::fdopen( tmp_fd, "we" );
  if ( !file ) {
    ::close( tmp_fd);
    filesystem::unlink( destNew );
    ERR << "fdopen failed for file '" << destNew << "'" << endl;
    ZYPP_THROW(MediaWriteException(destNew));
  }
  DBG << "dest: " << dest << endl;
  DBG << "temp: " << destNew << endl;

  // set IFMODSINCE time condition (no download if not modified)
  if( PathInfo(target).isExist() && !(options & OPTION_NO_IFMODSINCE) )
    {
      curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE);
      curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, (long)PathInfo(target).mtime());
    }
  else
    {
      curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
      curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
    }
  // change header to include Accept: metalink
  curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeadersMetalink);
  // change to our own progress function
  curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &progressCallback);
  curl_easy_setopt(_curl, CURLOPT_PRIVATE, file);
  try
    {
      MediaCurl::doGetFileCopyFile(filename, dest, file, report, options);
    }
  catch (Exception &ex)
    {
      ::fclose(file);
      filesystem::unlink(destNew);
      curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
      curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
      curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeaders);
      curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
      ZYPP_RETHROW(ex);
    }
  curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
  curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
  curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeaders);
  curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
  long httpReturnCode = 0;
  CURLcode infoRet = curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &httpReturnCode);
  if (infoRet == CURLE_OK)
    {
      DBG << "HTTP response: " + str::numstring(httpReturnCode) << endl;
      if ( httpReturnCode == 304
           || ( httpReturnCode == 213 && _url.getScheme() == "ftp" ) ) // not modified
        {
          DBG << "not modified: " << PathInfo(dest) << endl;
          return;
        }
    }
  else
    {
      WAR << "Could not get the response code." << endl;
    }

  bool ismetalink = false;

  char *ptr = NULL;
  if (curl_easy_getinfo(_curl, CURLINFO_CONTENT_TYPE, &ptr) == CURLE_OK && ptr)
    {
      string ct = string(ptr);
      if (ct.find("application/metalink+xml") == 0 || ct.find("application/metalink4+xml") == 0)
        ismetalink = true;
    }

  if (!ismetalink)
    {
      // some proxies do not store the content type, so also look at the file to find
      // out if we received a metalink (bnc#649925)
      fflush(file);
      if (looks_like_metalink(Pathname(destNew)))
        ismetalink = true;
    }

  if (ismetalink)
    {
      bool userabort = false;
      fclose(file);
      file = NULL;
      Pathname failedFile = ZConfig::instance().repoCachePath() / "MultiCurl.failed";
      try
        {
          MetaLinkParser mlp;
          mlp.parse(Pathname(destNew));
          MediaBlockList bl = mlp.getBlockList();
          vector<Url> urls = mlp.getUrls();
          XXX << bl << endl;
          file = fopen(destNew.c_str(), "w+e");
          if (!file)
            ZYPP_THROW(MediaWriteException(destNew));
          if (PathInfo(target).isExist())
            {
              XXX << "reusing blocks from file " << target << endl;
              bl.reuseBlocks(file, target.asString());
              XXX << bl << endl;
            }
          if (bl.haveChecksum(1) && PathInfo(failedFile).isExist())
            {
              XXX << "reusing blocks from file " << failedFile << endl;
              bl.reuseBlocks(file, failedFile.asString());
              XXX << bl << endl;
              filesystem::unlink(failedFile);
            }
          Pathname df = deltafile();
          if (!df.empty())
            {
              XXX << "reusing blocks from file " << df << endl;
              bl.reuseBlocks(file, df.asString());
              XXX << bl << endl;
            }
          try
            {
              multifetch(filename, file, &urls, &report, &bl);
            }
          catch (MediaCurlException &ex)
            {
              userabort = ex.errstr() == "User abort";
              ZYPP_RETHROW(ex);
            }
        }
      catch (Exception &ex)
        {
          // something went wrong. fall back to normal download
          if (file)
            fclose(file);
          file = NULL;
          if (PathInfo(destNew).size() >= 63336)
            {
              ::unlink(failedFile.asString().c_str());
              filesystem::hardlinkCopy(destNew, failedFile);
            }
          if (userabort)
            {
              filesystem::unlink(destNew);
              ZYPP_RETHROW(ex);
            }
          file = fopen(destNew.c_str(), "w+e");
          if (!file)
            ZYPP_THROW(MediaWriteException(destNew));
          MediaCurl::doGetFileCopyFile(filename, dest, file, report, options | OPTION_NO_REPORT_START);
        }
    }

  if (::fchmod( ::fileno(file), filesystem::applyUmaskTo( 0644 )))
    {
      ERR << "Failed to chmod file " << destNew << endl;
    }
  if (::fclose(file))
    {
      filesystem::unlink(destNew);
      ERR << "Fclose failed for file '" << destNew << "'" << endl;
      ZYPP_THROW(MediaWriteException(destNew));
    }
  if ( rename( destNew, dest ) != 0 )
    {
      ERR << "Rename failed" << endl;
      ZYPP_THROW(MediaWriteException(dest));
    }
  DBG << "done: " << PathInfo(dest) << endl;
}

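// Drive a segmented download over a list of mirror URLs; degenerate cases
// (no blocks, zero size) fall back to a plain digest check of the file.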
void MediaMultiCurl::multifetch(const Pathname & filename, FILE *fp, std::vector<Url> *urllist, callback::SendReport<DownloadProgressReport> *report, MediaBlockList *blklist, off_t filesize) const
{
  Url baseurl(getFileUrl(filename));
  if (blklist && filesize == off_t(-1) && blklist->haveFilesize())
    filesize = blklist->getFilesize();
  if (blklist && !blklist->haveBlocks() && filesize != 0)
    blklist = 0;
  if (blklist && (filesize == 0 || !blklist->numBlocks()))
    {
      checkFileDigest(baseurl, fp, blklist);
      return;
    }
  if (filesize == 0)
    return;
  if (!_multi)
    {
      _multi = curl_multi_init();
      if (!_multi)
        ZYPP_THROW(MediaCurlInitException(baseurl));
    }
  multifetchrequest req(this, filename, baseurl, _multi, fp, report, blklist, filesize);
  req._timeout = _settings.timeout();
  req._connect_timeout = _settings.connectTimeout();
  req._maxspeed = _settings.maxDownloadSpeed();
  req._maxworkers = _settings.maxConcurrentConnections();
  if (req._maxworkers > MAXURLS)
    req._maxworkers = MAXURLS;
  if (req._maxworkers <= 0)
    req._maxworkers = 1;
  std::vector<Url> myurllist;
  for (std::vector<Url>::iterator urliter = urllist->begin(); urliter != urllist->end(); ++urliter)
    {
      try
        {
          string scheme = urliter->getScheme();
          if (scheme == "http" || scheme == "https" || scheme == "ftp" || scheme == "tftp")
            {
              checkProtocol(*urliter);
              myurllist.push_back(*urliter);
            }
        }
      catch (...)
        {
        }
    }
  if (!myurllist.size())
    myurllist.push_back(baseurl);
  req.run(myurllist);
  checkFileDigest(baseurl, fp, blklist);
}

void MediaMultiCurl::checkFileDigest(Url &url, FILE *fp, MediaBlockList *blklist) const
{
  if (!blklist || !blklist->haveFileChecksum())
    return;
  if (fseeko(fp, off_t(0), SEEK_SET))
    ZYPP_THROW(MediaCurlException(url, "fseeko", "seek error"));
  Digest dig;
  blklist->createFileDigest(dig);
  char buf[4096];
  size_t l;
  while ((l = fread(buf, 1, sizeof(buf), fp)) > 0)
    dig.update(buf, l);
  if (!blklist->verifyFileDigest(dig))
    ZYPP_THROW(MediaCurlException(url, "file verification failed", "checksum error"));
}

bool MediaMultiCurl::isDNSok(const string &host) const
{
  return _dnsok.find(host) == _dnsok.end() ? false : true;
}

void MediaMultiCurl::setDNSok(const string &host) const
{
  _dnsok.insert(host);
}

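// Per-host pool of idle easy handles: lets a later worker for the same host
// reuse an existing connection instead of opening a new one.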
CURL *MediaMultiCurl::fromEasyPool(const string &host) const
{
  if (_easypool.find(host) == _easypool.end())
    return 0;
  CURL *ret = _easypool[host];
  _easypool.erase(host);
  return ret;
}

void MediaMultiCurl::toEasyPool(const std::string &host, CURL *easy) const
{
  CURL *oldeasy = _easypool[host];
  _easypool[host] = easy;
  if (oldeasy)
    curl_easy_cleanup(oldeasy);
}

  } // namespace media
} // namespace zypp