libzypp  17.22.1
MediaMultiCurl.cc
1 /*---------------------------------------------------------------------\
2 |                          ____ _   __ __ ___                          |
3 |                         |__  / \ / / . \ . \                         |
4 |                           / / \ V /|  _/  _/                         |
5 |                          / /__ | | | | | |                           |
6 |                         /_____||_| |_| |_|                           |
7 |                                                                      |
8 \---------------------------------------------------------------------*/
9 /** \file zypp/media/MediaMultiCurl.cc
10  *
11  */
12 
13 #include <ctype.h>
14 #include <sys/types.h>
15 #include <signal.h>
16 #include <sys/wait.h>
17 #include <netdb.h>
18 #include <arpa/inet.h>
19 
20 #include <vector>
21 #include <iostream>
22 #include <algorithm>
23 
24 
25 #include "zypp/ZConfig.h"
26 #include "zypp/base/Logger.h"
27 #include "zypp/media/MediaMultiCurl.h"
28 #include "zypp/media/MetaLinkParser.h"
29 #include "zypp/ManagedFile.h"
30 #include "zypp/media/CurlHelper.h"
31 
32 using namespace std;
33 using namespace zypp::base;
34 
35 #undef CURLVERSION_AT_LEAST
36 #define CURLVERSION_AT_LEAST(M,N,O) LIBCURL_VERSION_NUM >= ((((M)<<8)+(N))<<8)+(O)
37 
38 namespace zypp {
39  namespace media {
40 
41 
43 
44 
45 class multifetchrequest;
46 
47 // Hack: we derive from MediaCurl just to get the storage space for
48 // settings, url, curlerrors and the like
49 
50 class multifetchworker : MediaCurl {
51  friend class multifetchrequest;
52 
53 public:
54  multifetchworker(int no, multifetchrequest &request, const Url &url);
55  ~multifetchworker();
56  void nextjob();
57  void run();
58  bool checkChecksum();
59  bool recheckChecksum();
60  void disableCompetition();
61 
62  void checkdns();
63  void adddnsfd(fd_set &rset, int &maxfd);
64  void dnsevent(fd_set &rset);
65 
66  int _workerno;
67 
68  int _state;
69  bool _competing;
70 
71  size_t _blkno;
72  off_t _blkstart;
73  size_t _blksize;
74  bool _noendrange;
75 
76  double _blkstarttime;
77  size_t _blkreceived;
78  off_t _received;
79 
80  double _avgspeed;
81  double _maxspeed;
82 
83  double _sleepuntil;
84 
85 private:
86  void stealjob();
87 
88  size_t writefunction(void *ptr, size_t size);
89  static size_t _writefunction(void *ptr, size_t size, size_t nmemb, void *stream);
90 
91  size_t headerfunction(char *ptr, size_t size);
92  static size_t _headerfunction(void *ptr, size_t size, size_t nmemb, void *stream);
93 
94  multifetchrequest *_request;
95  int _pass;
96  string _urlbuf;
97  off_t _off;
98  size_t _size;
99  Digest _dig;
100 
101  pid_t _pid;
102  int _dnspipe;
103 };
104 
105 #define WORKER_STARTING 0
106 #define WORKER_LOOKUP 1
107 #define WORKER_FETCH 2
108 #define WORKER_DISCARD 3
109 #define WORKER_DONE 4
110 #define WORKER_SLEEP 5
111 #define WORKER_BROKEN 6
112 
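// Worker life cycle (editor's summary of the code below): a worker starts in
// WORKER_STARTING, may fork a DNS check (WORKER_LOOKUP), then fetches blocks
// (WORKER_FETCH). Workers judged too slow are parked in WORKER_SLEEP, losers
// of a competed block are switched to WORKER_DISCARD, a worker with nothing
// left to do ends in WORKER_DONE, and any hard failure (curl setup, DNS,
// checksum mismatch) ends in WORKER_BROKEN.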
113 
114 
115 class multifetchrequest {
116 public:
117  multifetchrequest(const MediaMultiCurl *context, const Pathname &filename, const Url &baseurl, CURLM *multi, FILE *fp, callback::SendReport<DownloadProgressReport> *report, MediaBlockList *blklist, off_t filesize);
118  ~multifetchrequest();
119 
120  void run(std::vector<Url> &urllist);
121 
122 protected:
123  friend class multifetchworker;
124 
125  const MediaMultiCurl *_context;
126  const Pathname _filename;
127  Url _baseurl;
128 
129  FILE *_fp;
130  callback::SendReport<DownloadProgressReport> *_report;
131  MediaBlockList *_blklist;
132  off_t _filesize;
133 
134  CURLM *_multi;
135 
136  std::list<multifetchworker *> _workers;
137  bool _stealing;
138  bool _havenewjob;
139 
140  size_t _blkno;
141  off_t _blkoff;
142  size_t _activeworkers;
143  size_t _lookupworkers;
144  size_t _sleepworkers;
145  double _minsleepuntil;
146  bool _finished;
147  off_t _totalsize;
148  off_t _fetchedsize;
149  off_t _fetchedgoodsize;
150 
151  double _starttime;
152  double _lastprogress;
153 
154  double _lastperiodstart;
155  double _lastperiodfetched;
156  double _periodavg;
157 
158 public:
159  double _timeout;
160  double _connect_timeout;
161  double _maxspeed;
162  int _maxworkers;
163 };
164 
165 #define BLKSIZE 131072
166 #define MAXURLS 10
167 
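// BLKSIZE limits the byte range requested per job when no per-block checksum
// is available; MAXURLS caps both the number of mirror URLs used and the
// number of workers a request may spawn.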
168 
170 
171 static double
172 currentTime()
173 {
174  struct timeval tv;
175  if (gettimeofday(&tv, NULL))
176  return 0;
177  return tv.tv_sec + tv.tv_usec / 1000000.;
178 }
179 
180 size_t
181 multifetchworker::writefunction(void *ptr, size_t size)
182 {
183  size_t len, cnt;
184  if (_state == WORKER_BROKEN)
185  return size ? 0 : 1;
186 
187  double now = currentTime();
188 
189  len = size > _size ? _size : size;
190  if (!len)
191  {
192  // kill this job?
193  return size;
194  }
195 
196  if (_blkstart && _off == _blkstart)
197  {
198  // make sure that the server replied with "partial content"
199  // for http requests
200  char *effurl;
201  (void)curl_easy_getinfo(_curl, CURLINFO_EFFECTIVE_URL, &effurl);
202  if (effurl && !strncasecmp(effurl, "http", 4))
203  {
204  long statuscode = 0;
205  (void)curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &statuscode);
206  if (statuscode != 206)
207  return size ? 0 : 1;
208  }
209  }
210 
211  _blkreceived += len;
212  _received += len;
213 
214  _request->_lastprogress = now;
215 
216  if (_state == WORKER_DISCARD || !_request->_fp)
217  {
218  // block is no longer needed
219  // still calculate the checksum so that we can throw out bad servers
220  if (_request->_blklist)
221  _dig.update((const char *)ptr, len);
222  _off += len;
223  _size -= len;
224  return size;
225  }
226  if (fseeko(_request->_fp, _off, SEEK_SET))
227  return size ? 0 : 1;
228  cnt = fwrite(ptr, 1, len, _request->_fp);
229  if (cnt > 0)
230  {
231  _request->_fetchedsize += cnt;
232  if (_request->_blklist)
233  _dig.update((const char *)ptr, cnt);
234  _off += cnt;
235  _size -= cnt;
236  if (cnt == len)
237  return size;
238  }
239  return cnt;
240 }
241 
242 size_t
243 multifetchworker::_writefunction(void *ptr, size_t size, size_t nmemb, void *stream)
244 {
245  multifetchworker *me = reinterpret_cast<multifetchworker *>(stream);
246  return me->writefunction(ptr, size * nmemb);
247 }
248 
249 size_t
250 multifetchworker::headerfunction(char *p, size_t size)
251 {
252  size_t l = size;
253  if (l > 9 && !strncasecmp(p, "Location:", 9))
254  {
255  string line(p + 9, l - 9);
256  if (line[l - 10] == '\r')
257  line.erase(l - 10, 1);
258  XXX << "#" << _workerno << ": redirecting to " << line << endl;
259  return size;
260  }
261  if (l <= 14 || l >= 128 || strncasecmp(p, "Content-Range:", 14) != 0)
262  return size;
263  p += 14;
264  l -= 14;
265  while (l && (*p == ' ' || *p == '\t'))
266  p++, l--;
267  if (l < 6 || strncasecmp(p, "bytes", 5))
268  return size;
269  p += 5;
270  l -= 5;
271  char buf[128];
272  memcpy(buf, p, l);
273  buf[l] = 0;
274  unsigned long long start, off, filesize;
275  if (sscanf(buf, "%llu-%llu/%llu", &start, &off, &filesize) != 3)
276  return size;
277  if (_request->_filesize == (off_t)-1)
278  {
279  WAR << "#" << _workerno << ": setting request filesize to " << filesize << endl;
280  _request->_filesize = filesize;
281  if (_request->_totalsize == 0 && !_request->_blklist)
282  _request->_totalsize = filesize;
283  }
284  if (_request->_filesize != (off_t)filesize)
285  {
286  XXX << "#" << _workerno << ": filesize mismatch" << endl;
287  _state = WORKER_BROKEN;
288  strncpy(_curlError, "filesize mismatch", CURL_ERROR_SIZE);
289  }
290  return size;
291 }
292 
293 size_t
294 multifetchworker::_headerfunction(void *ptr, size_t size, size_t nmemb, void *stream)
295 {
296  multifetchworker *me = reinterpret_cast<multifetchworker *>(stream);
297  return me->headerfunction((char *)ptr, size * nmemb);
298 }
299 
300 multifetchworker::multifetchworker(int no, multifetchrequest &request, const Url &url)
301 : MediaCurl(url, Pathname())
302 {
303  _workerno = no;
304  _request = &request;
305  _state = WORKER_STARTING;
306  _competing = false;
307  _off = _blkstart = 0;
308  _size = _blksize = 0;
309  _pass = 0;
310  _blkno = 0;
311  _pid = 0;
312  _dnspipe = -1;
313  _blkreceived = 0;
314  _received = 0;
315  _blkstarttime = 0;
316  _avgspeed = 0;
317  _sleepuntil = 0;
318  _maxspeed = request._maxspeed;
319  _noendrange = false;
320 
321  Url curlUrl( clearQueryString(url) );
322  _urlbuf = curlUrl.asString();
323  _curl = _request->_context->fromEasyPool(_url.getHost());
324  if (_curl)
325  XXX << "reused worker from pool" << endl;
326  if (!_curl && !(_curl = curl_easy_init()))
327  {
328  _state = WORKER_BROKEN;
329  strncpy(_curlError, "curl_easy_init failed", CURL_ERROR_SIZE);
330  return;
331  }
332  try
333  {
334  setupEasy();
335  }
336  catch (Exception &ex)
337  {
338  curl_easy_cleanup(_curl);
339  _curl = 0;
340  _state = WORKER_BROKEN;
341  strncpy(_curlError, "curl_easy_setopt failed", CURL_ERROR_SIZE);
342  return;
343  }
344  curl_easy_setopt(_curl, CURLOPT_PRIVATE, this);
345  curl_easy_setopt(_curl, CURLOPT_URL, _urlbuf.c_str());
346  curl_easy_setopt(_curl, CURLOPT_WRITEFUNCTION, &_writefunction);
347  curl_easy_setopt(_curl, CURLOPT_WRITEDATA, this);
348  if (_request->_filesize == off_t(-1) || !_request->_blklist || !_request->_blklist->haveChecksum(0))
349  {
350  curl_easy_setopt(_curl, CURLOPT_HEADERFUNCTION, &_headerfunction);
351  curl_easy_setopt(_curl, CURLOPT_HEADERDATA, this);
352  }
353  // if this is the same host copy authorization
354  // (the host check is also what curl does when doing a redirect)
355  // (note also that unauthorized exceptions are thrown with the request host)
356  if (url.getHost() == _request->_context->_url.getHost())
357  {
358  _settings.setUsername(_request->_context->_settings.username());
359  _settings.setPassword(_request->_context->_settings.password());
360  _settings.setAuthType(_request->_context->_settings.authType());
361  if ( _settings.userPassword().size() )
362  {
363  curl_easy_setopt(_curl, CURLOPT_USERPWD, _settings.userPassword().c_str());
364  string use_auth = _settings.authType();
365  if (use_auth.empty())
366  use_auth = "digest,basic"; // our default
367  long auth = CurlAuthData::auth_type_str2long(use_auth);
368  if( auth != CURLAUTH_NONE)
369  {
370  XXX << "#" << _workerno << ": Enabling HTTP authentication methods: " << use_auth
371  << " (CURLOPT_HTTPAUTH=" << auth << ")" << std::endl;
372  curl_easy_setopt(_curl, CURLOPT_HTTPAUTH, auth);
373  }
374  }
375  }
376  checkdns();
377 }
378 
379 multifetchworker::~multifetchworker()
380 {
381  if (_curl)
382  {
383  if (_state == WORKER_FETCH || _state == WORKER_DISCARD)
384  curl_multi_remove_handle(_request->_multi, _curl);
385  if (_state == WORKER_DONE || _state == WORKER_SLEEP)
386  {
387 #if CURLVERSION_AT_LEAST(7,15,5)
388  curl_easy_setopt(_curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)0);
389 #endif
390  curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
391  curl_easy_setopt(_curl, CURLOPT_WRITEFUNCTION, (void *)0);
392  curl_easy_setopt(_curl, CURLOPT_WRITEDATA, (void *)0);
393  curl_easy_setopt(_curl, CURLOPT_HEADERFUNCTION, (void *)0);
394  curl_easy_setopt(_curl, CURLOPT_HEADERDATA, (void *)0);
395  _request->_context->toEasyPool(_url.getHost(), _curl);
396  }
397  else
398  curl_easy_cleanup(_curl);
399  _curl = 0;
400  }
401  if (_pid)
402  {
403  kill(_pid, SIGKILL);
404  int status;
405  while (waitpid(_pid, &status, 0) == -1)
406  if (errno != EINTR)
407  break;
408  _pid = 0;
409  }
410  if (_dnspipe != -1)
411  {
412  close(_dnspipe);
413  _dnspipe = -1;
414  }
415  // the destructor in MediaCurl doesn't call disconnect() if
416  // the media is not attached, so we do it here manually
417  disconnectFrom();
418 }
419 
420 static inline bool env_isset(string name)
421 {
422  const char *s = getenv(name.c_str());
423  return s && *s ? true : false;
424 }
425 
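// checkdns() below forks a helper process that resolves the worker's host via
// getaddrinfo(); the parent only keeps the read end of a pipe and waits for it
// to become readable in the select() loop, so a slow name server cannot stall
// the whole download.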
426 void
427 multifetchworker::checkdns()
428 {
429  string host = _url.getHost();
430 
431  if (host.empty())
432  return;
433 
434  if (_request->_context->isDNSok(host))
435  return;
436 
437  // no need to do dns checking for numeric hosts
438  char addrbuf[128];
439  if (inet_pton(AF_INET, host.c_str(), addrbuf) == 1)
440  return;
441  if (inet_pton(AF_INET6, host.c_str(), addrbuf) == 1)
442  return;
443 
444  // no need to do dns checking if we use a proxy
445  if (!_settings.proxy().empty())
446  return;
447  if (env_isset("all_proxy") || env_isset("ALL_PROXY"))
448  return;
449  string schemeproxy = _url.getScheme() + "_proxy";
450  if (env_isset(schemeproxy))
451  return;
452  if (schemeproxy != "http_proxy")
453  {
454  std::transform(schemeproxy.begin(), schemeproxy.end(), schemeproxy.begin(), ::toupper);
455  if (env_isset(schemeproxy))
456  return;
457  }
458 
459  XXX << "checking DNS lookup of " << host << endl;
460  int pipefds[2];
461  if (pipe(pipefds))
462  {
463  _state = WORKER_BROKEN;
464  strncpy(_curlError, "DNS pipe creation failed", CURL_ERROR_SIZE);
465  return;
466  }
467  _pid = fork();
468  if (_pid == pid_t(-1))
469  {
470  close(pipefds[0]);
471  close(pipefds[1]);
472  _pid = 0;
473  _state = WORKER_BROKEN;
474  strncpy(_curlError, "DNS checker fork failed", CURL_ERROR_SIZE);
475  return;
476  }
477  else if (_pid == 0)
478  {
479  close(pipefds[0]);
480  // XXX: close all other file descriptors
481  struct addrinfo *ai, aihints;
482  memset(&aihints, 0, sizeof(aihints));
483  aihints.ai_family = PF_UNSPEC;
484  int tstsock = socket(PF_INET6, SOCK_DGRAM | SOCK_CLOEXEC, 0);
485  if (tstsock == -1)
486  aihints.ai_family = PF_INET;
487  else
488  close(tstsock);
489  aihints.ai_socktype = SOCK_STREAM;
490  aihints.ai_flags = AI_CANONNAME;
491  unsigned int connecttimeout = _request->_connect_timeout;
492  if (connecttimeout)
493  alarm(connecttimeout);
494  signal(SIGALRM, SIG_DFL);
495  if (getaddrinfo(host.c_str(), NULL, &aihints, &ai))
496  _exit(1);
497  _exit(0);
498  }
499  close(pipefds[1]);
500  _dnspipe = pipefds[0];
501  _state = WORKER_LOOKUP;
502 }
503 
504 void
505 multifetchworker::adddnsfd(fd_set &rset, int &maxfd)
506 {
507  if (_state != WORKER_LOOKUP)
508  return;
509  FD_SET(_dnspipe, &rset);
510  if (maxfd < _dnspipe)
511  maxfd = _dnspipe;
512 }
513 
514 void
515 multifetchworker::dnsevent(fd_set &rset)
516 {
517 
518  if (_state != WORKER_LOOKUP || !FD_ISSET(_dnspipe, &rset))
519  return;
520  int status;
521  while (waitpid(_pid, &status, 0) == -1)
522  {
523  if (errno != EINTR)
524  return;
525  }
526  _pid = 0;
527  if (_dnspipe != -1)
528  {
529  close(_dnspipe);
530  _dnspipe = -1;
531  }
532  if (!WIFEXITED(status))
533  {
534  _state = WORKER_BROKEN;
535  strncpy(_curlError, "DNS lookup failed", CURL_ERROR_SIZE);
536  _request->_activeworkers--;
537  return;
538  }
539  int exitcode = WEXITSTATUS(status);
540  XXX << "#" << _workerno << ": DNS lookup returned " << exitcode << endl;
541  if (exitcode != 0)
542  {
543  _state = WORKER_BROKEN;
544  strncpy(_curlError, "DNS lookup failed", CURL_ERROR_SIZE);
545  _request->_activeworkers--;
546  return;
547  }
548  _request->_context->setDNSok(_url.getHost());
549  nextjob();
550 }
551 
552 bool
553 multifetchworker::checkChecksum()
554 {
555  // XXX << "checkChecksum block " << _blkno << endl;
556  if (!_blksize || !_request->_blklist)
557  return true;
558  return _request->_blklist->verifyDigest(_blkno, _dig);
559 }
560 
561 bool
562 multifetchworker::recheckChecksum()
563 {
564  // XXX << "recheckChecksum block " << _blkno << endl;
565  if (!_request->_fp || !_blksize || !_request->_blklist)
566  return true;
567  if (fseeko(_request->_fp, _blkstart, SEEK_SET))
568  return false;
569  char buf[4096];
570  size_t l = _blksize;
571  _request->_blklist->createDigest(_dig); // resets digest
572  while (l)
573  {
574  size_t cnt = l > sizeof(buf) ? sizeof(buf) : l;
575  if (fread(buf, cnt, 1, _request->_fp) != 1)
576  return false;
577  _dig.update(buf, cnt);
578  l -= cnt;
579  }
580  return _request->_blklist->verifyDigest(_blkno, _dig);
581 }
582 
583 
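// stealjob(): once no fresh blocks are left, an idle worker picks the block
// whose current owner is expected to finish last (judged by measured average
// speed) and downloads the same range in parallel; whichever worker completes
// the block first wins and the competitors are told to discard.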
584 void
585 multifetchworker::stealjob()
586 {
587  if (!_request->_stealing)
588  {
589  XXX << "start stealing!" << endl;
590  _request->_stealing = true;
591  }
592  multifetchworker *best = 0;
593  std::list<multifetchworker *>::iterator workeriter = _request->_workers.begin();
594  double now = 0;
595  for (; workeriter != _request->_workers.end(); ++workeriter)
596  {
597  multifetchworker *worker = *workeriter;
598  if (worker == this)
599  continue;
600  if (worker->_pass == -1)
601  continue; // do not steal!
602  if (worker->_state == WORKER_DISCARD || worker->_state == WORKER_DONE || worker->_state == WORKER_SLEEP || !worker->_blksize)
603  continue; // do not steal finished jobs
604  if (!worker->_avgspeed && worker->_blkreceived)
605  {
606  if (!now)
607  now = currentTime();
608  if (now > worker->_blkstarttime)
609  worker->_avgspeed = worker->_blkreceived / (now - worker->_blkstarttime);
610  }
611  if (!best || best->_pass > worker->_pass)
612  {
613  best = worker;
614  continue;
615  }
616  if (best->_pass < worker->_pass)
617  continue;
618  // if it is the same block, we want to know the best worker, otherwise the worst
619  if (worker->_blkstart == best->_blkstart)
620  {
621  if ((worker->_blksize - worker->_blkreceived) * best->_avgspeed < (best->_blksize - best->_blkreceived) * worker->_avgspeed)
622  best = worker;
623  }
624  else
625  {
626  if ((worker->_blksize - worker->_blkreceived) * best->_avgspeed > (best->_blksize - best->_blkreceived) * worker->_avgspeed)
627  best = worker;
628  }
629  }
630  if (!best)
631  {
632  _state = WORKER_DONE;
633  _request->_activeworkers--;
634  _request->_finished = true;
635  return;
636  }
637  // do not sleep twice
638  if (_state != WORKER_SLEEP)
639  {
640  if (!_avgspeed && _blkreceived)
641  {
642  if (!now)
643  now = currentTime();
644  if (now > _blkstarttime)
645  _avgspeed = _blkreceived / (now - _blkstarttime);
646  }
647 
648  // let's see if we should sleep a bit
649  XXX << "me #" << _workerno << ": " << _avgspeed << ", size " << best->_blksize << endl;
650  XXX << "best #" << best->_workerno << ": " << best->_avgspeed << ", size " << (best->_blksize - best->_blkreceived) << endl;
651  if (_avgspeed && best->_avgspeed && best->_blksize - best->_blkreceived > 0 &&
652  (best->_blksize - best->_blkreceived) * _avgspeed < best->_blksize * best->_avgspeed)
653  {
654  if (!now)
655  now = currentTime();
656  double sl = (best->_blksize - best->_blkreceived) / best->_avgspeed * 2;
657  if (sl > 1)
658  sl = 1;
659  XXX << "#" << _workerno << ": going to sleep for " << sl * 1000 << " ms" << endl;
660  _sleepuntil = now + sl;
661  _state = WORKER_SLEEP;
662  _request->_sleepworkers++;
663  return;
664  }
665  }
666 
667  _competing = true;
668  best->_competing = true;
669  _blkstart = best->_blkstart;
670  _blksize = best->_blksize;
671  best->_pass++;
672  _pass = best->_pass;
673  _blkno = best->_blkno;
674  run();
675 }
676 
677 void
678 multifetchworker::disableCompetition()
679 {
680  std::list<multifetchworker *>::iterator workeriter = _request->_workers.begin();
681  for (; workeriter != _request->_workers.end(); ++workeriter)
682  {
683  multifetchworker *worker = *workeriter;
684  if (worker == this)
685  continue;
686  if (worker->_blkstart == _blkstart)
687  {
688  if (worker->_state == WORKER_FETCH)
689  worker->_state = WORKER_DISCARD;
690  worker->_pass = -1; /* do not steal this one, we already have it */
691  }
692  }
693 }
694 
695 
696 void
697 multifetchworker::nextjob()
698 {
699  _noendrange = false;
700  if (_request->_stealing)
701  {
702  stealjob();
703  return;
704  }
705 
706  MediaBlockList *blklist = _request->_blklist;
707  if (!blklist)
708  {
709  _blksize = BLKSIZE;
710  if (_request->_filesize != off_t(-1))
711  {
712  if (_request->_blkoff >= _request->_filesize)
713  {
714  stealjob();
715  return;
716  }
717  _blksize = _request->_filesize - _request->_blkoff;
718  if (_blksize > BLKSIZE)
719  _blksize = BLKSIZE;
720  }
721  }
722  else
723  {
724  MediaBlock blk = blklist->getBlock(_request->_blkno);
725  while (_request->_blkoff >= (off_t)(blk.off + blk.size))
726  {
727  if (++_request->_blkno == blklist->numBlocks())
728  {
729  stealjob();
730  return;
731  }
732  blk = blklist->getBlock(_request->_blkno);
733  _request->_blkoff = blk.off;
734  }
735  _blksize = blk.off + blk.size - _request->_blkoff;
736  if (_blksize > BLKSIZE && !blklist->haveChecksum(_request->_blkno))
737  _blksize = BLKSIZE;
738  }
739  _blkstart = _request->_blkoff;
740  _blkno = _request->_blkno;
741  _request->_blkoff += _blksize;
742  run();
743 }
744 
745 void
746 multifetchworker::run()
747 {
748  char rangebuf[128];
749 
750  if (_state == WORKER_BROKEN || _state == WORKER_DONE)
751  return; // just in case...
752  if (_noendrange)
753  sprintf(rangebuf, "%llu-", (unsigned long long)_blkstart);
754  else
755  sprintf(rangebuf, "%llu-%llu", (unsigned long long)_blkstart, (unsigned long long)_blkstart + _blksize - 1);
756  XXX << "#" << _workerno << ": BLK " << _blkno << ":" << rangebuf << " " << _url << endl;
757  if (curl_easy_setopt(_curl, CURLOPT_RANGE, !_noendrange || _blkstart != 0 ? rangebuf : (char *)0) != CURLE_OK)
758  {
759  _request->_activeworkers--;
760  _state = WORKER_BROKEN;
761  strncpy(_curlError, "curl_easy_setopt range failed", CURL_ERROR_SIZE);
762  return;
763  }
764  if (curl_multi_add_handle(_request->_multi, _curl) != CURLM_OK)
765  {
766  _request->_activeworkers--;
767  _state = WORKER_BROKEN;
768  strncpy(_curlError, "curl_multi_add_handle failed", CURL_ERROR_SIZE);
769  return;
770  }
771  _request->_havenewjob = true;
772  _off = _blkstart;
773  _size = _blksize;
774  if (_request->_blklist)
775  _request->_blklist->createDigest(_dig); // resets digest
776  _state = WORKER_FETCH;
777 
778  double now = currentTime();
779  _blkstarttime = now;
780  _blkreceived = 0;
781 }
782 
783 
785 
786 
787 multifetchrequest::multifetchrequest(const MediaMultiCurl *context, const Pathname &filename, const Url &baseurl, CURLM *multi, FILE *fp, callback::SendReport<DownloadProgressReport> *report, MediaBlockList *blklist, off_t filesize) : _context(context), _filename(filename), _baseurl(baseurl)
788 {
789  _fp = fp;
790  _report = report;
791  _blklist = blklist;
792  _filesize = filesize;
793  _multi = multi;
794  _stealing = false;
795  _havenewjob = false;
796  _blkno = 0;
797  if (_blklist)
798  _blkoff = _blklist->getBlock(0).off;
799  else
800  _blkoff = 0;
801  _activeworkers = 0;
802  _lookupworkers = 0;
803  _sleepworkers = 0;
804  _minsleepuntil = 0;
805  _finished = false;
806  _fetchedsize = 0;
807  _fetchedgoodsize = 0;
808  _totalsize = 0;
809  _lastperiodstart = _lastprogress = _starttime = currentTime();
810  _lastperiodfetched = 0;
811  _periodavg = 0;
812  _timeout = 0;
813  _connect_timeout = 0;
814  _maxspeed = 0;
815  _maxworkers = 0;
816  if (blklist)
817  {
818  for (size_t blkno = 0; blkno < blklist->numBlocks(); blkno++)
819  {
820  MediaBlock blk = blklist->getBlock(blkno);
821  _totalsize += blk.size;
822  }
823  }
824  else if (filesize != off_t(-1))
825  _totalsize = filesize;
826 }
827 
828 multifetchrequest::~multifetchrequest()
829 {
830  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
831  {
832  multifetchworker *worker = *workeriter;
833  *workeriter = NULL;
834  delete worker;
835  }
836  _workers.clear();
837 }
838 
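// Main fetch loop: spawn up to _maxworkers workers (one per mirror URL), feed
// their curl easy handles and DNS-checker pipes into one select() call, let
// curl_multi_perform drive the transfers, then collect finished blocks, verify
// checksums, put slow workers to sleep and apply the optional download rate
// limit before handing out the next jobs.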
839 void
840 multifetchrequest::run(std::vector<Url> &urllist)
841 {
842  int workerno = 0;
843  std::vector<Url>::iterator urliter = urllist.begin();
844  for (;;)
845  {
846  fd_set rset, wset, xset;
847  int maxfd, nqueue;
848 
849  if (_finished)
850  {
851  XXX << "finished!" << endl;
852  break;
853  }
854 
855  if ((int)_activeworkers < _maxworkers && urliter != urllist.end() && _workers.size() < MAXURLS)
856  {
857  // spawn another worker!
858  multifetchworker *worker = new multifetchworker(workerno++, *this, *urliter);
859  _workers.push_back(worker);
860  if (worker->_state != WORKER_BROKEN)
861  {
862  _activeworkers++;
863  if (worker->_state != WORKER_LOOKUP)
864  {
865  worker->nextjob();
866  }
867  else
868  _lookupworkers++;
869  }
870  ++urliter;
871  continue;
872  }
873  if (!_activeworkers)
874  {
875  WAR << "No more active workers!" << endl;
876  // show the first worker error we find
877  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
878  {
879  if ((*workeriter)->_state != WORKER_BROKEN)
880  continue;
881  ZYPP_THROW(MediaCurlException(_baseurl, "Server error", (*workeriter)->_curlError));
882  }
883  break;
884  }
885 
886  FD_ZERO(&rset);
887  FD_ZERO(&wset);
888  FD_ZERO(&xset);
889 
890  curl_multi_fdset(_multi, &rset, &wset, &xset, &maxfd);
891 
892  if (_lookupworkers)
893  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
894  (*workeriter)->adddnsfd(rset, maxfd);
895 
896  timeval tv;
897  // if we added a new job we have to call multi_perform once
898  // to make it show up in the fd set. do not sleep in this case.
899  tv.tv_sec = 0;
900  tv.tv_usec = _havenewjob ? 0 : 200000;
901  if (_sleepworkers && !_havenewjob)
902  {
903  if (_minsleepuntil == 0)
904  {
905  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
906  {
907  multifetchworker *worker = *workeriter;
908  if (worker->_state != WORKER_SLEEP)
909  continue;
910  if (!_minsleepuntil || _minsleepuntil > worker->_sleepuntil)
911  _minsleepuntil = worker->_sleepuntil;
912  }
913  }
914  double sl = _minsleepuntil - currentTime();
915  if (sl < 0)
916  {
917  sl = 0;
918  _minsleepuntil = 0;
919  }
920  if (sl < .2)
921  tv.tv_usec = sl * 1000000;
922  }
923  int r = select(maxfd + 1, &rset, &wset, &xset, &tv);
924  if (r == -1 && errno != EINTR)
925  ZYPP_THROW(MediaCurlException(_baseurl, "select() failed", "unknown error"));
926  if (r != 0 && _lookupworkers)
927  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
928  {
929  multifetchworker *worker = *workeriter;
930  if (worker->_state != WORKER_LOOKUP)
931  continue;
932  (*workeriter)->dnsevent(rset);
933  if (worker->_state != WORKER_LOOKUP)
934  _lookupworkers--;
935  }
936  _havenewjob = false;
937 
938  // run curl
939  for (;;)
940  {
941  CURLMcode mcode;
942  int tasks;
943  mcode = curl_multi_perform(_multi, &tasks);
944  if (mcode == CURLM_CALL_MULTI_PERFORM)
945  continue;
946  if (mcode != CURLM_OK)
947  ZYPP_THROW(MediaCurlException(_baseurl, "curl_multi_perform", "unknown error"));
948  break;
949  }
950 
951  double now = currentTime();
952 
953  // update periodavg
954  if (now > _lastperiodstart + .5)
955  {
956  if (!_periodavg)
957  _periodavg = (_fetchedsize - _lastperiodfetched) / (now - _lastperiodstart);
958  else
959  _periodavg = (_periodavg + (_fetchedsize - _lastperiodfetched) / (now - _lastperiodstart)) / 2;
960  _lastperiodfetched = _fetchedsize;
961  _lastperiodstart = now;
962  }
963 
964  // wake up sleepers
965  if (_sleepworkers)
966  {
967  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
968  {
969  multifetchworker *worker = *workeriter;
970  if (worker->_state != WORKER_SLEEP)
971  continue;
972  if (worker->_sleepuntil > now)
973  continue;
974  if (_minsleepuntil == worker->_sleepuntil)
975  _minsleepuntil = 0;
976  XXX << "#" << worker->_workerno << ": sleep done, wake up" << endl;
977  _sleepworkers--;
978  // nextjob changes the state
979  worker->nextjob();
980  }
981  }
982 
983  // collect all curl results, reschedule new jobs
984  CURLMsg *msg;
985  while ((msg = curl_multi_info_read(_multi, &nqueue)) != 0)
986  {
987  if (msg->msg != CURLMSG_DONE)
988  continue;
989  CURL *easy = msg->easy_handle;
990  CURLcode cc = msg->data.result;
991  multifetchworker *worker;
992  if (curl_easy_getinfo(easy, CURLINFO_PRIVATE, &worker) != CURLE_OK)
993  ZYPP_THROW(MediaCurlException(_baseurl, "curl_easy_getinfo", "unknown error"));
994  if (worker->_blkreceived && now > worker->_blkstarttime)
995  {
996  if (worker->_avgspeed)
997  worker->_avgspeed = (worker->_avgspeed + worker->_blkreceived / (now - worker->_blkstarttime)) / 2;
998  else
999  worker->_avgspeed = worker->_blkreceived / (now - worker->_blkstarttime);
1000  }
1001  XXX << "#" << worker->_workerno << ": BLK " << worker->_blkno << " done code " << cc << " speed " << worker->_avgspeed << endl;
1002  curl_multi_remove_handle(_multi, easy);
1003  if (cc == CURLE_HTTP_RETURNED_ERROR)
1004  {
1005  long statuscode = 0;
1006  (void)curl_easy_getinfo(easy, CURLINFO_RESPONSE_CODE, &statuscode);
1007  XXX << "HTTP status " << statuscode << endl;
1008  if (statuscode == 416 && !_blklist) /* Range error */
1009  {
1010  if (_filesize == off_t(-1))
1011  {
1012  if (!worker->_noendrange)
1013  {
1014  XXX << "#" << worker->_workerno << ": retrying with no end range" << endl;
1015  worker->_noendrange = true;
1016  worker->run();
1017  continue;
1018  }
1019  worker->_noendrange = false;
1020  worker->stealjob();
1021  continue;
1022  }
1023  if (worker->_blkstart >= _filesize)
1024  {
1025  worker->nextjob();
1026  continue;
1027  }
1028  }
1029  }
1030  if (cc == 0)
1031  {
1032  if (!worker->checkChecksum())
1033  {
1034  WAR << "#" << worker->_workerno << ": checksum error, disable worker" << endl;
1035  worker->_state = WORKER_BROKEN;
1036  strncpy(worker->_curlError, "checksum error", CURL_ERROR_SIZE);
1037  _activeworkers--;
1038  continue;
1039  }
1040  if (worker->_state == WORKER_FETCH)
1041  {
1042  if (worker->_competing)
1043  {
1044  worker->disableCompetition();
1045  // multiple workers wrote into this block. We already know that our
1046  // data was correct, but maybe some other worker overwrote our data
1047  // with something broken. Thus we have to re-check the block.
1048  if (!worker->recheckChecksum())
1049  {
1050  XXX << "#" << worker->_workerno << ": recheck checksum error, refetch block" << endl;
1051  // re-fetch! No need to worry about the bad workers,
1052  // they will now be set to DISCARD. At the end of their block
1053  // they will notice that they wrote bad data and go into BROKEN.
1054  worker->run();
1055  continue;
1056  }
1057  }
1058  _fetchedgoodsize += worker->_blksize;
1059  }
1060 
1061  // make bad workers sleep a little
1062  double maxavg = 0;
1063  int maxworkerno = 0;
1064  int numbetter = 0;
1065  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
1066  {
1067  multifetchworker *oworker = *workeriter;
1068  if (oworker->_state == WORKER_BROKEN)
1069  continue;
1070  if (oworker->_avgspeed > maxavg)
1071  {
1072  maxavg = oworker->_avgspeed;
1073  maxworkerno = oworker->_workerno;
1074  }
1075  if (oworker->_avgspeed > worker->_avgspeed)
1076  numbetter++;
1077  }
1078  if (maxavg && !_stealing)
1079  {
1080  double ratio = worker->_avgspeed / maxavg;
1081  ratio = 1 - ratio;
1082  if (numbetter < 3) // don't sleep that much if we're in the top two
1083  ratio = ratio * ratio;
1084  if (ratio > .01)
1085  {
1086  XXX << "#" << worker->_workerno << ": too slow ("<< ratio << ", " << worker->_avgspeed << ", #" << maxworkerno << ": " << maxavg << "), going to sleep for " << ratio * 1000 << " ms" << endl;
1087  worker->_sleepuntil = now + ratio;
1088  worker->_state = WORKER_SLEEP;
1089  _sleepworkers++;
1090  continue;
1091  }
1092  }
1093 
1094  // do rate control (if requested)
1095  // should use periodavg, but that's not what libcurl does
1096  if (_maxspeed && now > _starttime)
1097  {
1098  double avg = _fetchedsize / (now - _starttime);
1099  avg = worker->_maxspeed * _maxspeed / avg;
1100  if (avg < _maxspeed / _maxworkers)
1101  avg = _maxspeed / _maxworkers;
1102  if (avg > _maxspeed)
1103  avg = _maxspeed;
1104  if (avg < 1024)
1105  avg = 1024;
1106  worker->_maxspeed = avg;
1107 #if CURLVERSION_AT_LEAST(7,15,5)
1108  curl_easy_setopt(worker->_curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)(avg));
1109 #endif
1110  }
1111 
1112  worker->nextjob();
1113  }
1114  else
1115  {
1116  worker->_state = WORKER_BROKEN;
1117  _activeworkers--;
1118  if (!_activeworkers && !(urliter != urllist.end() && _workers.size() < MAXURLS))
1119  {
1120  // end of workers reached! goodbye!
1121  worker->evaluateCurlCode(Pathname(), cc, false);
1122  }
1123  }
1124 
1125  if ( _filesize > 0 && _fetchedgoodsize > _filesize ) {
1126  ZYPP_THROW(MediaFileSizeExceededException(_baseurl, _filesize));
1127  }
1128  }
1129 
1130  // send report
1131  if (_report)
1132  {
1133  int percent = _totalsize ? (100 * (_fetchedgoodsize + _fetchedsize)) / (_totalsize + _fetchedsize) : 0;
1134 
1135  double avg = 0;
1136  if (now > _starttime)
1137  avg = _fetchedsize / (now - _starttime);
1138  if (!(*(_report))->progress(percent, _baseurl, avg, _lastperiodstart == _starttime ? avg : _periodavg))
1139  ZYPP_THROW(MediaCurlException(_baseurl, "User abort", "cancelled"));
1140  }
1141 
1142  if (_timeout && now - _lastprogress > _timeout)
1143  break;
1144  }
1145 
1146  if (!_finished)
1147  ZYPP_THROW(MediaTimeoutException(_baseurl));
1148 
1149  // print some download stats
1150  WAR << "overall result" << endl;
1151  for (std::list<multifetchworker *>::iterator workeriter = _workers.begin(); workeriter != _workers.end(); ++workeriter)
1152  {
1153  multifetchworker *worker = *workeriter;
1154  WAR << "#" << worker->_workerno << ": state: " << worker->_state << " received: " << worker->_received << " url: " << worker->_url << endl;
1155  }
1156 }
1157 
1158 
1160 
1161 
1162 MediaMultiCurl::MediaMultiCurl(const Url &url_r, const Pathname & attach_point_hint_r)
1163  : MediaCurl(url_r, attach_point_hint_r)
1164 {
1165  MIL << "MediaMultiCurl::MediaMultiCurl(" << url_r << ", " << attach_point_hint_r << ")" << endl;
1166  _multi = 0;
1167  _customHeadersMetalink = 0;
1168 }
1169 
1170 MediaMultiCurl::~MediaMultiCurl()
1171 {
1172  if (_customHeadersMetalink)
1173  {
1174  curl_slist_free_all(_customHeadersMetalink);
1175  _customHeadersMetalink = 0;
1176  }
1177  if (_multi)
1178  {
1179  curl_multi_cleanup(_multi);
1180  _multi = 0;
1181  }
1182  std::map<std::string, CURL *>::iterator it;
1183  for (it = _easypool.begin(); it != _easypool.end(); it++)
1184  {
1185  CURL *easy = it->second;
1186  if (easy)
1187  {
1188  curl_easy_cleanup(easy);
1189  it->second = NULL;
1190  }
1191  }
1192 }
1193 
1194 void MediaMultiCurl::setupEasy()
1195 {
1196  MediaCurl::setupEasy();
1197 
1198  if (_customHeadersMetalink)
1199  {
1200  curl_slist_free_all(_customHeadersMetalink);
1201  _customHeadersMetalink = 0;
1202  }
1203  struct curl_slist *sl = _customHeaders;
1204  for (; sl; sl = sl->next)
1205  _customHeadersMetalink = curl_slist_append(_customHeadersMetalink, sl->data);
1206  _customHeadersMetalink = curl_slist_append(_customHeadersMetalink, "Accept: */*, application/metalink+xml, application/metalink4+xml");
1207 }
1208 
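// Sniff the first bytes of an open file (skipping a leading XML declaration)
// to decide whether the server sent a metalink document instead of the
// requested file.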
1209 static bool looks_like_metalink_fd(int fd)
1210 {
1211  char buf[256], *p;
1212  int l;
1213  while ((l = pread(fd, buf, sizeof(buf) - 1, (off_t)0)) == -1 && errno == EINTR)
1214  ;
1215  if (l == -1)
1216  return 0;
1217  buf[l] = 0;
1218  p = buf;
1219  while (*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n')
1220  p++;
1221  if (!strncasecmp(p, "<?xml", 5))
1222  {
1223  while (*p && *p != '>')
1224  p++;
1225  if (*p == '>')
1226  p++;
1227  while (*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n')
1228  p++;
1229  }
1230  bool ret = !strncasecmp(p, "<metalink", 9) ? true : false;
1231  return ret;
1232 }
1233 
1234 static bool looks_like_metalink(const Pathname & file)
1235 {
1236  int fd;
1237  if ((fd = open(file.asString().c_str(), O_RDONLY|O_CLOEXEC)) == -1)
1238  return false;
1239  bool ret = looks_like_metalink_fd(fd);
1240  close(fd);
1241  DBG << "looks_like_metalink(" << file << "): " << ret << endl;
1242  return ret;
1243 }
1244 
1245 // here we try to suppress all progress coming from a metalink download
1246 // bsc#1021291: Nevertheless send alive trigger (without stats), so UIs
1247 // are able to abort a hanging metalink download via callback response.
1248 int MediaMultiCurl::progressCallback( void *clientp, double dltotal, double dlnow, double ultotal, double ulnow)
1249 {
1250  CURL *_curl = MediaCurl::progressCallback_getcurl(clientp);
1251  if (!_curl)
1252  return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
1253 
1254  // bsc#408814: Don't report any sizes before we have data on disk. Data reported
1255  // due to redirection etc. are not interesting, but may disturb filesize checks.
1256  FILE *fp = 0;
1257  if ( curl_easy_getinfo( _curl, CURLINFO_PRIVATE, &fp ) != CURLE_OK || !fp )
1258  return MediaCurl::aliveCallback( clientp, dltotal, dlnow, ultotal, ulnow );
1259  if ( ftell( fp ) == 0 )
1260  return MediaCurl::aliveCallback( clientp, dltotal, 0.0, ultotal, ulnow );
1261 
1262  // (no longer needed due to the filesize check above?)
1263  // work around curl bug that gives us old data
1264  long httpReturnCode = 0;
1265  if (curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &httpReturnCode ) != CURLE_OK || httpReturnCode == 0)
1266  return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
1267 
1268  char *ptr = NULL;
1269  bool ismetalink = false;
1270  if (curl_easy_getinfo(_curl, CURLINFO_CONTENT_TYPE, &ptr) == CURLE_OK && ptr)
1271  {
1272  string ct = string(ptr);
1273  if (ct.find("application/metalink+xml") == 0 || ct.find("application/metalink4+xml") == 0)
1274  ismetalink = true;
1275  }
1276  if (!ismetalink && dlnow < 256)
1277  {
1278  // can't tell yet, ...
1279  return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
1280  }
1281  if (!ismetalink)
1282  {
1283  fflush(fp);
1284  ismetalink = looks_like_metalink_fd(fileno(fp));
1285  DBG << "looks_like_metalink_fd: " << ismetalink << endl;
1286  }
1287  if (ismetalink)
1288  {
1289  // this is a metalink file, change the expected filesize
1290  MediaCurl::resetExpectedFileSize( clientp, ByteCount( 2, ByteCount::MB) );
1291  // we're downloading the metalink file. Just trigger aliveCallbacks
1292  curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &MediaCurl::aliveCallback);
1293  return MediaCurl::aliveCallback(clientp, dltotal, dlnow, ultotal, ulnow);
1294  }
1295  curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &MediaCurl::progressCallback);
1296  return MediaCurl::progressCallback(clientp, dltotal, dlnow, ultotal, ulnow);
1297 }
1298 
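// Download to a temporary ".new.zypp" file; if the server answers with a
// metalink description, parse it and switch to the multi-mirror block fetch,
// reusing blocks from an existing target, an earlier failed attempt or a
// delta file where possible.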
1299 void MediaMultiCurl::doGetFileCopy( const Pathname & filename , const Pathname & target, callback::SendReport<DownloadProgressReport> & report, const ByteCount &expectedFileSize_r, RequestOptions options ) const
1300 {
1301  Pathname dest = target.absolutename();
1302  if( assert_dir( dest.dirname() ) )
1303  {
1304  DBG << "assert_dir " << dest.dirname() << " failed" << endl;
1305  ZYPP_THROW( MediaSystemException(getFileUrl(filename), "System error on " + dest.dirname().asString()) );
1306  }
1307 
1308  ManagedFile destNew { target.extend( ".new.zypp.XXXXXX" ) };
1309  AutoFILE file;
1310  {
1311  AutoFREE<char> buf { ::strdup( (*destNew).c_str() ) };
1312  if( ! buf )
1313  {
1314  ERR << "out of memory for temp file name" << endl;
1315  ZYPP_THROW(MediaSystemException(getFileUrl(filename), "out of memory for temp file name"));
1316  }
1317 
1318  AutoFD tmp_fd { ::mkostemp( buf, O_CLOEXEC ) };
1319  if( tmp_fd == -1 )
1320  {
1321  ERR << "mkstemp failed for file '" << destNew << "'" << endl;
1322  ZYPP_THROW(MediaWriteException(destNew));
1323  }
1324  destNew = ManagedFile( (*buf), filesystem::unlink );
1325 
1326  file = ::fdopen( tmp_fd, "we" );
1327  if ( ! file )
1328  {
1329  ERR << "fopen failed for file '" << destNew << "'" << endl;
1330  ZYPP_THROW(MediaWriteException(destNew));
1331  }
1332  tmp_fd.resetDispose(); // don't close it here! ::fdopen moved ownership to file
1333  }
1334 
1335  DBG << "dest: " << dest << endl;
1336  DBG << "temp: " << destNew << endl;
1337 
1338  // set IFMODSINCE time condition (no download if not modified)
1339  if( PathInfo(target).isExist() && !(options & OPTION_NO_IFMODSINCE) )
1340  {
1341  curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE);
1342  curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, (long)PathInfo(target).mtime());
1343  }
1344  else
1345  {
1346  curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
1347  curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
1348  }
1349  // change header to include Accept: metalink
1350  curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeadersMetalink);
1351  // change to our own progress function
1352  curl_easy_setopt(_curl, CURLOPT_PROGRESSFUNCTION, &progressCallback);
1353  curl_easy_setopt(_curl, CURLOPT_PRIVATE, (*file) ); // important to pass the FILE* explicitly (passing through varargs)
1354  try
1355  {
1356  MediaCurl::doGetFileCopyFile(filename, dest, file, report, expectedFileSize_r, options);
1357  }
1358  catch (Exception &ex)
1359  {
1360  curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
1361  curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
1362  curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeaders);
1363  curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
1364  ZYPP_RETHROW(ex);
1365  }
1366  curl_easy_setopt(_curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_NONE);
1367  curl_easy_setopt(_curl, CURLOPT_TIMEVALUE, 0L);
1368  curl_easy_setopt(_curl, CURLOPT_HTTPHEADER, _customHeaders);
1369  curl_easy_setopt(_curl, CURLOPT_PRIVATE, (void *)0);
1370  long httpReturnCode = 0;
1371  CURLcode infoRet = curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &httpReturnCode);
1372  if (infoRet == CURLE_OK)
1373  {
1374  DBG << "HTTP response: " + str::numstring(httpReturnCode) << endl;
1375  if ( httpReturnCode == 304
1376  || ( httpReturnCode == 213 && _url.getScheme() == "ftp" ) ) // not modified
1377  {
1378  DBG << "not modified: " << PathInfo(dest) << endl;
1379  return;
1380  }
1381  }
1382  else
1383  {
1384  WAR << "Could not get the response code." << endl;
1385  }
1386 
1387  bool ismetalink = false;
1388 
1389  char *ptr = NULL;
1390  if (curl_easy_getinfo(_curl, CURLINFO_CONTENT_TYPE, &ptr) == CURLE_OK && ptr)
1391  {
1392  string ct = string(ptr);
1393  if (ct.find("application/metalink+xml") == 0 || ct.find("application/metalink4+xml") == 0)
1394  ismetalink = true;
1395  }
1396 
1397  if (!ismetalink)
1398  {
1399  // some proxies do not store the content type, so also look at the file to find
1400  // out if we received a metalink (bnc#649925)
1401  fflush(file);
1402  if (looks_like_metalink(destNew))
1403  ismetalink = true;
1404  }
1405 
1406  if (ismetalink)
1407  {
1408  bool userabort = false;
1409  Pathname failedFile = ZConfig::instance().repoCachePath() / "MultiCurl.failed";
1410  file = nullptr; // explicitly close destNew before the parser reads it.
1411  try
1412  {
1413  MetaLinkParser mlp;
1414  mlp.parse(destNew);
1415  MediaBlockList bl = mlp.getBlockList();
1416  vector<Url> urls = mlp.getUrls();
1417  XXX << bl << endl;
1418  file = fopen((*destNew).c_str(), "w+e");
1419  if (!file)
1420  ZYPP_THROW(MediaWriteException(destNew));
1421  if (PathInfo(target).isExist())
1422  {
1423  XXX << "reusing blocks from file " << target << endl;
1424  bl.reuseBlocks(file, target.asString());
1425  XXX << bl << endl;
1426  }
1427  if (bl.haveChecksum(1) && PathInfo(failedFile).isExist())
1428  {
1429  XXX << "reusing blocks from file " << failedFile << endl;
1430  bl.reuseBlocks(file, failedFile.asString());
1431  XXX << bl << endl;
1432  filesystem::unlink(failedFile);
1433  }
1434  Pathname df = deltafile();
1435  if (!df.empty())
1436  {
1437  XXX << "reusing blocks from file " << df << endl;
1438  bl.reuseBlocks(file, df.asString());
1439  XXX << bl << endl;
1440  }
1441  try
1442  {
1443  multifetch(filename, file, &urls, &report, &bl, expectedFileSize_r);
1444  }
1445  catch (MediaCurlException &ex)
1446  {
1447  userabort = ex.errstr() == "User abort";
1448  ZYPP_RETHROW(ex);
1449  }
1450  }
1451  catch (MediaFileSizeExceededException &ex) {
1452  ZYPP_RETHROW(ex);
1453  }
1454  catch (Exception &ex)
1455  {
1456  // something went wrong. fall back to normal download
1457  file = nullptr; // explicitly close destNew before moving it
1458  if (PathInfo(destNew).size() >= 63336)
1459  {
1460  ::unlink(failedFile.asString().c_str());
1461  filesystem::hardlinkCopy(destNew, failedFile);
1462  }
1463  if (userabort)
1464  {
1465  ZYPP_RETHROW(ex);
1466  }
1467  file = fopen((*destNew).c_str(), "w+e");
1468  if (!file)
1469  ZYPP_THROW(MediaWriteException(destNew));
1470  MediaCurl::doGetFileCopyFile(filename, dest, file, report, expectedFileSize_r, options | OPTION_NO_REPORT_START);
1471  }
1472  }
1473 
1474  if (::fchmod( ::fileno(file), filesystem::applyUmaskTo( 0644 )))
1475  {
1476  ERR << "Failed to chmod file " << destNew << endl;
1477  }
1478 
1479  file.resetDispose(); // we're going to close it manually here
1480  if (::fclose(file))
1481  {
1482  filesystem::unlink(destNew);
1483  ERR << "Fclose failed for file '" << destNew << "'" << endl;
1484  ZYPP_THROW(MediaWriteException(destNew));
1485  }
1486 
1487  if ( rename( destNew, dest ) != 0 )
1488  {
1489  ERR << "Rename failed" << endl;
1490  ZYPP_THROW(MediaWriteException(dest));
1491  }
1492  destNew.resetDispose(); // no more need to unlink it
1493 
1494  DBG << "done: " << PathInfo(dest) << endl;
1495 }
1496 
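// multifetch() drives a multifetchrequest over the filtered mirror list and
// verifies the result against the block list's whole-file digest; when the
// block list has no usable blocks it only performs that digest check.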
1497 void MediaMultiCurl::multifetch(const Pathname & filename, FILE *fp, std::vector<Url> *urllist, callback::SendReport<DownloadProgressReport> *report, MediaBlockList *blklist, off_t filesize) const
1498 {
1499  Url baseurl(getFileUrl(filename));
1500  if (blklist && filesize == off_t(-1) && blklist->haveFilesize())
1501  filesize = blklist->getFilesize();
1502  if (blklist && !blklist->haveBlocks() && filesize != 0)
1503  blklist = 0;
1504  if (blklist && (filesize == 0 || !blklist->numBlocks()))
1505  {
1506  checkFileDigest(baseurl, fp, blklist);
1507  return;
1508  }
1509  if (filesize == 0)
1510  return;
1511  if (!_multi)
1512  {
1513  _multi = curl_multi_init();
1514  if (!_multi)
1515  ZYPP_THROW(MediaCurlInitException(baseurl));
1516  }
1517 
1518  multifetchrequest req(this, filename, baseurl, _multi, fp, report, blklist, filesize);
1519  req._timeout = _settings.timeout();
1520  req._connect_timeout = _settings.connectTimeout();
1521  req._maxspeed = _settings.maxDownloadSpeed();
1522  req._maxworkers = _settings.maxConcurrentConnections();
1523  if (req._maxworkers > MAXURLS)
1524  req._maxworkers = MAXURLS;
1525  if (req._maxworkers <= 0)
1526  req._maxworkers = 1;
1527  std::vector<Url> myurllist;
1528  for (std::vector<Url>::iterator urliter = urllist->begin(); urliter != urllist->end(); ++urliter)
1529  {
1530  try
1531  {
1532  string scheme = urliter->getScheme();
1533  if (scheme == "http" || scheme == "https" || scheme == "ftp" || scheme == "tftp")
1534  {
1535  checkProtocol(*urliter);
1536  myurllist.push_back(internal::propagateQueryParams(*urliter, _url));
1537  }
1538  }
1539  catch (...)
1540  {
1541  }
1542  }
1543  if (!myurllist.size())
1544  myurllist.push_back(baseurl);
1545  req.run(myurllist);
1546  checkFileDigest(baseurl, fp, blklist);
1547 }
1548 
1549 void MediaMultiCurl::checkFileDigest(Url &url, FILE *fp, MediaBlockList *blklist) const
1550 {
1551  if (!blklist || !blklist->haveFileChecksum())
1552  return;
1553  if (fseeko(fp, off_t(0), SEEK_SET))
1554  ZYPP_THROW(MediaCurlException(url, "fseeko", "seek error"));
1555  Digest dig;
1556  blklist->createFileDigest(dig);
1557  char buf[4096];
1558  size_t l;
1559  while ((l = fread(buf, 1, sizeof(buf), fp)) > 0)
1560  dig.update(buf, l);
1561  if (!blklist->verifyFileDigest(dig))
1562  ZYPP_THROW(MediaCurlException(url, "file verification failed", "checksum error"));
1563 }
1564 
1565 bool MediaMultiCurl::isDNSok(const string &host) const
1566 {
1567  return _dnsok.find(host) == _dnsok.end() ? false : true;
1568 }
1569 
1570 void MediaMultiCurl::setDNSok(const string &host) const
1571 {
1572  _dnsok.insert(host);
1573 }
1574 
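// A small per-host pool of curl easy handles: workers return their handle to
// the pool on destruction and later workers for the same host pick it up
// again, which lets curl reuse connections where possible.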
1575 CURL *MediaMultiCurl::fromEasyPool(const string &host) const
1576 {
1577  if (_easypool.find(host) == _easypool.end())
1578  return 0;
1579  CURL *ret = _easypool[host];
1580  _easypool.erase(host);
1581  return ret;
1582 }
1583 
1584 void MediaMultiCurl::toEasyPool(const std::string &host, CURL *easy) const
1585 {
1586  CURL *oldeasy = _easypool[host];
1587  _easypool[host] = easy;
1588  if (oldeasy)
1589  curl_easy_cleanup(oldeasy);
1590 }
1591 
1592  } // namespace media
1593 } // namespace zypp
1594 
virtual void doGetFileCopy(const Pathname &srcFilename, const Pathname &targetFilename, callback::SendReport< DownloadProgressReport > &_report, const ByteCount &expectedFileSize_r, RequestOptions options=OPTION_NONE) const override
Definition: MediaMultiCurl.cc:1299
WORKER_STARTING
#define WORKER_STARTING
Definition: MediaMultiCurl.cc:105
zypp::filesystem::df
ByteCount df(const Pathname &path_r)
Report free disk space on a mounted file system.
Definition: PathInfo.cc:1103
zypp::media::MediaBlockList::haveChecksum
bool haveChecksum(size_t blkno) const
Definition: MediaBlockList.h:98
ZYPP_RETHROW
#define ZYPP_RETHROW(EXCPT)
Drops a logline and rethrows, updating the CodeLocation.
Definition: Exception.h:400
zypp::AutoDispose
Reference counted access to a Tp object calling a custom Dispose function when the last AutoDispose h...
Definition: AutoDispose.h:92
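AutoDispose (and the AutoFD/AutoFILE typedefs listed above) is the RAII helper the download code uses to close descriptors and temporary files. A minimal usage sketch, assuming the (value, dispose-functor) constructor and the resetDispose() member shown in this index; the include path, file name and function name are illustrative only:

  #include <fcntl.h>
  #include <unistd.h>
  #include "zypp/AutoDispose.h"   // assumed include path

  void autoDisposeSketch()
  {
    // Tie a raw file descriptor to scope; the dispose functor runs when the
    // last AutoDispose handle referencing this value goes away.
    zypp::AutoDispose<int> fd( ::open( "/tmp/example", O_RDONLY ),
                               []( int f ) { if ( f >= 0 ) ::close( f ); } );
    // ... use the descriptor via value() ...
    // fd.resetDispose();   // would drop the cleanup if ownership moves elsewhere
  }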
zypp::media::multifetchworker::_blksize
size_t _blksize
Definition: MediaMultiCurl.cc:73
zypp::media::MediaBlockList::getBlock
MediaBlock getBlock(size_t blkno) const
return the offset/size of a block with number blkno
Definition: MediaBlockList.h:56
zypp::filesystem::Pathname::dirname
Pathname dirname() const
Return all but the last component of this path.
Definition: Pathname.h:124
zypp::media::multifetchrequest::_report
callback::SendReport< DownloadProgressReport > * _report
Definition: MediaMultiCurl.cc:130
std
Definition: Arch.h:347
zypp::media::MediaCurl::clearQueryString
Url clearQueryString(const Url &url) const
Definition: MediaCurl.cc:216
zypp::filesystem::Pathname::extend
Pathname extend(const std::string &r) const
Append string r to the last component of the path.
Definition: Pathname.h:170
zypp::Digest::update
bool update(const char *bytes, size_t len)
feed data into digest computation algorithm
Definition: Digest.cc:225
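Digest::update() is how downloaded data is fed into checksum verification. A hedged sketch of the usual create()/update()/digest() sequence from zypp/Digest.h; the algorithm name and helper function are assumptions for illustration:

  #include <string>
  #include "zypp/Digest.h"   // assumed include path

  std::string digestSketch( const std::string & data )
  {
    zypp::Digest d;
    if ( !d.create( "sha256" ) )            // select the hash algorithm
      return std::string();
    d.update( data.data(), data.size() );   // feed data incrementally
    return d.digest();                      // hex string of the final checksum
  }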
MetaLinkParser.h
zypp::media::multifetchrequest::_filesize
off_t _filesize
Definition: MediaMultiCurl.cc:132
zypp::media::TransferSettings::setAuthType
void setAuthType(std::string &&val_r)
set the allowed authentication types
Definition: TransferSettings.cc:250
zypp::filesystem::Pathname
Pathname.
Definition: Pathname.h:44
zypp::media::multifetchrequest::_baseurl
Url _baseurl
Definition: MediaMultiCurl.cc:127
zypp::Url::getHost
std::string getHost(EEncoding eflag=zypp::url::E_DECODED) const
Returns the hostname or IP from the URL authority.
Definition: Url.cc:583
ERR
#define ERR
Definition: Logger.h:81
zypp::base
Definition: DrunkenBishop.cc:24
zypp::media::MediaHandler::_url
const Url _url
Url to handle.
Definition: MediaHandler.h:110
zypp::media::multifetchrequest::_lastprogress
double _lastprogress
Definition: MediaMultiCurl.cc:152
DBG
#define DBG
Definition: Logger.h:78
zypp::media::TransferSettings::setPassword
void setPassword(std::string &&val_r)
sets the auth password
Definition: TransferSettings.cc:115
zypp::media::MediaBlockList::haveFileChecksum
bool haveFileChecksum() const
Definition: MediaBlockList.h:86
zypp::media::MediaBlock::size
size_t size
Definition: MediaBlockList.h:32
zypp::media::CurlAuthData::auth_type_str2long
static long auth_type_str2long(std::string &auth_type_str)
Converts a comma-separated list of authentication type names into a long of ORed CURLAUTH_* ...
Definition: MediaUserAuth.cc:94
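auth_type_str2long() turns a comma-separated auth-type string from the transfer settings into the bit mask libcurl expects. A sketch under the assumption that the declaring header is zypp/media/MediaUserAuth.h and that the result feeds CURLOPT_HTTPAUTH, the typical consumer of such a mask; the input string is made up:

  #include <string>
  #include <curl/curl.h>
  #include "zypp/media/MediaUserAuth.h"   // assumed include path

  long authBitsSketch( CURL * curl )
  {
    std::string types( "basic,digest" );   // hypothetical settings value
    long bits = zypp::media::CurlAuthData::auth_type_str2long( types );
    curl_easy_setopt( curl, CURLOPT_HTTPAUTH, bits );   // hand the ORed CURLAUTH_* bits to curl
    return bits;
  }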
zypp::media::MediaMultiCurl::MediaMultiCurl
MediaMultiCurl(const Url &url_r, const Pathname &attach_point_hint_r)
Definition: MediaMultiCurl.cc:1162
zypp::media::multifetchrequest::_timeout
double _timeout
Definition: MediaMultiCurl.cc:159
zypp::media::multifetchworker::checkdns
void checkdns()
Definition: MediaMultiCurl.cc:427
zypp::media::TransferSettings::connectTimeout
long connectTimeout() const
connection timeout
Definition: TransferSettings.cc:183
zypp::media::MediaMultiCurl::isDNSok
bool isDNSok(const std::string &host) const
Definition: MediaMultiCurl.cc:1565
XXX
#define XXX
Definition: Logger.h:77
zypp::media::multifetchrequest::_workers
std::list< multifetchworker * > _workers
Definition: MediaMultiCurl.cc:136
WORKER_FETCH
#define WORKER_FETCH
Definition: MediaMultiCurl.cc:107
ManagedFile.h
zypp::AutoDispose::resetDispose
void resetDispose()
Set no dispose function.
Definition: AutoDispose.h:162
zypp::filesystem::applyUmaskTo
mode_t applyUmaskTo(mode_t mode_r)
Modify mode_r according to the current umask ( mode_r & ~getUmask() ).
Definition: PathInfo.h:813
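applyUmaskTo() is exactly the mode_r & ~getUmask() expression quoted above; a worked instance, assuming the include path below and a conventional process umask of 022:

  #include <sys/types.h>
  #include "zypp/PathInfo.h"   // assumed include path

  // Under umask 022: 0666 & ~0022 == 0644, i.e. group/other write bits are dropped.
  mode_t umaskSketch()
  { return zypp::filesystem::applyUmaskTo( 0666 ); }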
zypp::media::MediaCurl::_curl
CURL * _curl
Definition: MediaCurl.h:176
zypp::filesystem::hardlinkCopy
int hardlinkCopy(const Pathname &oldpath, const Pathname &newpath)
Create newpath as a hardlink or copy of oldpath.
Definition: PathInfo.cc:831
zypp::media::multifetchrequest::_fp
FILE * _fp
Definition: MediaMultiCurl.cc:129
CurlHelper.h
zypp::media::multifetchworker::_received
off_t _received
Definition: MediaMultiCurl.cc:78
zypp::filesystem::Pathname::asString
const std::string & asString() const
String representation.
Definition: Pathname.h:91
zypp::media::multifetchrequest::multifetchrequest
multifetchrequest(const MediaMultiCurl *context, const Pathname &filename, const Url &baseurl, CURLM *multi, FILE *fp, callback::SendReport< DownloadProgressReport > *report, MediaBlockList *blklist, off_t filesize)
Definition: MediaMultiCurl.cc:787
url
Url url
Definition: MediaCurl.cc:65
zypp::Url
Url manipulation class.
Definition: Url.h:87
zypp::media::MediaCurl::OPTION_NO_REPORT_START
do not send a start ProgressReport
Definition: MediaCurl.h:46
zypp::media::multifetchrequest::_totalsize
off_t _totalsize
Definition: MediaMultiCurl.cc:147
zypp::media::multifetchrequest::_starttime
double _starttime
Definition: MediaMultiCurl.cc:151
zypp::media::multifetchrequest::_blkoff
off_t _blkoff
Definition: MediaMultiCurl.cc:141
zypp::media::TransferSettings::proxy
std::string proxy() const
proxy host
Definition: TransferSettings.cc:147
zypp::media::TransferSettings::password
std::string password() const
auth password
Definition: TransferSettings.cc:118
zypp::media::multifetchworker::headerfunction
size_t headerfunction(char *ptr, size_t size)
Definition: MediaMultiCurl.cc:250
zypp::media::MediaWriteException
Definition: MediaException.h:162
zypp::media::MediaCurl::_settings
TransferSettings _settings
Definition: MediaCurl.h:179
zypp::media::multifetchworker::_blkstart
off_t _blkstart
Definition: MediaMultiCurl.cc:72
zypp::media::multifetchworker::_dnspipe
int _dnspipe
Definition: MediaMultiCurl.cc:102
zypp::media::multifetchrequest::_lookupworkers
size_t _lookupworkers
Definition: MediaMultiCurl.cc:143
zypp::media::multifetchworker::_pid
pid_t _pid
Definition: MediaMultiCurl.cc:101
zypp::media::multifetchrequest::_sleepworkers
size_t _sleepworkers
Definition: MediaMultiCurl.cc:144
zypp::media::multifetchrequest::_context
const MediaMultiCurl * _context
Definition: MediaMultiCurl.cc:125
zypp::media::MediaMultiCurl::_easypool
std::map< std::string, CURL * > _easypool
Definition: MediaMultiCurl.h:75
zypp::media::MediaMultiCurl::multifetch
void multifetch(const Pathname &filename, FILE *fp, std::vector< Url > *urllist, callback::SendReport< DownloadProgressReport > *report=0, MediaBlockList *blklist=0, off_t filesize=off_t(-1)) const
Definition: MediaMultiCurl.cc:1497
zypp::media::multifetchworker::adddnsfd
void adddnsfd(fd_set &rset, int &maxfd)
Definition: MediaMultiCurl.cc:505
zypp::media::multifetchworker::run
void run()
Definition: MediaMultiCurl.cc:746
zypp::media::multifetchrequest::_blklist
MediaBlockList * _blklist
Definition: MediaMultiCurl.cc:131
zypp::media::TransferSettings::maxDownloadSpeed
long maxDownloadSpeed() const
Maximum download speed (bytes per second)
Definition: TransferSettings.cc:204