return url
-#######################################################################
-
def fetch_with_wget(loc, mydigests, type, host, path, user, pswd):
    """Download 'loc' into DL_DIR using the configured external command.

    Uses FETCHCOMMAND for a fresh download and RESUMECOMMAND to resume a
    partial one (the original code had these two swapped relative to its
    own comments; fixed here).  'mydigests' maps file names to
    {"md5": ..., "size": ...} records parsed from the digest file.

    Returns:
        2 if the file is (or was already) fully downloaded,
        1 if this fetch failed but another mirror may be tried,
        0 on failure.
    """
    myfile = os.path.basename(path)
    dlfile = os.path.join(getenv("DL_DIR"), myfile)

    try:
        mystat = os.stat(dlfile)
        if myfile in mydigests:
            # We know the final size from the digest, so a shorter file
            # means a resumable partial download.
            if mystat[ST_SIZE] < mydigests[myfile]["size"]:
                fetched = 1
            else:
                # Already downloaded.  If the file is *bigger* than the
                # recorded size, digestcheck should catch it later.
                fetched = 2
        else:
            # No digest entry but the file exists: assume it is complete.
            fetched = 2
    except (OSError, IOError):
        # File missing or unreadable: start from scratch.
        fetched = 0

    # We either need to resume or start the download.
    if fetched != 2:
        if fetched == 1:
            # Resume an interrupted download.
            note("Resuming download...")
            myfetch = getenv("RESUMECOMMAND")
        else:
            # Fresh download.
            myfetch = getenv("FETCHCOMMAND")
        note("fetch " + loc)
        myfetch = myfetch.replace("${URI}", loc)
        myfetch = myfetch.replace("${FILE}", myfile)
        debug(2, myfetch)
        myret = os.system(myfetch)

        if myfile in mydigests:
            try:
                mystat = os.stat(dlfile)
                # No exception? The file exists; let digestcheck() report
                # size or md5 errors appropriately.
                if myret and (mystat[ST_SIZE] < mydigests[myfile]["size"]):
                    # Fetch failed... Try the next one... Kill 404 files though.
                    if (mystat[ST_SIZE] < 100000) and (len(myfile) > 4) and not ((myfile[-5:] == ".html") or (myfile[-4:] == ".htm")):
                        html404 = re.compile("<title>.*(not found|404).*</title>", re.I | re.M)
                        try:
                            f = open(dlfile)
                            try:
                                contents = f.read()
                            finally:
                                f.close()
                            if html404.search(contents):
                                try:
                                    os.unlink(dlfile)
                                    note("deleting invalid distfile (improper 404 redirect from server)")
                                except (OSError, IOError):
                                    # Best effort: leave the bogus file behind.
                                    pass
                        except (OSError, IOError):
                            # Could not read the file back; nothing to clean.
                            pass
                    return 1
                return 2
            except (OSError, IOError):
                # Command ran but produced no file.
                fetched = 0
        else:
            # No digest to verify against: trust the command's exit code.
            if not myret:
                return 2

    if fetched != 2:
        error("Couldn't download " + myfile)
        return 0
    return fetched
-
-
-#######################################################################
def fetch_with_cvs(mydigests, type,host,path,user,pswd,parm):
    # Placeholder: CVS (and pserver) fetching is not implemented yet.
    # Always aborts via fatal(); parameters are accepted only to match
    # the other fetch_with_* signatures used by fetch().
    fatal('fetch via CVS not yet implemented')
-
-
-#######################################################################
-
def fetch_with_bk(mydigests, type,host,path,user,pswd,parm):
    # Placeholder: BitKeeper fetching is not implemented yet.
    # Always aborts via fatal(); parameters are accepted only to match
    # the other fetch_with_* signatures used by fetch().
    fatal('fetch via BitKeeper not yet implemented')
-
-
-#######################################################################
-
def fetch(urls):
    """Fetch every URL in the whitespace-separated string 'urls'.

    Reads the package digest file (FILESDIR/digest-PF) if present to learn
    the expected md5/size of each distfile, then dispatches each URL to
    the scheme-appropriate fetch_with_* helper.

    Returns 1 when all URLs were fetched (each helper returned 2),
    0 as soon as any URL fails.  Aborts via fatal() for unknown schemes
    or a corrupt digest file.
    """
    digestfn = env["FILESDIR"] + "/digest-" + env["PF"]
    mydigests = {}
    if os.path.exists(digestfn):
        debug(3, "checking digest " + digestfn)
        # Close the digest file promptly instead of leaking the handle.
        myfile = open(digestfn, "r")
        try:
            mylines = myfile.readlines()
        finally:
            myfile.close()
        for x in mylines:
            # Expected format: "MD5 <md5sum> <filename> <size>"
            myline = string.split(x)
            if len(myline) < 4:
                # Invalid line.
                oe.fatal("The digest %s appears to be corrupt" % digestfn)
            try:
                mydigests[myline[2]] = {"md5": myline[1], "size": string.atol(myline[3])}
            except ValueError:
                oe.fatal("The digest %s appears to be corrupt" % digestfn)

    for loc in urls.split():
        debug(2, "fetching %s" % loc)
        (type, host, path, user, pswd, parm) = decodeurl(expand(loc))

        # Dispatch on the URL scheme.
        if type in ['http', 'https', 'ftp']:
            fetched = fetch_with_wget(loc, mydigests, type, host, path, user, pswd)
        elif type in ['cvs', 'pserver']:
            fetched = fetch_with_cvs(mydigests, type, host, path, user, pswd, parm)
        elif type == 'bk':
            fetched = fetch_with_bk(mydigests, type, host, path, user, pswd, parm)
        else:
            fatal("can't fetch with method '%s'" % type)
        if fetched != 2:
            error("Couldn't download " + loc)
            return 0

    return 1
-
#######################################################################
#######################################################################