Python: downloading multiple files one after another

Date: 2010-07-29 06:23:20

Tags: python

The script downloads and saves files with curl inside a loop, but the loop iterates too quickly, so the download-and-save step has no time to finish its work. As a result, the downloaded files end up corrupted.

def get_images_thread(table):
    class LoopThread(threading.Thread):
        def run(self):
            global db
            c = db.cursor()
            c.execute("""SELECT * FROM js_stones ORDER BY stone_id LIMIT 1""")
            ec = EasyCurl(table)

            # Walk the result set and download the image referenced by each row
            while 1:
                stone = c.fetchone()
                if stone is None:
                    break
                img_fname = stone[2]
                print img_fname
                url = "http://www.jstone.it/" + img_fname
                fname = url.strip("/").split("/")[-1].strip()
                ec.perform(url,
                           filename="D:\\Var\\Python\\Jstone\\downloadeble_pictures\\" + fname,
                           progress=ec.textprogress)

2 Answers:

Answer 0 (score: 4)

Here is an excerpt from the examples for the PycURL library:
# Imports needed by this excerpt; in the full example, `urls` and
# `num_conn` are taken from the command-line arguments
import sys
import threading
import Queue
import pycurl

# Make a queue with (url, filename) tuples
queue = Queue.Queue()
for url in urls:
    url = url.strip()
    if not url or url[0] == "#":
        continue
    filename = "doc_%03d.dat" % (len(queue.queue) + 1)
    queue.put((url, filename))


# Check args
assert queue.queue, "no URLs given"
num_urls = len(queue.queue)
num_conn = min(num_conn, num_urls)
assert 1 <= num_conn <= 10000, "invalid number of concurrent connections"
print "PycURL %s (compiled against 0x%x)" % (pycurl.version, pycurl.COMPILE_LIBCURL_VERSION_NUM)
print "----- Getting", num_urls, "URLs using", num_conn, "connections -----"


class WorkerThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while 1:
            try:
                url, filename = self.queue.get_nowait()
            except Queue.Empty:
                raise SystemExit
            fp = open(filename, "wb")
            curl = pycurl.Curl()
            curl.setopt(pycurl.URL, url)
            curl.setopt(pycurl.FOLLOWLOCATION, 1)
            curl.setopt(pycurl.MAXREDIRS, 5)
            curl.setopt(pycurl.CONNECTTIMEOUT, 30)
            curl.setopt(pycurl.TIMEOUT, 300)
            curl.setopt(pycurl.NOSIGNAL, 1)
            curl.setopt(pycurl.WRITEDATA, fp)
            try:
                curl.perform()
            except:
                import traceback
                traceback.print_exc(file=sys.stderr)
                sys.stderr.flush()
            curl.close()
            fp.close()
            sys.stdout.write(".")
            sys.stdout.flush()


# Start a bunch of threads
threads = []
for dummy in range(num_conn):
    t = WorkerThread(queue)
    t.start()
    threads.append(t)


# Wait for all threads to finish
for thread in threads:
    thread.join()
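
To feed this pattern with the URLs from the question, the queue can be filled from the database rows instead of from a URL file. A minimal sketch, assuming the same global `db`, the `js_stones` table, and the target directory from the question (the LIMIT 1 from the question is dropped so that every row gets queued):

# Build the (url, filename) queue from the js_stones rows
queue = Queue.Queue()
c = db.cursor()
c.execute("SELECT * FROM js_stones ORDER BY stone_id")
for stone in c.fetchall():
    img_fname = stone[2]
    url = "http://www.jstone.it/" + img_fname
    fname = url.strip("/").split("/")[-1].strip()
    queue.put((url, "D:\\Var\\Python\\Jstone\\downloadeble_pictures\\" + fname))

Each WorkerThread then blocks in curl.perform() and closes the file before taking the next item, so a download can never be cut short by the loop moving on.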

Answer 1 (score: -1)

If you're asking what I think you're asking,

from time import sleep
sleep(1)

should "fix" your problem (it's about as hacky as it gets!). Documentation here. That said, I'd check that this really is your problem; it seems very unlikely that pausing for a few seconds is what stops the files from being downloaded corrupted. More details would also be nice.

os.waitpid()

might also be of help.
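
os.waitpid() only applies if each download runs in its own child process, for example if the script invokes the curl command-line tool instead of using PycURL. A minimal sketch under that assumption (the URL and filename are placeholders, not from the question):

import os
import subprocess

url = "http://www.jstone.it/some_image.jpg"   # placeholder
fname = "some_image.jpg"                      # placeholder

# Launch the curl binary for one download as a child process
p = subprocess.Popen(["curl", "-o", fname, url])

# Block until that process has exited, so the file is fully written
# before the loop moves on to the next one
os.waitpid(p.pid, 0)   # POSIX; p.wait() is the portable equivalent

On Windows (the paths in the question suggest Windows), use p.wait() instead, since os.waitpid() there expects a process handle rather than a pid.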
