Implementing a simple web crawler with Tornado

The code comes from the examples in the official documentation, but it can still be hard for a Tornado newcomer to read, so I have added comments to make it easier to follow. The code is as follows:

#!/usr/bin/env python
# coding=utf-8

import time
from datetime import timedelta

try:
    from HTMLParser import HTMLParser
    from urlparse import urljoin, urldefrag
except ImportError:
    from html.parser import HTMLParser
    from urllib.parse import urljoin, urldefrag

from tornado import httpclient, gen, ioloop, queues

# The starting URL to crawl
base_url = 'http://www.baidu.com'
# Number of worker coroutines (10 in the official demo)
concurrency = 10
# Fetch the page at `url` and collect all the URLs it links to
@gen.coroutine
def get_links_from_url(url):

    try:
        # Issue an asynchronous HTTP request for the URL
        response = yield httpclient.AsyncHTTPClient().fetch(url)
        print('fetched %s' % url)
        # Decode the response body if it is bytes
        html = response.body if isinstance(response.body, str) \
            else response.body.decode(errors='ignore')
        # Build the list of links, resolved to absolute URLs
        urls = [urljoin(url, remove_fragment(new_url))
                for new_url in get_links(html)]
    except Exception as e:
        print('Exception: %s %s' % (e, url))
        # On error, return an empty list
        raise gen.Return([])
    # Return the extracted URL list
    raise gen.Return(urls)
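
# Note: `raise gen.Return(value)` is how a @gen.coroutine returns a value on
# Python 2; on Python 3.3+ a plain `return urls` inside the coroutine works too.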


def remove_fragment(url):
    # Strip the fragment (the part after '#') from the URL
    pure_url, frag = urldefrag(url)

    return pure_url
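
# For example, urldefrag('http://example.com/a#top') splits the URL into
# ('http://example.com/a', 'top'), so remove_fragment returns 'http://example.com/a'.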


def get_links(html):
    # Extract the URLs from an HTML page
    class URLSeeker(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.urls = []

        def handle_starttag(self, tag, attrs):
            href = dict(attrs).get('href')
            if href and tag == 'a':
                self.urls.append(href)

    url_seeker = URLSeeker()
    url_seeker.feed(html)
    return url_seeker.urls
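
# For example, get_links('<a href="/about">About</a>') returns ['/about'].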


@gen.coroutine
def main():
    # Create the work queue
    q = queues.Queue()
    # Record the start time
    start = time.time()
    # Two sets: URLs currently in flight and URLs already fetched
    fetching, fetched = set(), set()

    @gen.coroutine
    def fetch_url():
        # Pull a URL from the queue
        current_url = yield q.get()
        try:
            # Skip URLs that are already being fetched
            if current_url in fetching:
                return

            print('fetching %s' % current_url)
            # Otherwise, mark the URL as in progress
            fetching.add(current_url)
            # Fetch the page and extract the links it contains
            urls = yield get_links_from_url(current_url)
            # Record the URL as fully fetched in the second set
            fetched.add(current_url)

            for new_url in urls:
                # Only follow links beneath the base URL
                if new_url.startswith(base_url):
                    yield q.put(new_url)

        finally:
            # Mark this queue item as done so q.join() can eventually unblock
            q.task_done()

    @gen.coroutine
    def worker():
        while True:
            # Loop forever so the worker keeps processing URLs
            yield fetch_url()
    # Seed the queue with the starting URL (put on an unbounded queue
    # completes immediately, so no yield is needed here)
    q.put(base_url)

    # Start workers, then wait for the work queue to be empty.
    for _ in range(concurrency):
        # Start a worker; its Future keeps running in the background
        worker()
    # Wait for the queue to drain (the official demo uses a 300-second timeout)
    yield q.join(timeout=timedelta(seconds=300))
    # Sanity check: everything we started fetching was actually fetched
    assert fetching == fetched
    # Report the elapsed time and the number of URLs fetched
    print('Done in %d seconds, fetched %s URLs.' % (
        time.time() - start, len(fetched)))


if __name__ == '__main__':
    io_loop = ioloop.IOLoop.current()
    io_loop.run_sync(main)
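
The example above uses the old `gen.coroutine`/`yield` style. On Tornado 5+ with Python 3.5+, the same structure maps directly onto native coroutines: `yield` becomes `await` and `raise gen.Return(...)` becomes a plain `return`. A minimal sketch under those assumptions, reusing base_url, concurrency, and get_links_from_url from above (the 300-second timeout is likewise carried over):

import time
from datetime import timedelta

from tornado import gen, ioloop, queues


async def main():
    q = queues.Queue()
    start = time.time()
    fetching, fetched = set(), set()

    async def fetch_url(current_url):
        if current_url in fetching:
            return
        fetching.add(current_url)
        # A @gen.coroutine function can be awaited directly
        urls = await get_links_from_url(current_url)
        fetched.add(current_url)
        for new_url in urls:
            if new_url.startswith(base_url):
                await q.put(new_url)

    async def worker():
        # Tornado queues support async iteration
        async for url in q:
            try:
                await fetch_url(url)
            finally:
                q.task_done()

    await q.put(base_url)
    # Start the workers as background Futures
    gen.multi([worker() for _ in range(concurrency)])
    await q.join(timeout=timedelta(seconds=300))
    assert fetching == fetched
    print('Done in %d seconds, fetched %s URLs.' %
          (time.time() - start, len(fetched)))


if __name__ == '__main__':
    ioloop.IOLoop.current().run_sync(main)

Structurally nothing else changes; the queue/worker pattern is identical to the yield-based version.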