
Crawling Zhihu dynamic pages with the Python Scrapy framework

Note: I am a liberal-arts graduate with only two months of serious Python coding experience.

Goal: use the Scrapy framework to crawl this Zhihu search page: https://www.zhihu.com/search?type=content&q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0

Specifically, the content inside the red boxes of the screenshot:

[Screenshot: Zhihu search results for "machine learning", with the target content boxed in red]

Steps:

1. Create the project: cd into the directory where the project should live and run the following command.

scrapy startproject zhihuSpider
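
This generates a project skeleton roughly like the following (the exact layout can vary slightly between Scrapy versions):

zhihuSpider/
    scrapy.cfg            # deploy configuration
    zhihuSpider/          # the project's Python package
        __init__.py
        items.py          # item definitions (step 3)
        middlewares.py    # spider/downloader middlewares (step 4)
        pipelines.py      # item pipelines (step 5)
        settings.py       # project settings (step 6)
        spiders/          # the spiders live here (step 7)
            __init__.py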
           

2. Create the spider: cd into the spiders folder and run the command below. ZH is the spider's name and must be unique; zhihu.com becomes the allowed_domains entry.

$ scrapy genspider ZH zhihu.com
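
The generated spiders/ZH.py starts out as the default template, roughly like this (we will replace it completely in step 7):

# -*- coding: utf-8 -*-
import scrapy


class ZhSpider(scrapy.Spider):
    name = 'ZH'
    allowed_domains = ['zhihu.com']
    start_urls = ['http://zhihu.com/']

    def parse(self, response):
        pass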
           

3. items.py: the data to collect are the question text, the answerer's name and self-introduction, and the answer content.

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy

# Information about each question
class QuesInfoItem(scrapy.Item):
    question = scrapy.Field()        # question text
    author_name = scrapy.Field()     # answerer's name
    author_bio = scrapy.Field()      # answerer's self-introduction
    answer_content = scrapy.Field()  # answer content
           

4. middlewares.py: import the time module to record how long the crawl takes.

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
import time,os


class ZhihuSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        self.startTime = time.time()
        # print('__file__ is %s' % __file__)
        # print ("path ====== %s " % os.path.normcase(__file__))
        print('   Spider started   '.center(50, "*"))
        print(('   Start time: %.2f   ' % self.startTime).center(50, "*"))

    def spider_closed(self, spider):
        self.endTime = time.time()
        _t = self.endTime - self.startTime
        print(('   End time: %.2f   ' % self.endTime).center(50, "*"))
        print(('   Elapsed: %.2f s   ' % _t).center(50, "*"))
        print('   Spider finished   '.center(50, "*"))

class MyproxiesSpiderMiddleware(object):

    def __init__(self):  
        self.ips = []
        
    def process_request(self, request, spider):  
        pass
        # if spider.name == 'question':
        #     ip = "https://116.3.94.128:80"
        #     # print("============ 使用代理 %s ============" % ip)
        #     request.meta["proxy"] = ip
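
The proxy middleware above is only a stub. If a pool of proxies is needed later, a minimal sketch could look like the class below (the proxy addresses are placeholders that would have to be replaced with working ones); it would be registered in DOWNLOADER_MIDDLEWARES just like MyproxiesSpiderMiddleware in step 6.

import random


class RandomProxyMiddleware(object):
    """Hypothetical downloader middleware that picks a random proxy per request."""

    def __init__(self):
        # placeholder proxy pool -- replace with proxies that actually work
        self.ips = ['https://116.3.94.128:80']

    def process_request(self, request, spider):
        if self.ips:
            request.meta['proxy'] = random.choice(self.ips)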
           

5. pipelines.py: add the ZhihuSpiderWriteToCSVPipeline and ZhihuSpiderWriteToDBPipeline classes to save the scraped data to a CSV file and to a SQLite database.

# -*- coding: utf-8 -*-

# Define your item pipelines here
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import os,sys
import sqlite3
from zhihuSpider.items import QuesInfoItem

# Pipeline that writes the data to a CSV file
class ZhihuSpiderWriteToCSVPipeline(object):

    def open_spider(self, spider):
        # print("abs path is %s" %(os.path.abspath(sys.argv[0])))
        
        self.csvFile = open(os.path.abspath('C:/Users/Administrator/Desktop/ZhihuSpider/test.csv'), "w+", newline='', encoding='utf-8')
        try:
            self.write = csv.writer(self.csvFile)
            self.write.writerow(('question', 'author_name', 'author_bio', 'answer_content'))
        except Exception as e:
            pass 

    def close_spider(self, spider):
        self.csvFile.close()

    def process_item(self, item, spider):
        try:
            self.write.writerow((item["question"], item["author_name"], item["author_bio"], item["answer_content"]))
        except BaseException as e:
            pass
            
        return item

# Pipeline that writes the data to a SQLite database
class ZhihuSpiderWriteToDBPipeline(object):

    def open_spider(self, spider):
        try:
            self.conn = sqlite3.connect(os.path.abspath('C:/Users/Administrator/Desktop/ZhihuSpider/test.db'))
            self.cursor = self.conn.cursor()
            # create the table on first run; the columns match the INSERT in process_item
            self.cursor.execute('create table if not exists question '
                                '(question text, author_name text, author_bio text, answer_content text)')
        except BaseException as e:
            pass
            
    def close_spider(self, spider):
        try:
            self.cursor.close()
            self.conn.commit()
            self.conn.close()
        except BaseException as e:
            pass

    def process_item(self, item, spider):
        try:
            if isinstance(item, QuesInfoItem):
                self.cursor.execute('insert into question (question, author_name, author_bio, answer_content) values (?, ?, ?, ?)', (item["question"], item["author_name"], item["author_bio"], item["answer_content"]))
        except BaseException as e:
            print(e)
            pass
            
        return item
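
After a crawl the output can be sanity-checked directly; a small standalone sketch, assuming the same database path as in open_spider above:

import sqlite3

conn = sqlite3.connect('C:/Users/Administrator/Desktop/ZhihuSpider/test.db')
cursor = conn.cursor()
cursor.execute('select count(*) from question')
print('rows stored:', cursor.fetchone()[0])
for row in cursor.execute('select question, author_name from question limit 5'):
    print(row)
conn.close()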
           

6. settings.py: switch on each of the pieces above.

# -*- coding: utf-8 -*-

# Scrapy settings for zhihuSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'zhihuSpider'

LOG_LEVEL = 'WARNING'

SPIDER_MODULES = ['zhihuSpider.spiders']
NEWSPIDER_MODULE = 'zhihuSpider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhihuSpider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'zh-cn',
  'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
   'zhihuSpider.middlewares.ZhihuSpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'zhihuSpider.middlewares.MyproxiesSpiderMiddleware': 544,
}

DOWNLOAD_DELAY = 1

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'zhihuSpider.pipelines.ZhihuSpiderWriteToCSVPipeline': 300,  # must match the class name in pipelines.py
   'zhihuSpider.pipelines.ZhihuSpiderWriteToDBPipeline': 400  # must match the class name in pipelines.py
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
           

7. The centerpiece: ZH.py

1) The code:

# -*- coding: utf-8 -*-
import scrapy
#import requests
from scrapy import Request
from scrapy.spiders import CrawlSpider
import time
import re
import json
from zhihuSpider.items import QuesInfoItem

class ZhSpider(CrawlSpider):
    name = 'ZH'
    allowed_domains = ['zhihu.com']
    # start_urls is the list of entry URLs the spider starts from. One of them will be
    # the first page fetched; later URLs are built from the responses to these.
    start_urls = ['https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0',
    'https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0&correction=1&type=content&offset=30',
    ]

    # parse() is the spider's default callback: the response returned for each initial URL
    # is passed to it as the only argument. It is responsible for parsing the response data,
    # extracting items, and generating Requests for the pages that still need processing.

    def parse(self, response):

        # print('***********************\n',response.body,'***********************\n\n')
        print('************* downloading the JSON *********************')
        # 1. Parse the page and generate items.
        # The response is JSON; the part we want sits under the 'htmls' key as raw HTML
        # strings with no further structured keys, so regular expressions are used to parse it.
        try:
            # print(type(response.body))
            # print(type(response.text))
            jsDict = json.loads(response.body)
            # print(type(jsDict))
            print('************* parsing the page *********************')
            questions = jsDict['htmls']
    
            # Extract every question together with the answerer's name, bio and answer content
            for q in questions:
                item = QuesInfoItem()
                # strip the <em></em> tags that highlight the search keyword in the source
                q = q.replace('<em>','').replace('</em>','')
                # the question text sits between class="js-title-link"> and </a>
                question = re.findall('class=\"js-title-link\">(.*?)</a>',q)[0]
                print(question)
                item['question'] = question

                time.sleep(2)
        
                # the answerer's name sits between data-author-name=" and " data-entry-url="
                try:
                    author_name = re.findall('data-author-name=\"(.*?)\" data-entry-url=\"',q)[0]
                    print('author name:', author_name)
                except:
                    author_name = None
                item['author_name'] = author_name
    
                # the answerer's bio sits between <span title=" and " class="bio">
                try:
                    author_bio = re.findall('<span title=\"(.*?)\" class=\"bio\">',q)[0]
                    print('author bio:', author_bio)
                except:
                    author_bio = None
                item['author_bio'] = author_bio

                time.sleep(2)

                # the answer content sits between <script type="text" class="content"> and </script>
                try:
                    answer_content = re.findall('<script type=\"text\" class=\"content\">(.*?)</script>', q)[0]
                    print(answer_content[:100])  # only print the beginning; the full answer is long
                except:
                    answer_content = None
                item['answer_content'] = answer_content

                time.sleep(2)

                yield item

            # 2. Build the URL of the next page and feed it back to parse() as the callback
            first_url = 'https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0'
            # the link to the next page is under the 'next' key of the JSON's 'paging' section
            nexturl = jsDict['paging']['next']
            last_url = re.findall('&(.*)', nexturl)[0]
            url = first_url + '&' +last_url
            print(url)
            yield Request(url, callback=self.parse) 

        except json.decoder.JSONDecodeError as e:  # originally added while debugging a wrong URL; it can probably be removed now
            print('JSONDecodeError')

           

2) How to find the URL of the JSON: looking at the page source shows that the content to extract sits inside <script></script> tags and is loaded dynamically with JavaScript. In Firefox, press F12 to open the web developer tools, switch to the Network tab, click "more" on the page, and inspect the new requests that appear; the response of this one turns out to contain the data we want. json.loads() then converts response.body, which is of type bytes, into a dict.
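
A quick way to double-check the URL is the Scrapy shell (run inside the project folder so settings.py, including the User-Agent header, is picked up). The keys shown below are the ones this crawl relies on and may change if Zhihu changes its API:

$ scrapy shell "https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0"
>>> import json
>>> data = json.loads(response.body)
>>> list(data.keys())        # expect something like ['paging', 'htmls']
>>> data['paging']['next']   # relative URL of the next page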


3) How to parse the page: regular expressions do the work; once the text surrounding the target content is located, a (.*?) group captures the content itself. For example, the question text sits between class=\"js-title-link\"> and </a>, so question = re.findall('class=\"js-title-link\">(.*?)</a>',q)[0].
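
The same pattern in isolation, on a made-up HTML fragment shaped like the entries in 'htmls':

import re

# a made-up fragment, just to show how the capture groups work
q = ('<a class="js-title-link">What is machine learning?</a>'
     ' data-author-name="Alice" data-entry-url="https://example.com/entry"')

question = re.findall('class="js-title-link">(.*?)</a>', q)[0]
author_name = re.findall('data-author-name="(.*?)" data-entry-url="', q)[0]
print(question)      # What is machine learning?
print(author_name)   # Alice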


4) How to build the next-page URL: the link to the next page lives under the ['next'] key of the JSON's ['paging'] section; extract it and combine it with the base search URL.
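
The combination step on its own, with a hypothetical 'next' value of the shape returned under 'paging':

import re

first_url = 'https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0'
# hypothetical value of jsDict['paging']['next']
nexturl = '/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0&correction=1&type=content&offset=40'

last_url = re.findall('&(.*)', nexturl)[0]   # everything after the first '&'
url = first_url + '&' + last_url
print(url)
# https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0&correction=1&type=content&offset=40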

Code: https://github.com/MTINGSUN/zhihuspider.git

That's all.