- 資料同步及異步存儲到MySQL
- 對于ajax 加載的資料用selenium輔助加載解析
- 整站爬取提取url規則
jianshu.py 檔案
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from jianshu_spider.items import JianshuSpiderItem
class JianshuSpider(CrawlSpider):
    """Crawl jianshu.com article pages and extract article metadata."""
    name = 'jianshu'
    allowed_domains = ['jianshu.com']
    start_urls = ['https://www.jianshu.com/']

    rules = (
        # Article URLs share one prefix and end with a 12-character slug
        # of lowercase letters and digits, e.g. /p/0123456789ab
        # NOTE: method name fixed from the original typo `parse_detial`;
        # the callback string and the method are renamed together.
        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'),
             callback='parse_detail', follow=True),
    )

    def parse_detail(self, response):
        """Parse one article page into a JianshuSpiderItem.

        :param response: the article page response
        :return: a populated JianshuSpiderItem
        """
        title = response.xpath('//h1[@class="title"]/text()').extract_first('')
        avatar = response.xpath('//a[@class="avatar"]/img/@src').extract_first('')
        author = response.xpath('//span[@class="name"]/a/text()').extract_first('')
        publish_time = response.xpath('//span[@class="publish-time"]/text()').extract_first('')
        content = response.xpath('//div[@class="show-content"]').get()
        # The article id is the last path segment of the URL with any
        # query string ("?...") stripped off first.
        process_url = response.url.split('?')[0]
        article_id = process_url.split('/')[-1]
        origin_url = response.url
        return JianshuSpiderItem(
            title=title, avatar=avatar, author=author,
            publish_time=publish_time, content=content,
            article_id=article_id, origin_url=origin_url)
items.py 檔案
import scrapy
class JianshuSpiderItem(scrapy.Item):
    """Container for one scraped jianshu article."""
    title = scrapy.Field()         # article title
    avatar = scrapy.Field()        # author avatar image URL
    author = scrapy.Field()        # author display name
    publish_time = scrapy.Field()  # publication time text
    content = scrapy.Field()       # article body HTML
    article_id = scrapy.Field()    # slug taken from the article URL
    origin_url = scrapy.Field()    # full URL the item was scraped from
settings.py檔案
# Ignore robots.txt so all article pages can be fetched.
ROBOTSTXT_OBEY = False
# Wait one second between requests to avoid hammering the site.
DOWNLOAD_DELAY = 1
# Default headers sent with every request; the User-Agent makes the
# crawler look like a desktop Chrome browser.
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
# Route all downloads through the selenium middleware so ajax-loaded
# content is rendered before parsing.
DOWNLOADER_MIDDLEWARES = {
#'jianshu_spider.middlewares.JianshuSpiderDownloaderMiddleware': 543,
'jianshu_spider.middlewares.SeleniumDownloadMiddleware': 543,
}
# Synchronous MySQL pipeline enabled; swap the comments to use the
# asynchronous (twisted adbapi) pipeline instead.
ITEM_PIPELINES = {
'jianshu_spider.pipelines.JianshuSpiderPipeline': 300,
#'jianshu_spider.pipelines.JianshuTwistedPipeline': 300,
}
pipelines.py檔案
import pymysql
from twisted.enterprise import adbapi # 使用異步資料庫處理連接配接池
from pymysql import cursors # 資料庫遊标類
class JianshuSpiderPipeline(object):
    """Synchronously store scraped articles in a local MySQL database."""

    def __init__(self):
        # Connection parameters for the local MySQL instance.
        params = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '1326628437',
            'database': 'jianshu',
            'charset': 'utf8'
        }
        self.conn = pymysql.connect(**params)
        self.cursor = self.conn.cursor()  # fixed attribute typo: was `sursor`
        self._sql = None

    @property
    def sql(self):
        """Lazily build and cache the INSERT statement."""
        if not self._sql:
            self._sql = '''insert into article(title,author,avatar,publish_time,article_id,
            origin_url,content) value(%s,%s,%s,%s,%s,%s,%s)'''
        return self._sql

    def process_item(self, item, spider):
        # Blocking insert; commits once per item.
        self.cursor.execute(self.sql, (item['title'], item['author'], item['avatar'],
                                       item['publish_time'], item['article_id'],
                                       item['origin_url'], item['content']))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        # Release database resources when the spider finishes
        # (the original leaked both the cursor and the connection).
        self.cursor.close()
        self.conn.close()
# 異步實作插入資料庫,插入操作是io操作,資料量大時,會出現堵塞,異步插入很有必要
class JianshuTwistedPipeline(object):
    """Store scraped articles in MySQL asynchronously via twisted adbapi.

    The insert is an I/O operation; with a large volume of items a
    synchronous pipeline would block, so each insert runs on the
    connection pool's worker thread instead of the reactor thread.
    """

    def __init__(self):
        params = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '1326628437',
            'database': 'jianshu',
            'charset': 'utf8',
            'cursorclass': cursors.DictCursor
        }
        # Async connection pool; "pymysql" is the DB-API module name.
        self.dbpool = adbapi.ConnectionPool("pymysql", **params)
        self._sql = None

    @property
    def sql(self):
        """Lazily build and cache the INSERT statement."""
        if not self._sql:
            self._sql = '''insert into article(title,author,avatar,publish_time,article_id,
            origin_url,content) value(%s,%s,%s,%s,%s,%s,%s)'''
        return self._sql

    def process_item(self, item, spider):
        # Run the insert on a pool thread so the reactor is not blocked.
        defer = self.dbpool.runInteraction(self.insert_item, item)
        defer.addErrback(self.handle_error, item, spider)
        # Must return the item so later pipelines still receive it
        # (the original returned None, dropping the item).
        return item

    def insert_item(self, cursor, item):
        # runInteraction passes the transaction/cursor as the FIRST
        # positional argument; the original signature (item, cursor)
        # was swapped, so cursor.execute was called on the item and
        # every insert failed.
        cursor.execute(self.sql, (item['title'], item['author'], item['avatar'],
                                  item['publish_time'], item['article_id'],
                                  item['origin_url'], item['content']))

    def handle_error(self, error, item, spider):
        # Errbacks receive the Failure first, then the addErrback extras;
        # the original (item, error, spider) printed the item instead.
        print('+' * 30 + 'error' + '+' * 30)
        print(error)
        print('+' * 30 + 'error' + '+' * 30)
middlewares.py 檔案
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from scrapy.http.response.html import HtmlResponse
# 用selenium重寫請求過程,實作去爬取一些用ajax加載的頁面
# 一些點贊數,評論數,喜歡數,推薦閱讀的文章連結都是ajax加載的。
class SeleniumDownloadMiddleware(object):
    """Downloader middleware that fetches pages with a real Chrome browser.

    Like counts, comment counts and recommended-article links on jianshu
    pages are loaded via ajax, so a plain HTTP download misses them;
    rendering the page in selenium captures that content.
    """

    def __init__(self):
        self.browser = webdriver.Chrome()
        self.wait = WebDriverWait(self.browser, 10)

    def process_request(self, request, spider):
        """Load the URL in the browser and return the rendered HTML.

        :return: an HtmlResponse built from the browser's page source,
                 which short-circuits Scrapy's normal downloader.
        """
        self.browser.get(request.url)
        print('我正在用selenium自動化工具下載下傳url')
        time.sleep(1)
        # Articles collected by many columns hide the extra entries
        # behind repeated "show more" buttons; keep clicking until the
        # button disappears, at which point find_element raises and the
        # loop ends.  (The original `if not showmore: break` was dead
        # code — find_element never returns a falsy value.)
        try:
            while True:
                show_more = self.browser.find_element_by_class_name('show-more')
                show_more.click()
                time.sleep(0.3)
        except Exception:
            # Narrowed from a bare `except:`; expected case is selenium's
            # NoSuchElementException once no "show-more" button remains.
            pass
        source = self.browser.page_source
        return HtmlResponse(url=self.browser.current_url, request=request,
                            body=source, encoding='utf-8')