This Python crawler for Baidu Baike walks through the crawling process step by step, with detailed comments on every piece of code. Working through the example gives a good feel for how a Python crawler is put together:
1. Crawler scheduler entry point (crawler_main.py)

# coding:utf-8
from com.wenhy.crawler_baidu_baike import url_manager, html_downloader, html_parser, html_outputer

print "Baidu Baike crawler scheduler entry point"

# The spider class
class SpiderMain(object):
    # Initialize the URL manager, HTML downloader, parser and outputer
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.output = html_outputer.HtmlOutput()

    def craw(self, root_url):
        # Page counter
        count = 1
        # Seed the URL manager with the entry URL
        self.urls.add_new_url(root_url)
        # Keep going while there are unvisited URLs
        while self.urls.has_new_url():
            try:
                # Fetch the next URL
                new_url = self.urls.get_new_url()
                print 'crawler %d : %s' % (count, new_url)
                # Download the HTML page
                html_cont = self.downloader.download(new_url)
                # Parse the page into a set of new URLs plus the page data
                new_urls, new_data = self.parser.parser(new_url, html_cont)
                # Add the parsed URLs to the URL manager in bulk
                self.urls.add_new_urls(new_urls)
                # Collect the data
                self.output.collect_data(new_data)
                # Stop after 500 pages
                if count == 500:
                    break
                count = count + 1
            except Exception as e:
                print 'Crawler Failed ', e
        self.output.output_html()

if __name__ == '__main__':
    # Entry URL: the Baidu Baike page for Python
    root_url = "http://baike.baidu.com/item/Python"
    # Create the spider
    obj_spider = SpiderMain()
    # Start crawling
    obj_spider.craw(root_url)
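One practical refinement: the loop above fires requests back to back. To be polite to the target site you can pause between pages. Below is a minimal sketch of the same scheduling loop with a delay added; the one-second value and the craw_politely helper name are made up for illustration, not part of the original project.

# coding:utf-8
import time

CRAWL_DELAY = 1.0  # seconds between requests; an assumed, tunable value

def craw_politely(spider, root_url, limit=500):
    # Same scheduling loop as SpiderMain.craw, with a sleep between pages
    count = 1
    spider.urls.add_new_url(root_url)
    while spider.urls.has_new_url():
        try:
            new_url = spider.urls.get_new_url()
            html_cont = spider.downloader.download(new_url)
            new_urls, new_data = spider.parser.parser(new_url, html_cont)
            spider.urls.add_new_urls(new_urls)
            spider.output.collect_data(new_data)
            if count == limit:
                break
            count += 1
            time.sleep(CRAWL_DELAY)  # pause before the next request
        except Exception as e:
            print 'Crawler Failed ', e
    spider.output.output_html()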
2. The URL manager (url_manager.py)

# coding:utf-8
print "URL manager"

class UrlManager(object):
    # Initialize the URL containers (sets)
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    # Add a single URL to be crawled
    def add_new_url(self, url):
        if url is None:
            return
        # A URL that is in neither the new set nor the old set is brand new
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    # Add the URLs parsed out of a page in bulk
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    # Check whether any URLs are still waiting to be crawled
    def has_new_url(self):
        # A non-empty new-URL set means there is still work to do
        return len(self.new_urls) != 0

    # Get the next URL
    def get_new_url(self):
        # Pop a URL from the new set and move it to the visited set
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
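A quick check of the set-based deduplication; this is a minimal sketch, where the import path follows the one used in crawler_main.py and the URL is just an example:

# coding:utf-8
from com.wenhy.crawler_baidu_baike.url_manager import UrlManager

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/item/Python')
# Adding the same URL a second time is a no-op
manager.add_new_url('http://baike.baidu.com/item/Python')
print manager.has_new_url()   # True: exactly one URL is waiting
url = manager.get_new_url()   # pops it and records it as visited
print manager.has_new_url()   # False: the duplicate was never stored
manager.add_new_url(url)      # re-adding a visited URL is ignored
print manager.has_new_url()   # still False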
3. The HTML downloader (html_downloader.py)

# coding:utf-8
import urllib2

print "HTML downloader"

class HtmlDownloader(object):
    def download(self, url):
        # Guard against an empty URL
        if url is None:
            return None
        # Fetch the URL
        response = urllib2.urlopen(url)
        # Check that the response code is 200
        if response.getcode() != 200:
            return None
        # Success: return the page content
        return response.read()
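urllib2.urlopen sends Python's default User-Agent, which some sites reject or throttle. A hedged variant that supplies a browser-like User-Agent header; the class name and the header value are only examples, not part of the original project:

# coding:utf-8
import urllib2

class HtmlDownloaderWithHeaders(object):
    def download(self, url):
        if url is None:
            return None
        # Build a request carrying an explicit User-Agent (example value)
        request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        response = urllib2.urlopen(request)
        if response.getcode() != 200:
            return None
        return response.read()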
4. The HTML parser (html_parser.py)

# coding:utf-8
from bs4 import BeautifulSoup
import re
import urlparse

print "HTML parser"

class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        # Set that collects every URL parsed out of the page
        new_urls = set()
        # Lemma URLs look like /item/...
        links = soup.find_all('a', href=re.compile(r'/item/'))
        for link in links:
            # Pull out the href
            new_url = link['href']
            # Join the relative href with the page URL
            new_full_url = urlparse.urljoin(page_url, new_url)
            # print 'new_full_url', new_full_url
            # Add the absolute URL to the set
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        # Dict holding the extracted data
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>自由軟體</h1>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

    # Parse a downloaded page
    def parser(self, page_url, html_content):
        if page_url is None or html_content is None:
            return
        # Parse the HTML page with BeautifulSoup
        soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
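The hrefs matched by the regular expression are site-relative (they start with /item/), which is why _get_new_urls runs them through urlparse.urljoin. A quick illustration; the linked lemma is just an example:

# coding:utf-8
import urlparse

page_url = 'http://baike.baidu.com/item/Python'
href = '/item/Guido%20van%20Rossum'  # example relative lemma link
# urljoin resolves the relative href against the page URL
print urlparse.urljoin(page_url, href)
# -> http://baike.baidu.com/item/Guido%20van%20Rossum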
5. The HTML outputer (html_outputer.py)

# coding:utf-8
print "HTML outputer"

class HtmlOutput(object):
    def __init__(self):
        self.datas = []

    # Collect data
    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    # Write the collected data out as an HTML table
    def output_html(self):
        fout = open('output.html', 'w')
        fout.write('<html>')
        fout.write('<head>')
        fout.write('<meta http-equiv="Content-Type" content="text/html;charset=utf-8">')
        fout.write('</head>')
        fout.write('<body>')
        fout.write('<table border="1">')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td><a href="%s">%s</a></td>' % (data['url'].encode('utf-8'), data['title'].encode('utf-8')))
            fout.write('<td>%s</td>' % data['summary'].encode('utf-8'))
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
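The per-field .encode('utf-8') calls work, but in Python 2 you can also open the file through the standard codecs module and write unicode directly. A minimal alternative sketch of output_html, assuming title and summary are the unicode strings returned by get_text():

# coding:utf-8
import codecs

def output_html(self):
    # Open the file with a utf-8 encoder so unicode can be written as-is
    fout = codecs.open('output.html', 'w', encoding='utf-8')
    fout.write(u'<html><head>')
    fout.write(u'<meta http-equiv="Content-Type" content="text/html;charset=utf-8">')
    fout.write(u'</head><body><table border="1">')
    for data in self.datas:
        fout.write(u'<tr>')
        fout.write(u'<td><a href="%s">%s</a></td>' % (data['url'], data['title']))
        fout.write(u'<td>%s</td>' % data['summary'])
        fout.write(u'</tr>')
    fout.write(u'</table></body></html>')
    fout.close()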
Summary: a Python crawler boils down to five modules: the scheduler entry point that starts the crawl, the URL manager that keeps the already-crawled URLs and the queue of URLs still to be crawled, the HTML downloader, the HTML parser, and the HTML outputer.
Along the way the example also exercises urllib2, the bs4 (BeautifulSoup) page parser, re regular expressions, urlparse, and some Python basics worth reviewing (set operations and the like).
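As a small illustration of the re part, the re.compile(r'/item/') pattern used in the parser simply tests whether an href contains /item/; the sample hrefs below are made up:

# coding:utf-8
import re

pattern = re.compile(r'/item/')
print bool(pattern.search('/item/Python'))    # True: a lemma link
print bool(pattern.search('/help/faq.html'))  # False: not a lemma link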