百度文庫,文字類型的資源(如txt, doc, pdf),是可以在前端源碼分析擷取到的,如果能按照規則合理地提取這些文字資源,就可以實作免下載券擷取資源.
作者完成了百度文庫的線上版爬蟲,隻需提供文庫資源頁面的url,就可以擷取對應的文庫資源(免下載券)
線上測試位址:
http://zhaozhaoli.vicp.io/spider/bdwk
效果展示
初始界面.png
攝影課感想_百度文庫_and_方圓小站.png
擷取資源.png
展示文本.png
關于ppt的爬取,作者也寫出來了,但是我的個人伺服器帶寬太小,導緻前端等待時間過長,就把它注釋了...有興趣的可以看以前寫過的這篇文章 百度文庫下載器 ,文章底部提供了編譯好的可執行檔案(支援ppt,txt,pdf,word格式的下載),可以下載到本地玩一玩...
核心源碼:
import os
import re
import json
import requests
from lxml import etree
import sys
from fangyuanxiaozhan.settings import BASE_DIR
# import tkinter
# Base class for Baidu Wenku documents: fetches the resource page and
# extracts the document type and title from its embedded JS config.
class BaiduWK(object):
    def __init__(self, url):
        """Fetch *url* and populate self.title and self.docType.

        :param url: URL of the Baidu Wenku resource page.
        """
        self.title = None
        self.url = url
        self.docType = None
        # Desktop Chrome UA so Baidu serves the full desktop page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36'}
        # BUGFIX: the original also called self.get_response_content(self.url)
        # here and discarded the result -- a redundant extra HTTP request,
        # since get_doc_type_and_title() fetches the same URL again.
        self.get_doc_type_and_title()

    def get_response_content(self, url):
        """Return the raw response body (bytes) for *url*, or None on error.

        NOTE(review): errors are only printed, never raised, so every caller
        must cope with a possible None return (original behaved the same).
        """
        try:
            response = requests.get(url, headers=self.headers)
            return response.content
        except Exception as e:
            print(e)
            return None

    def get_doc_type_and_title(self):
        """Parse docType and title out of the page's inline JS config."""
        # Fetch the page source.
        source_html = self.get_response_content(self.url)
        # Wenku pages are served as GBK.
        content = source_html.decode('gbk')
        # Matches e.g.  docType: 'txt',
        self.docType = re.findall(r"docType.*?\:.*?\'(.*?)\'\,", content)[0]
        # Matches e.g.  title: 'xxx',
        self.title = re.findall(r"title.*?\:.*?\'(.*?)\'\,", content)[0]
# Downloader for txt documents.
class BDWKTXT(BaiduWK):
    def __init__(self, url):
        super().__init__(url)
        # Filled in by get_txt() once the page source has been parsed.
        self.docId = None

    def get_txt(self, url):
        """Download the document text and save it as
        <BASE_DIR>/static/spider_file/bdwk/<docType>/<title>.txt.

        :param url: the Wenku page URL (normally the same one passed to
                    __init__).
        """
        # Fetch and decode the page source (Wenku pages are GBK).
        source_html = self.get_response_content(url)
        content = source_html.decode("gbk")
        # Extract the 24-character document id.
        self.docId = re.findall(r"docId.*?(\w{24}?)\'\,", content)[0]
        # First request: metadata (md5sum token, page count, rsign).
        token_url = "https://wenku.baidu.com/api/doc/getdocinfo?callback=cb&doc_id=" + self.docId
        first_json = self.get_response_content(token_url).decode()
        # Strip the JSONP wrapper: cb({...})
        str_first_json = re.match(r'.*?\((\{.*?\})\).*', first_json).group(1)
        the_first_json = json.loads(str_first_json)
        md5sum = the_first_json["md5sum"]
        rn = the_first_json["docInfo"]["totalPageNum"]
        rsign = the_first_json["rsign"]
        # Second request: the full text in one go (pn=1, rn=<total pages>).
        # BUGFIX: str() guards against totalPageNum arriving as an int, which
        # made the original bare "+" concatenation raise TypeError.
        target_url = ("https://wkretype.bdimg.com/retype/text/" + self.docId
                      + "?" + md5sum + "&callback=cb"
                      + "&pn=1&rn=" + str(rn) + "&type=txt"
                      + "&rsign=" + rsign)
        # Example of the final form:
        # https://wkretype.bdimg.com/retype/text/<docId>?md5sum=...&sign=...&callback=cb&pn=1&rn=12&type=txt
        sec_json = self.get_response_content(target_url).decode()
        # Strip the JSONP wrapper: cb([{...},{...},...])
        str_sec_json = re.match(r'.*?\(\[(.*)\]\)$', sec_json).group(1)
        # Appending "," makes the split uniform; the resulting trailing empty
        # element is dropped, and the "}" eaten by split() is restored below.
        str_sec_json += ","
        str_json_list = str_sec_json.split('},')[:-1]
        # Collect page paragraphs and join once instead of the original
        # quadratic string concatenation.
        parts = []
        for str_json in str_json_list:
            pure_dic = json.loads(str_json + "}")
            parts.append(pure_dic["parags"][0]["c"].strip())
        result_txt = "".join(parts)
        # Create the target directory; exist_ok replaces the old
        # try/except-pass around os.makedirs.
        path = BASE_DIR + "/static/spider_file/bdwk" + os.sep + self.docType
        os.makedirs(path, exist_ok=True)
        # Save the text; report failures (e.g. a title containing characters
        # illegal in file names) instead of silently swallowing them.
        try:
            file_name = path + os.sep + self.title + ".txt"
            with open(file_name, 'w', encoding='utf-8') as f:
                f.write(result_txt)
            print("已經儲存為:", self.title + '.txt')
        except Exception as e:
            print(e)
# Downloader for word documents (main() also reuses it for pdf).
class BDWKDOC(BaiduWK):
    def __init__(self, url):
        super().__init__(url)
        # URLs of the JSONP chunks that hold the document body.
        self.pure_addr_list = list()

    def get_pure_addr_list(self):
        """Collect and normalise the data-source URLs embedded in the page.

        Stores the list on self.pure_addr_list and returns it.
        """
        # Fetch and decode the page source (Wenku pages are GBK).
        source_html = self.get_response_content(self.url).decode('gbk')
        # Batch-extract the raw wkbos.bdimg.com data URLs.
        all_addr = re.findall(r'wkbos\.bdimg\.com.*?json.*?expire.*?\}', source_html)
        # For doc pages the <title> tag is the reliable title source.
        self.title = etree.HTML(source_html).xpath("//title/text()")[0].strip()
        pure_addr_list = []
        for addr in all_addr:
            # Un-escape the JS "\\\/" slashes and drop the 5 trailing
            # junk characters captured by the regex.
            cleaned = "https://" + addr.replace("\\\\\\/", "/")
            pure_addr_list.append(cleaned[:-5])
        self.pure_addr_list = pure_addr_list
        return pure_addr_list

    def get_json_content(self, url_list):
        """Download every chunk in *url_list*, concatenate the text, and save
        it as <BASE_DIR>/static/spider_file/bdwk/<docType>/<title>.txt.
        """
        # 'total' instead of the original 'sum', which shadowed the builtin.
        total = len(url_list)
        pieces = []
        for i, pure_addr in enumerate(url_list, start=1):
            print("正在下載下傳第%d條資料, 剩餘%d條" % (i, total - i))
            try:
                # Fetch the JSONP chunk and strip its wrapper.
                content = self.get_response_content(pure_addr).decode()
                content = re.match(r'.*?\((.*)\)$', content).group(1)
                all_body_info = json.loads(content)["body"]
                # Gather every text fragment; join once at the end instead of
                # the original quadratic string concatenation.
                for body_info in all_body_info:
                    try:
                        pieces.append(body_info["c"].strip())
                    except Exception as e:
                        print(e)
            except Exception as e:
                print(e)
        result = ''.join(pieces)
        # Create the target directory; exist_ok replaces the old
        # try/except-pass around os.makedirs.
        path = BASE_DIR + "/static/spider_file/bdwk" + os.sep + self.docType
        os.makedirs(path, exist_ok=True)
        # Save the text; failures are reported, not raised.
        try:
            file_name = path + os.sep + self.title + ".txt"
            with open(file_name, 'w', encoding='utf-8') as f:
                f.write(result)
            print("已經儲存為:", self.title + '.txt')
        except Exception as e:
            print(e)
# Downloader for ppt documents (saved as one jpg image per slide).
class BDWKPPT(BaiduWK):
    def __init__(self, url):
        # [zoom_url, page_number] pairs, filled by get_ppt_json_info().
        self.all_img_url = list()
        # Filled once the page source has been parsed.
        self.docId = None
        super().__init__(url)

    def get_ppt_json_info(self):
        """Fetch the slide-image list and save each page as
        <BASE_DIR>/static/spider_file/bdwk/ppt/<title>/<page>.jpg.
        """
        # Fetch and decode the page source (Wenku pages are GBK).
        # BUGFIX: removed the leftover debug block that dumped the decoded
        # source to "test.html" in the current working directory.
        content = self.get_response_content(self.url).decode('gbk')
        # Extract the 24-character document id.
        self.docId = re.findall(r"docId.*?(\w{24}?)\'\,", content)[0]
        # JSONP endpoint listing one image URL per slide.
        source_json_url = 'https://wenku.baidu.com/browse/getbcsurl?doc_id=%s&type=ppt&callback=zhaozhao' % self.docId
        str_source_json = self.get_response_content(source_json_url).decode()
        # Strip the JSONP wrapper: zhaozhao({...})
        pure_str_source_json = re.match(r'.*?\((.*?)\)', str_source_json).group(1)
        source_json = json.loads(pure_str_source_json)
        # Record (image url, page number) for every slide.
        for j in source_json['list']:
            self.all_img_url.append([j["zoom"], j["page"]])
        # Create the per-document directory; exist_ok replaces the old
        # try/except-pass around os.makedirs.
        os.makedirs(BASE_DIR + "/static/spider_file/bdwk/ppt/%s" % (self.title), exist_ok=True)
        for img_url in self.all_img_url:
            # BUGFIX: the original prefixed this progress message with
            # BASE_DIR (print(BASE_DIR + "正在...")), polluting the log line
            # with a filesystem path.
            print("正在擷取第%d頁資源(剩餘%d頁)" % (img_url[1], len(self.all_img_url) - img_url[1]))
            data = self.get_response_content(img_url[0])
            path = BASE_DIR + "/static/spider_file/bdwk/ppt/%s/%s" % (self.title, str(img_url[1]) + '.jpg')
            with open(path, 'wb') as f:
                f.write(data)
        # NOTE(review): flattened source made the placement of this message
        # ambiguous; treated as an end-of-download notice -- confirm.
        print("寫入完畢")
# Main entry point: dispatch the url to the matching downloader.
def main(url):
    """Fetch the Wenku resource at *url* and describe the saved file.

    :param url: Baidu Wenku resource page URL.
    :return: dict with "src" (server path of the saved resource, "" when the
             type is unsupported) and "title_name" (display name).
    """
    try:
        docType = BaiduWK(url).docType
    except Exception:
        print("您輸入的url,有誤請重新輸入!")
        # BUGFIX: the original called os.exit(), which does not exist and
        # would raise AttributeError here; sys.exit() is the intended call
        # (sys is imported at the top of the file).
        sys.exit()
    print("類型為", "-->", docType)
    if docType == "ppt2":
        ppt = BDWKPPT(url)
        print("您将要擷取的示範文稿(ppt)名稱為:", ppt.title)
        ppt.get_ppt_json_info()
        return {"src": "/static/spider_file/bdwk/ppt/%s/" % (ppt.title),
                "title_name": ppt.title}
    elif docType == "doc":
        word = BDWKDOC(url)
        print("您将要擷取的文檔(word)名稱為", word.title)
        word.get_json_content(word.get_pure_addr_list())
        return {"src": "/static/spider_file/bdwk/doc/%s.txt" % (word.title),
                "title_name": word.title + ".txt"}
    elif docType == "pdf":
        # pdf pages use the same chunked-text pipeline as word documents.
        pdf = BDWKDOC(url)
        print("您将要擷取的PDF名稱為:", pdf.title)
        pdf.get_json_content(pdf.get_pure_addr_list())
        return {"src": "/static/spider_file/bdwk/pdf/%s.txt" % (pdf.title),
                "title_name": pdf.title + ".txt"}
    elif docType == "txt":
        txt = BDWKTXT(url)
        print("您将要下載下傳的文本文檔(txt)名稱為:", txt.title)
        txt.get_txt(url)
        return {"src": "/static/spider_file/bdwk/txt/%s.txt" % (txt.title),
                "title_name": txt.title + ".txt"}
    else:
        # Unsupported type: report it and return an empty src.
        other = BDWKPPT(url)
        print("暫不支援下載下傳%s類型" % (other.docType))
        return {"src": "",
                "title_name": "暫不支援%s類型" % (other.docType)}