天天看點

python爬蟲實戰之爬取成語大全

業餘時間,想學習中華文化之成語,身邊沒有成語詞典,網上一個個翻網頁又比較懶,花了半個小時搞定數字成語詞典,你值得擁有!

爬取思路

  • 找到首頁網址:https://www.chengyucidian.net/
  • 按照拼音字母種類抓取,注意有些字母沒有成語;
  • 擷取每個字母種類的每一頁所有成語連結;
  • 擷取每一頁連結下的内容。

廢話不多說,直接上代碼給各位看客拿去上手撸!

import requests
from bs4 import BeautifulSoup
import re


headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.2 Safari/605.1.15'
}


def getChengyu(cate, page):
    """Fetch one listing page of pinyin category *cate* and append every
    idiom detail-page URL found on it to chengyu_urls.csv.

    Parameters
    ----------
    cate : str
        Pinyin letter category (e.g. "a", "b").
    page : int or str
        1-based page number within that category.
    """
    res = requests.get(
        "https://www.chengyucidian.net/letter/" + str(cate) + "/p/" + str(page),
        headers=headers,
        allow_redirects=False,
    )
    res.encoding = "utf-8"
    # Explicit parser avoids bs4's GuessedAtParserWarning and keeps the
    # parse result consistent across machines.
    soup = BeautifulSoup(res.text, "html.parser")
    cate_divs = soup.select('div[class="cate"]')
    print(cate_divs)
    # Idiom links look like /cy/<digits>.html — the digits are the idiom id.
    ids = re.findall(r'\d+', str(cate_divs))
    # 'with' guarantees the handle is closed even if a write fails
    # (the original opened the file on every call and never closed it).
    with open("chengyu_urls.csv", "a", encoding="utf-8") as f:
        for idx in ids:
            f.write("https://www.chengyucidian.net/cy/" + str(idx) + ".html" + "\n")

def getPageNum(cate):
    """Return the last page number (as a string of digits) shown in the
    pagination bar of pinyin category *cate*.

    Returns "0" when the category has no pagination bar at all — some
    letters have no idioms, and the original code crashed with IndexError
    on them.
    """
    res = requests.get(
        "https://www.chengyucidian.net/letter/" + str(cate),
        headers=headers,
        allow_redirects=False,
    )
    res.encoding = "utf-8"
    # Explicit parser: deterministic output, no GuessedAtParserWarning.
    soup = BeautifulSoup(res.text, "html.parser")
    page_div = soup.select('div[class="page"]')
    nums = re.findall(r'\d+', str(page_div))
    if not nums:
        # No pagination bar -> no pages to crawl for this letter.
        return "0"
    # The last number in the bar is the total page count.
    return nums[-1]

def getIntroduction(url):
    """Scrape one idiom detail page.

    Parameters
    ----------
    url : str
        Full URL of a detail page (https://www.chengyucidian.net/cy/<id>.html).

    Returns
    -------
    tuple[str, str]
        (chengyu, introText): the idiom name taken from the <h1> tag, and
        the explanation text with the page's known markup tags stripped.
    """
    res = requests.get(url, headers=headers, allow_redirects=False)
    res.encoding = "utf-8"
    # Explicit parser avoids bs4's GuessedAtParserWarning.
    soup = BeautifulSoup(res.text, "html.parser")
    chengyu = str(soup.select('h1')[0]).replace("<h1>", "").replace("</h1>", "")
    print(chengyu)
    introText = str(soup.select('div[class="con"]')[0])
    # Strip exactly the fixed set of tags the page is known to contain;
    # one loop replaces the original wall of eleven .replace() calls.
    for tag in ("<p>", "</p>", '<h4 id="chu">', '<h4 id="shi">', "</h4>",
                "<h4>", '<div class="con">', "</div>", "<strong>",
                "</strong>", '<p class="ciLs">'):
        introText = introText.replace(tag, "")
    print(introText)
    return chengyu, introText


if __name__ == "__main__":
    # Read every previously collected detail-page URL and append
    # (idiom, description) records to chengyu_introText.csv.
    with open("chengyu_urls.csv", "r", encoding="utf-8") as f1:
        urls = f1.read().split("\n")
    print(len(urls))
    with open("chengyu_introText.csv", "a", encoding="utf-8") as f2:
        # [10:] resumes a partially finished run; adjust the offset to restart.
        for url in urls[10:]:
            # split("\n") leaves a trailing empty string; passing "" to
            # requests.get raises MissingSchema — skip blank lines.
            if not url:
                continue
            chengyu, introText = getIntroduction(url)
            f2.write(str(chengyu) + "\n" + "[" + str(introText) + "]" + "\n")