
Python crawler: scraping 糗事百科 (Qiushibaike)

"""
https://www.qiushibaike.com/8hr/page/1/
https://www.qiushibaike.com/8hr/page/2/
https://www.qiushibaike.com/8hr/page/3/

"""

import os
import json
import requests
from lxml import etree
def qiushi(page_start, page_end):
    # Directory where the scraped pages will be saved
    path = './static/templates'
    if not os.path.exists(path):
        os.makedirs(path)
    for page in range(int(page_start), int(page_end) + 1):
        base_url = "https://www.qiushibaike.com/8hr/page/{}/".format(page)
        # Fetch the page; the User-Agent header is required, otherwise the site returns no content
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
        }
        response = requests.get(base_url, headers=headers)
        html = response.text
        # Parse the HTML string into an lxml element tree so XPath queries can be run on it
        html_xml = etree.HTML(html)  # <class 'lxml.etree._Element'>
        # Narrow the scope with XPath: first grab every li tag in the recommended-article list
        li_list = html_xml.xpath('//div[@class="recommend-article"]//li')
        # Loop over every li tag on the page
        print("======================== Downloading page {} =============================".format(page))
        big_dic = {
            'page {}'.format(page): {}
        }
        for index, li in enumerate(li_list):
            # 1. Title of the post
            name = li.xpath('.//a[@class="recmd-content"]/text()')[0]
            # 2. Author's nickname
            nickname = li.xpath('.//span[@class="recmd-name"]/text()')[0]
            # 3. Number of likes
            like1 = li.xpath('.//div[@class="recmd-num"]//span[1]/text()')[0]
            # 4. Number of comments
            comment = li.xpath('.//div[@class="recmd-num"]//span[last()-1]/text()')[0]
            # 5. Thumbnail image: the src attribute is protocol-relative, so prepend https:
            picture1 = li.xpath('./a/img/@src')[0]
            pic = 'https:' + picture1
            # Collect the fields for this item into a dictionary
            qiushi_dict = {
                'title': name,
                'nickname': nickname,
                'likes': like1,
                'comments': comment,
                'image': pic,
            }
            big_dic.get('page {}'.format(page))[index + 1] = qiushi_dict
        # Serialize the whole page once all items have been collected and write it to disk
        big_dc = json.dumps(big_dic, ensure_ascii=False)
        path1 = os.path.join(path, str(page))
        with open(path1 + '.txt', 'w', encoding='utf-8') as f:
            f.write(big_dc)
            print(big_dc)
if __name__ == '__main__':
    page_start = input('Enter the first page to crawl: ')
    page_end = input('Enter the last page to crawl: ')
    qiushi(page_start, page_end)

"""
遇到的問題:擷取不到内容
加請求頭

"""