天天看點

11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程

内容:詳情崗位url + 崗位詳情資訊 + Excel檔案和Mysql資料存儲

已知bug:拉勾網反爬蟲能力強,無法持續爬取。由于沒有使用IP池,爬取20個左右崗位後即被封鎖,無法繼續爬取。

作者:Irain

QQ:2573396010

微信:18802080892

GitHub項目連結:LGW.py

視訊資源連結:https://www.bilibili.com/video/BV1P4411f7rP?p=68

1 引用第三方庫

import requests
from lxml import etree
import time
import re
import pandas as pd
import os
import pymysql
from sqlalchemy import create_engine
           
# Sanity check: print the versions of the third-party dependencies used by
# this tutorial so readers can compare against the author's environment.
# (Fixed: the original fragment had broken indentation and would raise
# IndentationError as pasted.)
import requests
import lxml
import re
import pandas as pd
import pymysql
import sqlalchemy

print("pandas", pd.__version__)
print("requests", requests.__version__)
print("lxml", lxml.__version__)
print("re", re.__version__)
print("pymysql", pymysql.__version__)
print("sqlalchemy", sqlalchemy.__version__)
           
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程

2 設定頭檔案

# Request headers for Lagou. A logged-in session cookie is required — refresh
# the 'cookie' value from a real browser session before running, or every
# request comes back as the "操作太頻繁" anti-crawler page.
headers = {
        # Fixed: the referer URL was garbled with spaces around '/', '-' and '='.
        'referer': 'https://www.lagou.com/jobs/list_Java/p-city_0?&cl=false&fromSearch=true&labelWords=&suginput=',
        # Fixed: HTTP header names cannot contain spaces (was 'sec - fetch - dest',
        # which requests rejects as an invalid header).
        'sec-fetch-dest': 'empty',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
        # Fixed: this key appeared twice in the literal with the same value;
        # the duplicate entry was removed.
        'x-anit-forge-code': '0',
        'x-anit-forge-token': None,
        'cookie': 'user_trace_token=20200321131413-8ddfc34a-aa52-4262-9096-d9d05da3fd5a; _ga=GA1.2.542181339.1584767653; LGUID=20200321131416-b45c1477-a981-4087-b131-a7b9974b5600; JSESSIONID=ABAAABAABFIAAAC35F27D5B4D5AE1C5F2C9F333E9BBCCF4; WEBTJ-ID=20200321131422-170fb837a3113d-030b63af31512b-5d1f391c-1049088-170fb837a326cf; index_location_city=%E5%85%A8%E5%9B%BD; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22170fb83bf0b209-0245070180f324-5d1f391c-1049088-170fb83bf0c16d%22%2C%22%24device_id%22%3A%22170fb83bf0b209-0245070180f324-5d1f391c-1049088-170fb83bf0c16d%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; X_MIDDLE_TOKEN=3ff5a12012ebfd67a30924ca6d31c84e; _putrc=4EF91627A84C9BD4123F89F2B170EADC; login=true; unick=%E7%BD%97%E4%BC%9F%E6%98%8E; LG_LOGIN_USER_ID=7e1df141aefd772eaccf6aebd297375c9de7ab50fa2a947b5f2da2737a55fd6a; LG_HAS_LOGIN=1; _gid=GA1.2.593238733.1588814276; gate_login_token=9ad2b13c5356891d10a151c6a4e98f1624cc225476cf2d8a45fa860da49f565c; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; privacyPolicyPopup=false; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1588814469; hasDeliver=9; TG-TRACK-CODE=index_search; SEARCH_ID=1f60a80b4dd34e76b07c7be3127da27b; X_HTTP_TOKEN=9ea9eb5a8fddf22666701988513d84c5d39321ba5e; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1588911281; LGRID=20200508121443-eaeb9b7d-79a1-4032-b14f-f24c5fc48cbc'
}
positions = []  # accumulates one dict per crawled position (filled by parse_position_detail)
           

3 擷取詳情崗位url

11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
def request_list_page():
    """Fetch Lagou's job-listing Ajax endpoint and crawl each job's detail page.

    POSTs to positionAjax.json, retries while the anti-crawler page
    ("操作太頻繁") is returned, extracts every positionId from the JSON
    payload and delegates each detail page to parse_position_detail().
    Results accumulate in the module-level `positions` list.
    """
    url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'  # Java search endpoint
    data = {  # form payload
        'first': 'false',  # fixed typo: was 'fisrt', which the API silently ignores
        'pn': 1,           # page number, overwritten per iteration below
        'kd': 'java'       # search keyword
    }
    for page in range(1, 10):  # pages to visit
        data['pn'] = page
        response = requests.post(url, headers=headers, data=data)
        # Anti-crawler backoff: without an IP pool, Lagou soon answers with
        # "操作太頻繁". Fixed: sleep BEFORE retrying (the original re-posted
        # immediately and slept afterwards, wasting the pause).
        while re.findall(r"操作太頻繁", response.text):
            time.sleep(1)
            response = requests.post(url, headers=headers, data=data)
        result = response.json()  # .json() loads the JSON body into a dict
        print(result)
        # Renamed from `positions` to avoid shadowing the module-level list.
        job_list = result['content']['positionResult']['result']
        for job in job_list:
            position_id = job['positionId']
            position_url = 'https://www.lagou.com/jobs/%s.html' % position_id  # detail-page URL
            parse_position_detail(position_url)  # scrape the detail page
        time.sleep(1)  # be polite between listing pages
        break  # only the first page is crawled; remove this break to crawl all pages
           

4 擷取崗位詳細資訊

11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
def parse_position_detail(url):
    """Scrape a single Lagou job-detail page and append the parsed record
    to the module-level `positions` list.

    Retries with a 1-second pause while the anti-crawler page
    ("操作太頻繁") is served instead of real content.
    """
    response = requests.get(url, headers=headers)
    while re.findall(r"操作太頻繁", response.text):  # anti-crawler page — retry
        response = requests.get(url, headers=headers)
        time.sleep(1)
    page_text = response.text
    print(response.status_code)
    tree = etree.HTML(page_text)

    def first_text(node):
        # First text fragment under `node`, surrounding whitespace stripped.
        return node.xpath('.//text()')[0].strip()

    spans = tree.xpath("//dd[@class='job_request']//span")
    record = {
        'name': tree.xpath("//h1[@class='name']/text()")[0],                     # job title
        'company_name': tree.xpath("//em[@class='fl-cn']/text()")[0].strip(),    # employer
        'salary': first_text(spans[0]),                                          # salary range
        'city': re.sub(r"[\s/]", "", first_text(spans[1])),                      # city, separators removed
        'work_year': re.sub(r"[\s/]", "", first_text(spans[2])),                 # required experience
        'education': re.sub(r"[\s/]", "", first_text(spans[3])),                 # required education
        'desc': "".join(tree.xpath("//dd[@class='job_bt']//text()")).strip(),    # full description
    }
    positions.append(record)
    print('-' * 88)
    print(record)
           
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程

5 Excel檔案和Mysql資料存儲

def save():
    """Persist the crawled `positions` to an Excel file and a MySQL table.

    Writes positions.xlsx (replacing any stale copy), reads it back,
    recreates the `lgw_positions` table and bulk-inserts the rows through
    SQLAlchemy. Fixed: the pymysql connection and SQLAlchemy engine were
    never closed (resource leak), and the trailing `pass` was dead code.
    """
    if os.path.exists('positions.xlsx'):
        os.remove('positions.xlsx')  # remove the stale export first
    data = pd.DataFrame(positions)
    data.to_excel('positions.xlsx')  # save to Excel
    data = pd.read_excel('positions.xlsx', index_col=0)
    conn = pymysql.connect(host='127.0.0.1',  # database host
                           port=3366,         # MySQL port
                           user='root',       # login name
                           passwd='123',      # login password
                           db='scrapy'        # database name
                           )
    try:
        with conn.cursor() as cur:  # cursor is closed automatically
            cur.execute('drop table if exists lgw_positions')  # drop stale table
            new_table = """CREATE TABLE lgw_positions(
                                      `index` int(2) ,
                                      `name` VARCHAR (16),
                                      company_name VARCHAR (16),
                                      salary VARCHAR (16),
                                      city VARCHAR (16),
                                      education  VARCHAR (16),
                                      work_year VARCHAR (16),
                                      `desc` Text)"""
            cur.execute(new_table)  # create the fresh table
        conn.commit()
    finally:
        conn.close()  # fix: the connection was previously leaked
    # create_engine('mysql+pymysql://user:password@host:port/db?charset=utf8')
    # Fixed: the DSN had been mangled by e-mail obfuscation; it must match the
    # pymysql.connect() parameters above (root / 123 @ 127.0.0.1:3366).
    engine = create_engine('mysql+pymysql://root:123@127.0.0.1:3366/scrapy?charset=utf8')
    try:
        data.to_sql('lgw_positions', con=engine, if_exists='append')  # bulk insert
    finally:
        engine.dispose()  # release pooled connections
           
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程
11.5 拉勾網普通爬蟲--scrapy爬蟲初學者學習過程

釋出:2020年5月11日