
Python crawler programs: using requests with GET and POST

Crawler program 1:

main.py

#! python
# -*- coding: utf-8 -*-
from urllib.request import urlopen
url = "https://www.hao123.com/"


if __name__ == '__main__':

    resp = urlopen(url)

    # Write the fetched page source to the file myGetFile.html
    with open("myGetFile.html", mode="w", encoding='utf-8') as f:
        f.write(resp.read().decode('utf-8'))
    # the with statement closes the file automatically, so no f.close() is needed

    resp.close()  # close the response

    print("Done")
           

Crawler program 2:

main2.py

# -*- coding: utf-8 -*-
import requests


if __name__ == '__main__':
    query = input("Enter the name of a star you like: ")

    url = f"https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&srcqid=5655130659909863611&tn=50000021_hao_pg&wd={query}"

    dic = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                         "Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.47"}
    resp = requests.get(url, headers=dic)  # a browser User-Agent gets past a simple anti-scraping check

    # print(resp)
    # print(resp.text)  # print the fetched page source

    # Write the fetched page source to the file myGetFile.html
    with open("myGetFile.html", mode="w", encoding='utf-8') as f:
        f.write(resp.text)
    # the with statement closes the file automatically, so no f.close() is needed

    resp.close()  # close the response connection

    print("Done")
           

Crawler program 3:

main3.py

# -*- coding: utf-8 -*-
import requests

if __name__ == '__main__':
    url = "https://fanyi.baidu.com/sug"

    s = input("Enter an English word to translate: ")
    # the English word to translate
    dat = {"kw": s}

    # User-Agent header
    dicHeaders = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                                "Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.47"}

    # Send a POST request; the form data must go in a dict passed via the data parameter
    resp = requests.post(url, headers=dicHeaders, data=dat)
    # print(resp.text)
    # Parse the response body as JSON with .json(), which yields a dict
    print(resp.json())

    resp.close()  # close the response connection

    print("Done")
           

Crawler program 4:

main4.py

# -*- coding: utf-8 -*-
import requests

if __name__ == '__main__':
    url = "https://movie.douban.com/j/chart/top_list"

    # query parameters
    dicParam = {"type": "24",
                "interval_id": "100:90",
                "action": "",
                "start": "0",
                "limit": "20"
                }

    # User-Agent header
    dicHeaders = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                                "Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.47"}

    # Send a GET request; the query data must go in a dict passed via the params parameter
    resp = requests.get(url=url, params=dicParam, headers=dicHeaders)  # the UA header gets past a simple anti-scraping check
    # print(resp.text)
    # Parse the response body as JSON with .json(); here it is a list of movie dicts
    # print(resp.json())
    objData = resp.json()

    # print each movie record
    for item in objData:
        print(item)

    resp.close()  # close the response connection

    print("Done")
           

Crawler program 5:

main5.py

# -*- coding: utf-8 -*-
import requests

if __name__ == '__main__':
    url = "https://movie.douban.com/j/chart/top_list"

    # fetch data in 2 consecutive rounds, 20 records per round
    for n in range(0, 2):
        # query parameters
        dicParam = {"type": "24",
                    "interval_id": "100:90",
                    "action": "",
                    "start": f"{20*n}",
                    "limit": "20"
                    }

        # User-Agent header
        dicHeaders = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                                    "Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.47"}

        # Send a GET request; the query data must go in a dict passed via the params parameter
        resp = requests.get(url=url, params=dicParam, headers=dicHeaders)  # the UA header gets past a simple anti-scraping check
        # print(resp.text)
        # Parse the response body as JSON with .json(); here it is a list of movie dicts
        # print(resp.json())

        objData = resp.json()

        print(f"Round {n+1}, 20 records:")
        # print each movie record
        for item in objData:
            print(item)

        resp.close()  # close the response connection

    print("Done")
           

