This assignment comes from: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/3002
Import the required packages:
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import time
import random
import pandas as pd
0. Get the click count from a news URL, and wrap the steps in a function
- newsUrl
- newsId (re.search())
- clickUrl (str.format())
- requests.get(clickUrl)
- re.search() / .split()
- str.lstrip(), str.rstrip()
- int
- wrap the steps into a function
- also wrap getting the news publish time (and its type conversion) into a function (a sketch of these hints follows this list)
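Putting the hints together, here is a minimal sketch of the pipeline; the shape of the click-API response (a jQuery snippet ending in something like $('#hits').html('123');) is an assumption, inferred from what the parsing below expects:
import re
import requests

newsUrl = 'http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html'
# newsId: the number right before .html
newsId = re.search(r'/(\d+)\.html', newsUrl).group(1)
# clickUrl: built with str.format()
clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
resText = requests.get(clickUrl).text
# Assumed response tail: ...$('#hits').html('123');
clickStr = resText.split('.html')[-1].lstrip("(')").rstrip("');")
print(int(clickStr))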
The click-count function:
def clickCounts(url):
    # The news id is the last number in the URL, e.g. .../11155.html
    newsId = re.findall(r'\d+', url)[-1]
    clickUrl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(newsId)
    clickStruct = requests.get(clickUrl).text
    # The count sits inside the last .html('...') fragment of the response
    clickCounts = int(clickStruct.split('.html')[-1][2:-3])
    return clickCounts
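A quick check of the function on a single article:
print(clickCounts('http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html'))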
1. Get the news details from a news URL: a dictionary, anews
Get the publish time:
def newsdt(showInfo):
    # showInfo looks like '发布时间:2019-04-04 11:31:29 作者:...'
    newsDate = showInfo.split()[0].split(':')[1]   # '2019-04-04'
    newsTime = showInfo.split()[1]                 # '11:31:29'
    newsDT = newsDate + ' ' + newsTime
    dt = datetime.strptime(newsDT, '%Y-%m-%d %H:%M:%S')
    return dt
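To see what each split step produces, here is a worked example on an assumed show-info string (the field labels and the ASCII colon are assumptions; the real page may use a full-width '：'):
showInfo = '发布时间:2019-04-04 11:31:29  作者:somebody  点击:'  # assumed sample
print(showInfo.split()[0])                  # '发布时间:2019-04-04'
print(showInfo.split()[0].split(':')[1])    # '2019-04-04'
print(showInfo.split()[1])                  # '11:31:29'
print(newsdt(showInfo))                     # 2019-04-04 11:31:29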
Get the news details:
def anews(url):
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsDetail['newsTitle'] = soup.select('.show-title')[0].text
    showInfo = soup.select('.show-info')[0].text
    newsDetail['newsDT'] = newsdt(showInfo)       # publish time as a datetime
    newsDetail['newsClick'] = clickCounts(url)    # click count from the API
    return newsDetail
newsUrl = "http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html"
print(anews(newsUrl))
2. Get the news URLs from a list-page URL: list append(dict), alist
def alist(listUrl):
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        # Only <li> items that carry a news title are real news entries
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsDesc = news.select('.news-list-description')[0].text
            newsDict = anews(newsUrl)             # fetch the detail page
            newsDict['newsUrl'] = newsUrl
            newsDict['description'] = newsDesc
            newsList.append(newsDict)
    return newsList
listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
allnews = alist(listUrl)
for news in allnews:
    print(news)
3. Generate the URLs of all the list pages and fetch all the news: list extend(list), allnews
*Each student crawls the 10 list pages starting from the last digit of their student ID (a sketch for deriving the range from the ID follows below)
allnews = []
for i in range(2, 12):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
for n in allnews:
    print(n)
# Count the total number of news items crawled
print(len(allnews))
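As noted above, the starting page depends on your student ID; a minimal sketch, with studentId as a made-up placeholder (the numbered list pages begin at 2.html, hence the max(..., 2) guard):
studentId = '201606120042'            # hypothetical ID, replace with your own
start = max(int(studentId[-1]), 2)    # numbered list pages begin at 2.html
allnews = []
for i in range(start, start + 10):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))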
4. Set a reasonable crawl interval
import time
import random
# Pause a random 0-3 seconds between requests
time.sleep(random.random() * 3)
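A one-off sleep does little on its own; to space out requests, the pause belongs inside the crawl loop. A minimal sketch reusing alist from step 2:
allnews = []
for i in range(2, 12):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
    time.sleep(random.random() * 3)   # random 0-3 s pause between list pages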
5. Do simple data processing with pandas and save the result
Save to a csv or excel file:
newsdf = pd.DataFrame(allnews)
print(newsdf)
# Save to a local csv file
newsdf.to_csv(r'E:\gzccnews.csv', encoding='utf-8')
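Since this step says csv or excel, an Excel save works the same way; to_excel needs the openpyxl package installed, and the read-back line is just a quick sanity check:
newsdf.to_excel(r'E:\gzccnews.xlsx')                  # requires openpyxl
print(pd.read_csv(r'E:\gzccnews.csv', encoding='utf-8').head())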
