The assignment requirements come from: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE2/homework/2941
1. Get the news details from a news url: dictionary, anews
2. Get the news urls from a list-page url: list append(dictionary), alist
3. Generate the urls of all the list pages and fetch all the news: list extend(list), allnews
*Each student crawls the 10 list pages starting from the tail digits of their student ID
4. Set a reasonable crawl interval
The source code is as follows:
import requests
import re
from bs4 import BeautifulSoup
from datetime import datetime
import pandas as pd
import time
import random
# Get the news details from a news url: dictionary, anews
def click(url):  # get the click count of an article
    newsId = re.findall(r'(\d{1,5})', url)[-1]  # the article id is the last number in the url
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    res = requests.get(clickUrl)
    newsClick = res.text.split('.html')[-1].lstrip("('").rstrip("');")
    return int(newsClick)  # return a number so click counts can be compared later
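To see what the string surgery above does, here is a minimal sketch; the sample response body is an assumption about what the count API returns, inferred from the split/strip calls, not verified against the live API:
sample = "$('#hits').html('1234');"  # assumed shape of the count API's response
# everything after ".html" is "('1234');"; stripping the wrapper leaves the bare count
print(sample.split('.html')[-1].lstrip("('").rstrip("');"))  # -> 1234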
def newsdt(showinfo):  # parse the publish date and time out of the show-info text into a datetime
    newsDate = showinfo.split()[0].split(':')[1]  # e.g. '发布时间:2019-04-01' -> '2019-04-01'
    newsTime = showinfo.split()[1]                # e.g. '11:57:00'
    newsDT = newsDate + ' ' + newsTime
    dt = datetime.strptime(newsDT, '%Y-%m-%d %H:%M:%S')
    return dt
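A quick sanity check of the parsing, using a made-up show-info string in the shape the code expects (the real page carries extra fields after the time, which split() simply ignores):
showinfo = '发布时间:2019-04-01 11:57:00 作者:张三'  # hypothetical sample
print(newsdt(showinfo))  # -> 2019-04-01 11:57:00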
def new(url):  # get the title, publish time and click count of a campus news article
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsDetail['newsTitle'] = soup.select('.show-title')[0].text
    showinfo = soup.select('.show-info')[0].text
    newsDetail['newsDT'] = newsdt(showinfo)
    newsDetail['newsClick'] = click(url)
    return newsDetail
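The function can be tried on a single article first; the url below only illustrates the pattern the site uses and is not a guaranteed live page:
detail = new('http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11086.html')  # hypothetical article url
print(detail['newsTitle'], detail['newsDT'], detail['newsClick'])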
def alist(url):  # get the news urls from a list-page url: list append(dictionary), alist
    res = requests.get(url)  # fetch the list page passed in
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:  # only <li> items that are real news entries
            newsUrl = news.select('a')[0]['href']  # link to the article page
            newsDesc = news.select('.news-list-description')[0].text
            newsDict = new(newsUrl)
            newsDict['description'] = newsDesc
            newsList.append(newsDict)
    return newsList
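One list page can be crawled on its own to check the output before looping over all of them:
listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'  # the front list page
newsList = alist(listUrl)
print(len(newsList), newsList[0])  # how many articles were found, and the first one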
res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
pages = int(soup.select('#pages')[0].text.split('..')[1].rstrip(' 下一页 '))  # total number of list pages, after stripping the trailing '下一页' (next page) label
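The assignment asks each student to crawl the 10 list pages starting from the tail digits of their student ID; a minimal sketch of deriving such a range (the student number is made up):
studentId = '201706196'               # hypothetical student number
start = int(studentId[-2:])           # the tail digits pick the starting list page, here 96
pageRange = range(start, start + 10)  # ten consecutive list pages, 96-105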
allnews = []
for i in range(96, 106):  # generate the urls of list pages 96-105 and fetch all their news: list extend(list), allnews
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
    time.sleep(random.random() * 3)  # a reasonable crawl interval: pause up to 3 seconds between list pages
newsdf = pd.DataFrame(allnews)  # convert the list of dictionaries into a two-dimensional table
newsdf  # display the DataFrame in the notebook
The run result is shown in the figure below:

5. Do simple data processing with pandas and save the results
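A sketch of the kind of simple processing pandas makes easy here (the column names follow the dictionary keys built above):
hot = newsdf.sort_values(by='newsClick', ascending=False)  # most-clicked articles first
print(hot.head(10))                                        # the ten hottest articles
print(newsdf[newsdf['newsDT'] > datetime(2019, 4, 1)])     # articles published after 2019-04-01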
Save to a csv or excel file:
newsdf.to_csv(r'F:\gzccnews.csv')
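If Excel shows the Chinese titles as garbled text when opening the csv, writing utf-8 with a BOM usually fixes it:
newsdf.to_csv(r'F:\gzccnews.csv', encoding='utf_8_sig')  # utf-8 with BOM so Excel decodes Chinese correctly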
Run result:
The data was successfully saved as gzccnews.csv on the F drive.
Save to a database:
import sqlite3
with sqlite3.connect('gzccnewsdb5.sqlite') as db:
    newsdf.to_sql('gzccnewsdb5', db)  # write the table into the sqlite database
with sqlite3.connect('gzccnewsdb5.sqlite') as db:
    df2 = pd.read_sql_query('SELECT * FROM gzccnewsdb5', con=db)  # read it back into a DataFrame
df2[df2['newsClick'] > 300]  # articles with more than 300 clicks
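If newsClick ever comes back from the database as text rather than a number, the comparison above raises a TypeError; converting first is a cheap safeguard:
df2['newsClick'] = df2['newsClick'].astype(int)  # make sure the click count is numeric
df2[df2['newsClick'] > 300]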
Run result: