作業來自于:https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/3159
爬取豆瓣電影《無雙》影評
1.首先分析網頁
在豆瓣網站中,需要浏覽影評,是需要使用者登入的;是以,要爬取影評網頁,就需要注冊使用者、登入,捉取cookie,模拟使用者登入。
def headerRandom():
    # Pick a random browser User-Agent string (via fake_useragent) so that
    # successive requests do not all present the same UA.
    ua = UserAgent()
    uheader=ua.random
    return uheader
def url_xp(url):
    """Fetch *url* as a logged-in Douban user and return the page parsed
    with BeautifulSoup's lxml backend.

    A hard-coded session cookie simulates the login; a random 0-2s sleep
    throttles consecutive requests.
    """
    # NOTE(review): the referer value contains stray spaces and is not a
    # valid URL -- presumably Douban ignores it, but worth confirming.
    header={'User-Agent':headerRandom(),
        'referer': 'https: // movie.douban.com / subject / 26425063 / collections',
        'host': 'movie.douban.com'}
    # Session cookie captured from a logged-in browser session.
    cookie={'Cookie':'bid=1b6T3XfY1Lg; ll="118296"; __yadk_uid=8Rh5yYIVjjTiWVlvXwLMTjTiysdDbnhc; _vwo_uuid_v2=D004713234FC45F9F75852DA2C22AA70D|64d0230cd2e8f38db28c1e01475587bc; douban-fav-remind=1; __utma=30149280.1603033016.1527918356.1539482818.1555739204.4; __utmc=30149280; __utmz=30149280.1555739204.4.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; dbcl2="195240672:1OaW66LYWvU"; ck=XrRZ; ap_v=0,6.0; push_noty_num=0; push_doumail_num=0; __utmv=30149280.19524; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1555739431%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; _pk_ses.100001.4cf6=*; __utma=223695111.1276142029.1532846523.1532846523.1555739431.2; __utmb=223695111.0.10.1555739431; __utmc=223695111; __utmz=223695111.1555739431.2.1.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; douban-profile-remind=1; __utmt=1; __utmb=30149280.14.10.1555739204; _pk_id.100001.4cf6=d5d70594f72eeb39.1532846523.2.1555744004.1532846523.'}
    response = requests.get(url=url,cookies=cookie,headers=header)
    # Random pause so the scraper does not hammer the site.
    time.sleep(random.random() * 2)
    response.encoding = 'utf-8'
    soup = bs(response.content,"lxml")
    return soup
影評頁面:
需要爬取的資訊有:評論使用者名、評分等級、評論時間、評論内容、該評論的贊同和反對數量。如圖所示。
經過初步分析,評論使用者名、評分等級、評論時間、該評論的贊同和反對數量都可以在本網頁中爬取到;但是,評論内容卻不在本網頁中。那就隻能繼續觀察。最後,可以發現,所有的評論内容,隐藏在這樣的網址中:'https://movie.douban.com/j/review/9682284/full',經過仔細觀察,這樣的網址是有規則的:由https://movie.douban.com/j/review/+該評論的id+/full組成的。
在該該網址的内容中存在一個小坑,它的内容一眼看過去是以一個字典的模式存放的,當我想取字典内容出來時,卻發現報錯,經過觀察和修改,其實它的内容就是一個很長的字元串。
2.爬取資料
1.csv儲存評論使用者名、評分等級、評論時間、評論内容、該評論的贊同和反對數量
2.txt儲存評論内容
3.分析資料
在分析使用者評論星級中,根據資料繪制出的統計表,如下圖所示:
整體而言,使用者給《無雙》評分還是挺高的,力薦42%,推薦46%,意味着好評就占了88%了。
在全部評論中,有贊同評論和反對評論數量統計,分析資料如下圖:
如上圖統計表所示,使用者評論給電影《無雙》星級評分“力薦”和“推薦”的,贊同數遠遠高于反對數,說明使用者給出的評分還是被絕大多數人認可的,也證明《無雙》對的起豆瓣的8.1評分,是一部值得推薦的好電影。
再來看看關鍵的評論字眼:
總結:
1.《無雙》這部影片,在國産影片中,還是比較不錯的,雖然它是在香港主拍,但也融入了很多内地的元素,結合的不錯。
2.根據使用者評論的關鍵詞中可見,港片千年好評的警匪題材還是一樣受歡迎。
3.‘劇情’、‘反轉’、‘真’、‘架’是《無雙》的評論關鍵字眼,隻有這樣結局出乎意料、有些思考邏輯、新奇的劇情的影片,才可能會深受網友歡迎
4.網友對影片的要求還是:影片不要太過于淺顯易懂,能讓人引發思考的影片更能吸引觀衆,也更能展現好片的趨向
完整代碼:
import requests
import bs4
from bs4 import BeautifulSoup as bs
from datetime import datetime
import re
import pandas as pd
from lxml import etree
import time
import random
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from scipy.misc import imread
import fake_useragent
from fake_useragent import UserAgent
def headerRandom():
    """Return a random browser User-Agent string so that successive
    requests do not all present the same UA."""
    return UserAgent().random
def url_xp(url):
    """Download *url* as a logged-in Douban user and return it parsed with
    BeautifulSoup's lxml backend (used by the XPath-based extractors).

    Login is simulated with a session cookie captured from a browser; a
    random 0-2s sleep throttles consecutive requests.
    """
    request_headers = {
        'User-Agent': headerRandom(),
        'referer': 'https: // movie.douban.com / subject / 26425063 / collections',
        'host': 'movie.douban.com',
    }
    session_cookie = {'Cookie': 'bid=1b6T3XfY1Lg; ll="118296"; __yadk_uid=8Rh5yYIVjjTiWVlvXwLMTjTiysdDbnhc; _vwo_uuid_v2=D004713234FC45F9F75852DA2C22AA70D|64d0230cd2e8f38db28c1e01475587bc; douban-fav-remind=1; __utma=30149280.1603033016.1527918356.1539482818.1555739204.4; __utmc=30149280; __utmz=30149280.1555739204.4.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; dbcl2="195240672:1OaW66LYWvU"; ck=XrRZ; ap_v=0,6.0; push_noty_num=0; push_doumail_num=0; __utmv=30149280.19524; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1555739431%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; _pk_ses.100001.4cf6=*; __utma=223695111.1276142029.1532846523.1532846523.1555739431.2; __utmb=223695111.0.10.1555739431; __utmc=223695111; __utmz=223695111.1555739431.2.1.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; douban-profile-remind=1; __utmt=1; __utmb=30149280.14.10.1555739204; _pk_id.100001.4cf6=d5d70594f72eeb39.1532846523.2.1555744004.1532846523.'}
    resp = requests.get(url=url, cookies=session_cookie, headers=request_headers)
    # Random pause so the scraper does not hammer the site.
    time.sleep(random.random() * 2)
    resp.encoding = 'utf-8'
    return bs(resp.content, "lxml")
def url_bs(url):
    """Same download as url_xp, but parsed with the builtin html.parser on
    the decoded text (used for the /j/review/<id>/full payloads)."""
    request_headers = {
        'User-Agent': headerRandom(),
        'referer': 'https: // movie.douban.com / subject / 26425063 / collections',
        'host': 'movie.douban.com',
    }
    session_cookie = {'Cookie': 'bid=1b6T3XfY1Lg; ll="118296"; __yadk_uid=8Rh5yYIVjjTiWVlvXwLMTjTiysdDbnhc; _vwo_uuid_v2=D004713234FC45F9F75852DA2C22AA70D|64d0230cd2e8f38db28c1e01475587bc; douban-fav-remind=1; __utma=30149280.1603033016.1527918356.1539482818.1555739204.4; __utmc=30149280; __utmz=30149280.1555739204.4.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; dbcl2="195240672:1OaW66LYWvU"; ck=XrRZ; ap_v=0,6.0; push_noty_num=0; push_doumail_num=0; __utmv=30149280.19524; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1555739431%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; _pk_ses.100001.4cf6=*; __utma=223695111.1276142029.1532846523.1532846523.1555739431.2; __utmb=223695111.0.10.1555739431; __utmc=223695111; __utmz=223695111.1555739431.2.1.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; douban-profile-remind=1; __utmt=1; __utmb=30149280.14.10.1555739204; _pk_id.100001.4cf6=d5d70594f72eeb39.1532846523.2.1555744004.1532846523.'}
    resp = requests.get(url=url, cookies=session_cookie, headers=request_headers)
    # Random pause so the scraper does not hammer the site.
    time.sleep(random.random() * 2)
    resp.encoding = 'utf-8'
    return bs(resp.text, 'html.parser')
def user(url):
    """Extract the reviewer display names from a review-list page."""
    doc = etree.HTML(url_xp(url).decode())
    return doc.xpath("//div[@class='main review-item']//a[@class='name']//text()")
def times(url):
    """Return the publication time of every review on the page,
    reformatted from 'YYYY-MM-DD HH:MM:SS' into the Chinese form
    'YYYY年-MM月-DD日 HH時:MM分:SS秒'.
    """
    html = etree.HTML(url_xp(url).decode())
    # Renamed from `time`: the original shadowed the stdlib `time` module
    # imported at file level.
    raw_times = html.xpath("//div[@class='main review-item']//span[@class='main-meta']//text()")
    timeList = []
    for raw in raw_times:
        parsed = datetime.strptime(raw, '%Y-%m-%d %H:%M:%S')
        # Format with {} placeholders, then substitute the Chinese unit
        # characters (年/月/日/時/分/秒).
        formatted = datetime.strftime(parsed, '%Y{y}-%m{m}-%d{d} %H{H}:%M{M}:%S{S}').format(y='年', m='月', d='日', H='時', M='分', S='秒')
        timeList.append(formatted)
    return timeList
def levels(url):
    """Collect the star-rating title text (e.g. 力薦/推薦) of each review."""
    doc = etree.HTML(url_xp(url).decode())
    return doc.xpath("//div[@class='main review-item']//span//@title")
def userId(url):
    """Collect the data-cid attribute (the review id used in the
    /j/review/<id>/full URL) of every review on the page."""
    doc = etree.HTML(url_xp(url).decode())
    return doc.xpath("//div[@class='review-list']//div//@data-cid")
def comments(url):
    """Fetch the full body of every review listed on *url*.

    The full text lives behind https://movie.douban.com/j/review/<id>/full;
    the payload is one long string, so Chinese fragments are extracted with
    a regex and joined with commas (each entry keeps a trailing comma,
    matching the original output).
    """
    commentList = []
    for review_id in userId(url):
        curl = 'https://movie.douban.com/j/review/{}/full'.format(review_id)
        payload = url_bs(curl).text
        # Keep only runs of CJK characters; markup/JSON noise is dropped.
        fragments = re.findall(r'[\u4e00-\u9fa5]+', payload)
        # ''.join is O(n); the original quadratic `strs = strs + j + ','`
        # loop produced the same string, trailing comma included.
        commentList.append(''.join(fragment + ',' for fragment in fragments))
    return commentList
def yes(url):
    """Per-review count of up ('有用') votes, with newlines/spaces stripped."""
    doc = etree.HTML(url_xp(url).decode())
    raw_counts = doc.xpath("//div[@class='action']//a[@class='action-btn up']//span//text()")
    return [c.replace('\n', '').replace(' ', '') for c in raw_counts]
def no(url):
    """Per-review count of down ('沒用') votes, with newlines/spaces stripped."""
    doc = etree.HTML(url_xp(url).decode())
    raw_counts = doc.xpath("//div[@class='action']//a[@class='action-btn down']//span//text()")
    return [c.replace('\n', '').replace(' ', '') for c in raw_counts]
def dist(url):
    """Zip the six per-review columns of *url* into a list of row dicts,
    one dict per review, keyed user/time/level/comments/up/down.

    NOTE(review): each helper re-downloads the same page, so one call
    issues six HTTP requests.
    """
    keys = ['user', 'time', 'level', 'comments', 'up', 'down']
    rows = zip(user(url), times(url), levels(url), comments(url), yes(url), no(url))
    return [dict(zip(keys, row)) for row in rows]
def save_comment(url):
    """Scrape the review bodies of all pages and append them to
    F:\\douban\\comments.txt, one review per line.

    *url* must be the first review-list page; later pages are built with
    start=20, 40, ... (20 reviews per page).
    """
    co = comments(url)
    print("第 1 頁爬取完成!")
    j = 1
    # Even i only (odd i hit `continue` in the original): start = i*10.
    for i in range(2, 279, 2):
        url = 'https://movie.douban.com/subject/26425063/reviews?start={}'.format(str(i) + '0')
        j = j + 1
        co.extend(comments(url))
        print("第", j, "頁爬取完成!")
    # `with` guarantees the handle is closed even if a write raises;
    # the original open/close pair leaked it on error.
    with open(r'F:\douban\comments.txt', 'a', encoding='utf-8') as file:
        for line in co:
            file.write(line + '\n')
def cut_comment():
    """Read the saved comments file, strip punctuation and the BOM, and
    return the jieba word segmentation as a list of tokens."""
    # `with` closes the handle; the original `open(...).read()` leaked it.
    with open(r'F:\douban\comments.txt', 'r', encoding='utf-8') as fh:
        text = fh.read()
    # Delete all noise characters in one pass instead of chained
    # str.replace calls -- same characters removed, same result.
    noise = ",。;!?”“\n\ufeff"
    text = text.translate({ord(ch): None for ch in noise})
    # 分詞 (word segmentation)
    return jieba.lcut(text)
def cut_stop():
    """Return the cut_comment() tokens with stop words removed."""
    # `with` closes the handle; the original `open(...).read()` leaked it.
    with open(r'F:\douban\stops_chinese.txt', 'r', encoding='UTF-8') as fh:
        stops = fh.read().split('\n')
    # Set membership is O(1) per token vs the original O(len(stops)) scan;
    # the filtering result is identical.
    stop_set = set(stops)
    return [token for token in cut_comment() if token not in stop_set]
def count_comment():
    """Count token frequencies of the stop-word-filtered comments and
    return (word, count) pairs sorted by count, descending."""
    from collections import Counter  # stdlib; local import keeps the file's top imports untouched
    # Counter.most_common() is a stable sort by count (descending) over
    # insertion-ordered items, so it returns exactly what the original
    # hand-rolled dict + sort(key=getNumber, reverse=True) produced.
    return Counter(cut_stop()).most_common()
def getNumber(x):
    """Sort key helper: return the count element of a (word, count) pair."""
    return x[1]
def show_comment():
    """Render and display a word cloud of all comment tokens, shaped by
    the timg.jpg mask image."""
    # scipy.misc.imread was removed in SciPy 1.2+; matplotlib's imread
    # (already imported as plt at file level) is a drop-in replacement
    # here -- it also returns a numpy array usable as a WordCloud mask.
    mask_image = plt.imread(r'F:\douban\timg.jpg')
    joined_words = ''.join(cut_comment())
    cloud = WordCloud(
        mask=mask_image,
        height=600,
        width=800,
        background_color='#000000',
        max_words=1000,
        max_font_size=200,
        # A CJK-capable font is required or Chinese renders as boxes.
        font_path="C:\Windows\Fonts\msyh.ttc"
    ).generate(joined_words)
    plt.imshow(cloud)
    plt.axis("off")
    # 顯示詞雲 (show the word cloud)
    plt.show()
url='https://movie.douban.com/subject/26425063/reviews'
cc='https://movie.douban.com/j/review/9682284/full'
# Page 1: one dict per review (user / time / level / comments / up / down).
datas = dist(url)
print("第 1 頁爬取完成!")
j = 1
# Even i only (the original filtered odd i with `continue`): start = i*10.
for i in range(2, 279, 2):
    url = 'https://movie.douban.com/subject/26425063/reviews?start={}'.format(str(i) + '0')
    j = j + 1
    datas.extend(dist(url))
    print("第", j, "頁爬取完成!")
newsdf = pd.DataFrame(datas)
newsdf.to_csv(r'F:\douban\douban.csv',mode='a')
save_comment(url)
# Hoist count_comment() out of the loop: the original called it once per
# iteration, re-reading and re-segmenting the whole corpus 20 times.
top_words = count_comment()
for i in range(20):
    print(top_words[i])
show_comment()