# coding: utf-8
import requests
from lxml import etree
import xlwt
# ----------------------------------------------------------------------
# Scrape the "latest movies" listing from ygdy8.net (電影天堂) and export
# each movie's name / type / duration / download link to an .xls file.
# ----------------------------------------------------------------------

# Browser-like User-Agent so the site does not reject our requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
}

# 1. Entry URL: first page of the listing.
url = 'http://www.ygdy8.net/html/gndy/dyzz/index.html'
# 2. Fetch it to discover the total page count.  timeout keeps a hung
#    connection from stalling the whole crawl.
response = requests.get(url, headers=headers, timeout=10)
# 3. Parse into an element tree.  Feed raw bytes so lxml honours the
#    page's own charset declaration (the site serves GBK; requests'
#    fallback guess would garble `.text`).
html = etree.HTML(response.content)
# //select[@name="sldd"]/option[last()] — the page-selector dropdown's
# last <option> holds the total number of pages.
total_page = html.xpath('//select[@name="sldd"]/option[last()]/text()')[0]
print('共有%s頁電影資訊,正在準備爬取!' % total_page)

# Prepare the spreadsheet with a header row (row 0).
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet(u'最新電影資訊')
sheet.write(0, 0, '電影名稱')
sheet.write(0, 1, '電影類型')
sheet.write(0, 2, '電影時長')
sheet.write(0, 3, '電影下載下傳位址')

count = 0  # detail pages visited (progress counter only)
row = 0    # last spreadsheet row written; kept separate from `count`
           # so filtered-out movies do not leave blank gap rows

# Loop over every listing page.
for x in range(1, int(total_page) + 1):
    print('正在爬取第%s頁資料,請稍後....' % x)
    # Listing pages follow the pattern list_23_<page>.html.
    url = 'http://www.ygdy8.net/html/gndy/dyzz/list_23_%s.html' % x
    response = requests.get(url, headers=headers, timeout=10)
    html = etree.HTML(response.content)
    # 4. Every movie link on a listing page carries class="ulink".
    hrefs = html.xpath('//a[@class="ulink"]/@href')
    for href in hrefs:
        count += 1
        # Detail-page links are site-relative; prepend the host.
        detail_url = 'http://www.ygdy8.net%s' % href
        detail_response = requests.get(detail_url, headers=headers, timeout=10)
        # Parse from bytes for the same charset reason as above.
        detail_html = etree.HTML(detail_response.content)
        # All descriptive text for a movie lives under <div id="Zoom">.
        movie_info = detail_html.xpath('//div[@id="Zoom"]//text()')
        # Reset per movie: otherwise a page missing one of the labels
        # raises NameError on the first movie, or silently inherits the
        # previous movie's value on later ones.
        movie_name = movie_type = movie_time = ''
        for movie in movie_info:
            if u'譯 名' in movie:
                movie_name = movie.split(u' ')[-1]
            elif u'類 别' in movie:
                movie_type = movie.split(u' ')[-1]
            elif u'片 長' in movie:
                movie_time = movie.split(u' ')[-1]
        download_url = detail_html.xpath('//tbody/tr/td/a/@href')
        # Keep only movies that have a link and whose first link is not
        # a magnet: URI (the 'm' prefix check).
        if download_url and not download_url[0].startswith('m'):
            print(download_url)
            row += 1
            sheet.write(row, 0, movie_name)
            sheet.write(row, 1, movie_type)
            sheet.write(row, 2, movie_time)
            # BUG FIX: write the URL string, not the whole Python list —
            # xlwt cannot store a list in a cell.
            sheet.write(row, 3, download_url[0])

# Persist everything once all pages have been crawled.
workbook.save(u'電影天堂資料.xls')