
Scraping Tmall Product Details and Transaction Records with Python

1. Setting Up the Python Environment

  1. This post uses Python 2.7.
  2. Modules involved: spynner, scrapy, bs4, pymssql (a quick import check follows this list).
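
Before running the crawler, it is worth confirming that all four modules import cleanly. The following is a minimal sanity check for the Python 2.7 environment above, not part of the crawler itself:

#check that every module the crawler depends on is importable (Python 2.7)
for name in ('spynner', 'scrapy', 'bs4', 'pymssql'):
    try:
        __import__(name)
        print name, 'is available'
    except ImportError as e:
        print name, 'is missing:', e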

2. The Tmall Data to Collect
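
Judging from the script in section 4, two kinds of data are collected for each product: an item summary (promotional price, list price, title, shipping fee, stock, favorites count, rating count, and monthly sales) together with the product property list, and the transaction records (buyer, style, quantity, deal date, and deal time).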

3. The Scraping Workflow
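
The script below implements the following steps:

  1. Read the product URLs to crawl from the ProductURLs table in SQL Server.
  2. Load each product page with spynner, a QtWebKit-based browser module that executes the page's JavaScript.
  3. Extract the item summary fields with XPath selectors.
  4. Click the description tab and parse the product property list with BeautifulSoup.
  5. Click the deal-record tab, parse each page of transactions, and follow the pagination links until no "next" link remains.
  6. Insert the results into the py_ProductInfo and DealRecord tables, logging any URL that fails to load.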

4. Source Code

#coding:utf-8
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import random
import pymssql


#------------------------ Connect to the database -----------------------------#
server="localhost"
user="sa"
password = "123456"
conn=pymssql.connect(server,user,password,"TmallData")
if conn:
    print "Database connected successfully!"
else:
    #note: pymssql.connect raises an exception on failure rather than
    #returning None, so this branch is effectively unreachable
    print "Database connection failed!"
cursor=conn.cursor()
#---------------------- Page interaction helpers --------------------------#
def py_click_element(browser,pos):
    #click an element on the page
    #pos example: 'a[href="#description"]'
    browser.click(pos)
    browser.wait(random.randint(3,10)) #random pause; the original range was lost, 3-10s is an assumption
    return browser

def py_click_xpath(browser,xpath):
    #locate an <a> element by xpath and click it via its href attribute
    xpath=xpath+'/@href'
    inner_href=Selector(text=browser.html).xpath(xpath).extract()
    pos='a[href="'+str(inner_href[0])+'"]'
    browser=py_click_element(browser, pos)
    return browser

def py_webpage_load(browser,url):
    #load a URL and give the page time to render; the timeout and wait
    #values were lost in the original post, so these are assumptions
    browser.load(url,load_timeout=60)
    browser.wait(10)
    return browser

def py_check_element(browser,xpath):
    #look up an element by xpath: return True if it exists, otherwise False
    if Selector(text=browser.html).xpath(xpath).extract()!=[]:
        return True
    else:
        return False

def py_extract_xpath(browser,xpath):
    #return the first match for xpath, or "none" when nothing matches
    if py_check_element(browser, xpath):
        return Selector(text=browser.html).xpath(xpath).extract()[0]
    else:
        return "none"

def py_extract_xpaths(browser,xpaths):
    #extract several xpaths from the page in one pass
    length=len(xpaths)
    results=[None]*length
    for i in range(length):
        results[i]=py_extract_xpath(browser, xpaths[i])
    return results

#----------------------------- Database helper functions ---------------------------#


#----------------------------- Data extraction functions ----------------------------#
def py_getDealRecord(doc):
    #parse one page of the deal-record table into rows of
    #[buyer, style, quantity, date, time]
    soup=BeautifulSoup(doc,'lxml')
    tr=soup.find_all('tr')
    total_dealRecord=[([0]*5) for i in range(len(tr))]
    i=-1
    for this_tr in tr:
        i=i+1
        td_user=this_tr.find_all('td',attrs={'class':"cell-align-l buyer"})
        for this_td in td_user:
            total_dealRecord[i][0]=this_td.getText().strip(' ')
            #print username
        td_style=this_tr.find_all('td',attrs={'class':"cell-align-l style"})
        for this_td in td_style:
            total_dealRecord[i][1]=this_td.getText(',').strip(' ')
            #print style
        td_quantity=this_tr.find_all('td',attrs={'class':"quantity"})
        for this_td in td_quantity:
            total_dealRecord[i][2]=this_td.getText().strip(' ')
            #print quantity
        td_dealtime=this_tr.find_all('td',attrs={'class':"dealtime"})
        for this_td in td_dealtime:
            total_dealRecord[i][3]=this_td.find('p',attrs={'class':"date"}).getText()
            total_dealRecord[i][4]=this_td.find('p',attrs={'class':"time"}).getText()
    return total_dealRecord
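
#for reference, each data row of the deal-record table is expected to look
#roughly like this (structure inferred from the parsing code above; the real
#Tmall markup may differ):
#  <tr>
#    <td class="cell-align-l buyer">b***n</td>
#    <td class="cell-align-l style">Color: black, Size: 42</td>
#    <td class="quantity">1</td>
#    <td class="dealtime"><p class="date">2017-05-01</p><p class="time">12:30:45</p></td>
#  </tr>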
#-------------------- Fetch all product links to crawl -----------------------#
cursor.execute("""
select * from ProductURLs where BrandName='NB'
""")


file=open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt",'a') #append mode so failed URLs can be logged
InProductInfo=cursor.fetchall()
browser=spynner.Browser()
for temp_InProductInfo in InProductInfo:

    #column order in ProductURLs is assumed to be (BrandName, ProductType, URL)
    BrandName=temp_InProductInfo[0]
    ProductType=temp_InProductInfo[1]
    url='https:'+temp_InProductInfo[2]
    print BrandName,'\t',ProductType,'\t',url
    #url= 'https://detail.tmall.com/item.htm?id=524425656711&rn=77636d6db8dea5e30060976fdaf9768d&abbucket=19' 

    try:
        browser=py_webpage_load(browser, url)
    except:
        print "Loading webpage failed."
        file.write(url)
        file.write('\n')
        continue

    xpaths=['//*[@id="J_PromoPrice"]/dd/div/span/text()',\
        '//*[@id="J_StrPriceModBox"]/dd/span/text()',\
        '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()',\
        '//*[@id="J_PostageToggleCont"]/p/span/text()',\
        '//*[@id="J_EmStock"]/text()',\
        '//*[@id="J_CollectCount"]/text()',\
        '//*[@id="J_ItemRates"]/div/span[2]/text()',\
        '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']
    out_ProductInfo=py_extract_xpaths(browser,xpaths)
    browser=py_click_element(browser,'a[href="#description"]')
    ProductProperty=py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
    soup=BeautifulSoup(ProductProperty,'lxml')
    li=soup.find_all('li')
    prop=''
    for this_li in li:
        prop=prop+this_li.getText()+'\\'
    prop=prop[:-1] #drop the trailing backslash separator
    out_ProductProperty=prop
    print out_ProductProperty
    cursor.execute("""
    Insert into py_ProductInfo values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    """,(BrandName,ProductType,url,\
         out_ProductInfo[],out_ProductInfo[],\
         out_ProductInfo[],out_ProductInfo[],\
         out_ProductInfo[],out_ProductInfo[],\
         out_ProductInfo[],out_ProductInfo[],\
         out_ProductProperty))
    conn.commit()
    Deal_PageCount=0
    browser=py_click_element(browser, 'a[href="#J_DealRecord"]')
    #browser.browse(True)
    DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord=py_getDealRecord(DealRecord)
    for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[2])=='0':
            #skip rows that were never filled in (e.g. header rows)
            continue
        cursor.execute("""
        Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
        """,(url,temp_DealRecord[0],temp_DealRecord[1],\
             temp_DealRecord[2],temp_DealRecord[3],\
             temp_DealRecord[4]))
        conn.commit()
    Deal_PageCount=Deal_PageCount+1
    print "Page ",Deal_PageCount
    #walk the numbered pagination links; the exact range and skipped indices
    #were lost in the original post, so a[1] (current page) and a[6] (the
    #"next" link, handled by the while loop below) are assumed here
    for i in range(1,7):
        if (i==1) or (i==6):
            continue
        xpath='//*[@id="J_showBuyerList"]/div/div/a['+str(i)+']'
        if py_check_element(browser,xpath):
            browser=py_click_xpath(browser, xpath)
            DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
            out_DealRecord=py_getDealRecord(DealRecord)
            for temp_DealRecord in out_DealRecord:
                if str(temp_DealRecord[2])=='0':
                    continue
                cursor.execute("""
                Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
                """,(url,temp_DealRecord[0],temp_DealRecord[1],\
                     temp_DealRecord[2],temp_DealRecord[3],\
                     temp_DealRecord[4]))
                conn.commit()
            Deal_PageCount=Deal_PageCount+1
            print "Page ",Deal_PageCount
    #keep following the "next" pagination link (the sixth anchor) until it
    #disappears on the last page
    while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
        browser=py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
        DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
        out_DealRecord=py_getDealRecord(DealRecord)
        for temp_DealRecord in out_DealRecord:
            if str(temp_DealRecord[2])=='0':
                continue
            cursor.execute("""
            Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
            """,(url,temp_DealRecord[0],temp_DealRecord[1],\
                 temp_DealRecord[2],temp_DealRecord[3],\
                 temp_DealRecord[4]))
            conn.commit()
        Deal_PageCount=Deal_PageCount+1
        print "Page ",Deal_PageCount