# I'm a web-scraping beginner; following an online tutorial to crawl
# 91chuli (www.91chuli.com). Feedback from experienced folks is welcome.
# This version uses lxml for the scraping.
import requests

from lxml import etree
def getHTMLText(url):
    """Fetch *url* and return the response body decoded as UTF-8 text.

    Raises
    ------
    requests.HTTPError
        Via raise_for_status() when the server answers with a non-2xx status.
    """
    headers = {
        # Session cookie copied from a logged-in browser session; it will
        # expire eventually — refresh it if requests start failing.
        'cookie': 'ssids=1581214855718752; sfroms=JIAOYIMALL001; historyScanGame=%5B%225667%22%2Cnull%5D; session=1581214855718753-7; showFixGuideDialog=true',
        'user-agent': 'Mozilla/5.0',
    }
    # timeout keeps the script from hanging forever on a dead connection
    r = requests.get(url, headers=headers, timeout=10)
    r.raise_for_status()
    # Force UTF-8: the site serves UTF-8 but may not declare it in headers
    r.encoding = 'utf-8'
    return r.text
def shixian(url):
    """Parse a listing page and print an index / price / name table.

    Parameters
    ----------
    url : str
        Despite the name, this is the full HTML text of a listing page
        (as returned by getHTMLText), not a URL.
    """
    htmls = etree.HTML(url)
    # Common XPath prefix down to each goods <li>; note the trailing space
    # in "sel-content " matches the site's actual class attribute.
    item = ('//div[@class="wrap"]/div[@class="mod-con sel-content "]'
            '/div[@class="bd"]/ul[@class="list-con specialList"]'
            '/li[@name="goodsItem"]')
    mc = htmls.xpath(item + '/span[@class="name"]/span[@class="is-account"]/a/text()')
    price = htmls.xpath(item + '/span[@class="price"]')
    tplt = "{:4}\t{:12}\t{:20}"
    # Bug fix: the original did tplt.format("91处理网"), which raises
    # IndexError (3 placeholders, 1 argument) before any row is printed.
    print("91处理网")
    # zip() pairs names with prices and stops at the shorter list, so a
    # mismatch between the two XPath results can no longer raise IndexError.
    for count, (name, price_el) in enumerate(zip(mc, price), start=1):
        print(tplt.format(count, price_el.text, name))
if __name__ == '__main__':
    # Bug fix: the original tested `name == 'main'`, which raises NameError;
    # the script entry guard must compare __name__ against '__main__'.
    url = 'https://www.91chuli.com/'
    # Crawl page 1 of the listing; change the number to fetch other pages.
    url = url + '-n' + '1' + '.html'
    html = getHTMLText(url)
    shixian(html)