在lxml中使用XPath语法:
获取所有li标签:
# Get all <li> elements from the document.
from lxml import etree

html = etree.parse('hello.html')
# FIX: the original used the Python 2 statement `print type(html)`,
# which is a SyntaxError in Python 3 (the rest of the snippet already
# uses print() as a function).
print(type(html))  # shows the return type of etree.parse()
result = html.xpath('//li')
print(result)  # list of <li> Element objects
获取所有li元素下的所有class属性的值:
# Collect the value of every class attribute found on <li> elements.
from lxml import etree

tree = etree.parse('hello.html')
class_values = tree.xpath('//li/@class')
print(class_values)
获取li标签下href为www.baidu.com的a标签:
# Find <a> children of <li> whose href attribute equals "www.baidu.com".
from lxml import etree

tree = etree.parse('hello.html')
matches = tree.xpath('//li/a[@href="www.baidu.com"]')
print(matches)
获取li标签下所有span标签:
# Get every <span> element nested anywhere under an <li>.
from lxml import etree

tree = etree.parse('hello.html')
# Note: '//li/span' would be wrong here — a single slash selects only
# direct children, and <span> is not a direct child of <li>, so the
# descendant axis (double slash) is required.
spans = tree.xpath('//li//span')
print(spans)
获取li标签下的a标签里的所有class:
# Gather all class attributes of <a> tags inside <li> elements.
from lxml import etree

tree = etree.parse('hello.html')
classes = tree.xpath('//li/a//@class')
print(classes)
获取最后一个li的a的href属性对应的值:
# Read the href attribute of the <a> inside the last <li>.
from lxml import etree

tree = etree.parse('hello.html')
# The predicate [last()] selects the final element of the node set.
hrefs = tree.xpath('//li[last()]/a/@href')
print(hrefs)
获取倒数第二个li元素的内容:
# Print the text content of the <a> inside the second-to-last <li>.
from lxml import etree

tree = etree.parse('hello.html')
links = tree.xpath('//li[last()-1]/a')
# The .text attribute yields an element's text content.
print(links[0].text)
获取倒数第二个li元素的内容的第二种方式:
# Alternative way to get the second-to-last <li>'s text:
# use the XPath text() node test instead of the .text attribute.
from lxml import etree

tree = etree.parse('hello.html')
texts = tree.xpath('//li[last()-1]/a/text()')
print(texts)
使用requests和xpath爬取电影天堂
# Crawl the dytt8 movie-list site with requests + lxml XPath.
import requests
from lxml import etree

BASE_DOMAIN = 'http://www.dytt8.net'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
    'Referer': 'http://www.dytt8.net/html/gndy/dyzz/list_23_2.html'
}


def spider():
    """Fetch one list page and crawl the first movie detail page found."""
    url = 'http://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
    resp = requests.get(url, headers=HEADERS)
    # resp.content is the raw encoded bytes; the site serves gbk, so
    # decode explicitly to get the page source as a unicode string
    # (resp.text would guess the charset).
    text = resp.content.decode('gbk')
    # tree is the lxml-parsed document; its xpath() method extracts data.
    tree = etree.HTML(text)
    all_a = tree.xpath("//div[@class='co_content8']//a")
    for a in all_a:
        title = a.xpath("text()")[0]
        href = a.xpath("@href")[0]
        if href.startswith('/'):
            detail_url = BASE_DOMAIN + href
            crawl_detail(detail_url)
            # Demo only crawls the first detail page, then stops.
            break


def crawl_detail(url):
    """Download one movie detail page and print its main cast list."""
    resp = requests.get(url, headers=HEADERS)
    text = resp.content.decode('gbk')
    tree = etree.HTML(text)
    create_time = tree.xpath("//div[@class='co_content8']/ul/text()")[0].strip()
    imgs = tree.xpath("//div[@id='Zoom']//img/@src")
    # First image is the movie poster, second is a screenshot.
    cover = imgs[0]
    screenshoot = imgs[1]
    # All text nodes under the Zoom div — the movie metadata lines.
    infos = tree.xpath("//div[@id='Zoom']//text()")
    for index, info in enumerate(infos):
        if info.startswith("◎年 代"):
            year = info.replace("◎年 代", "").strip()
        if info.startswith("◎豆瓣评分"):
            douban_rating = info.replace("◎豆瓣评分", '').strip()
        if info.startswith("◎主 演"):
            # Walk forward from the current position collecting actor
            # lines until the next "◎"-prefixed metadata field.
            actors = [info]
            for x in range(index + 1, len(infos)):
                actor = infos[x]
                # BUG FIX: the original loop appended lines that start
                # with "◎" (i.e. the following metadata fields) and
                # silently dropped the actual actor names. A "◎" line
                # marks the END of the cast block, so break there and
                # collect everything before it instead.
                if actor.startswith("◎"):
                    break
                actors.append(actor.strip())
            print(",".join(actors))


if __name__ == '__main__':
    spider()
从gitbook上复制过来的,博客园的markdown好像对格式不兼容,缩进没办法显示