Python Scrapy crawler project (Part 2)
Crawl target: Fang.com (房天下), the nationwide rental listings site (start URL: http://zu.fang.com/cities.aspx)
Fields to crawl: city; title; rental type; price; housing layout; area; address; transportation
Anti-anti-crawling measures: a random User-Agent and a request download delay (see the settings sketch at the end of step 5)
1. Create the project:
scrapy startproject homepro
2. Enter the homepro folder and run the genspider command to generate the spider file:
scrapy genspider home "zu.fang.com"
Once the commands finish, open the project directory with PyCharm.
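For orientation, the generated project should now look roughly like this (a sketch; the exact files vary slightly by Scrapy version):

homepro/
    scrapy.cfg
    homepro/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            home.py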
3. Edit items.py in that directory to define the fields you want to crawl:
import scrapy


class HomeproItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    city = scrapy.Field()       # city
    title = scrapy.Field()      # listing title
    rentway = scrapy.Field()    # rental type
    price = scrapy.Field()      # price
    housetype = scrapy.Field()  # housing layout
    area = scrapy.Field()       # area
    address = scrapy.Field()    # address
    traffic = scrapy.Field()    # transportation
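Scrapy items behave like dicts, so a quick sanity check in a Python shell looks like this (illustrative values only):

>>> from homepro.items import HomeproItem
>>> item = HomeproItem()
>>> item['city'] = '北京'
>>> dict(item)
{'city': '北京'}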
4. Enter the spiders folder, open home.py, and write the spider:
# -*- coding: utf-8 -*-
import scrapy
from homepro.items import HomeproItem
from scrapy_redis.spiders import RedisSpider


class HomeSpider(RedisSpider):
    name = 'home'
    allowed_domains = ['zu.fang.com']
    # start_urls = ['http://zu.fang.com/cities.aspx']
    # With scrapy-redis, the start URL is pushed into this Redis key instead:
    redis_key = 'homespider:start_urls'
    def parse(self, response):
        # Collect the link to every city's rental subdomain
        hrefs = response.xpath('//div[@class="onCont"]/ul/li/a/@href').extract()
        for href in hrefs:
            href = 'http:' + href
            yield scrapy.Request(url=href, callback=self.parse_city, dont_filter=True)

    def parse_city(self, response):
        # The pager text reads "共N页"; stripping the characters 共 and 页 leaves N
        page_num = response.xpath('//div[@id="rentid_D10_01"]/span[@class="txt"]/text()').extract()[0].strip('共页')
        for page in range(1, int(page_num) + 1):
            if page == 1:
                url = response.url
            else:
                # The site encodes page p as house/i(30+p), e.g. page 2 -> house/i32
                url = response.url + 'house/i%d' % (page + 30)
            yield scrapy.Request(url=url, callback=self.parse_houseinfo, dont_filter=True)

    def parse_houseinfo(self, response):
        divs = response.xpath('//dd[@class="info rel"]')
        for info in divs:
            # City name comes from the page-level breadcrumb, e.g. "北京租房" -> "北京"
            city = info.xpath('//div[@class="guide rel"]/a[2]/text()').extract()[0].rstrip("租房")
            title = info.xpath('.//p[@class="title"]/a/text()').extract()[0]
            rentway = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[0].extract().replace(" ", '').lstrip('\r\n')
            housetype = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[1].extract().replace(" ", '')
            area = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[2].extract().replace(" ", '')
            addresses = info.xpath('.//p[@class="gray6 mt12"]//span/text()').extract()
            address = '-'.join(addresses)
            try:
                des = info.xpath('.//p[@class="mt12"]//span/text()').extract()
                traffic = '-'.join(des)
            except Exception:
                traffic = "暂无详细信息"  # "no details available"
            # The price is split across a <span> (amount) and the surrounding text (unit)
            p_name = info.xpath('.//div[@class="moreInfo"]/p/text()').extract()[0]
            p_price = info.xpath('.//div[@class="moreInfo"]/p/span/text()').extract()[0]
            price = p_price + p_name

            item = HomeproItem()
            item['city'] = city
            item['title'] = title
            item['rentway'] = rentway
            item['price'] = price
            item['housetype'] = housetype
            item['area'] = area
            item['address'] = address
            item['traffic'] = traffic
            yield item
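The header promised a random User-Agent as an anti-anti-crawling measure. A minimal sketch of such a downloader middleware, added to the project's middlewares.py (the class name and the User-Agent list are illustrative, not part of the original post):

import random

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36',
]

class RandomUserAgentMiddleware(object):
    def process_request(self, request, spider):
        # Attach a different User-Agent to every outgoing request
        request.headers['User-Agent'] = random.choice(USER_AGENTS)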
5. Configure settings.py for the scrapy-redis run:
# Use the scrapy-redis scheduler
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Use the scrapy-redis duplicate filter
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
# Queue class used to order the crawl; the default, shown here, sorts by priority
# (as plain Scrapy does) using a Redis sorted set, so it is neither FIFO nor LIFO
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
REDIS_HOST = '10.8.153.73'
REDIS_PORT = 6379
# Whether to keep the scheduler queue and dupefilter records on close: True = keep, False = clear
SCHEDULER_PERSIST = True
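To wire in the measures listed at the top, the following additions are one possible sketch: DOWNLOAD_DELAY is Scrapy's built-in request delay, the middleware path assumes the RandomUserAgentMiddleware sketched after step 4 lives in middlewares.py, and RedisPipeline is scrapy-redis's stock pipeline for writing scraped items back to Redis:

# Delay between requests (anti-anti-crawling)
DOWNLOAD_DELAY = 1
# Enable the random User-Agent middleware sketched above (path is an assumption)
DOWNLOADER_MIDDLEWARES = {
    'homepro.middlewares.RandomUserAgentMiddleware': 543,
}
# Store scraped items in Redis
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 300,
}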
6. Copy the code to the other worker machines and start the spider on each (scrapy crawl home); each worker's Redis client connects to the master's Redis server:
redis-cli -h <master server IP>
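Before starting the spider, a worker can confirm it reaches the master's Redis (using the REDIS_HOST from step 5); the expected reply is PONG:

redis-cli -h 10.8.153.73 ping
PONG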
7. On the master, start redis-server first, then redis-cli, and push the start URL into the spider's redis_key:
lpush homespider:start_urls http://zu.fang.com/cities.aspx
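With RedisPipeline enabled (see the settings sketch in step 5), scraped items accumulate in a Redis list named "<spider name>:items" by scrapy-redis's default, so progress can be checked from redis-cli:

llen home:items
lrange home:items 0 0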