彼岸网 (netbian.com) image download
img.py
# -*- coding: utf-8 -*-
import scrapy
from imgPro.items import ImgproItem


class ImgSpider(scrapy.Spider):
    name = 'img'
    # allowed_domains = ['www.xxx.com']
    # starting URL
    start_urls = ['http://www.netbian.com/meinv/index.htm']
    # URL template for the remaining pages
    url = 'http://www.netbian.com/meinv/index_%d.htm'
    page = 2

    def parse(self, response):
        li_list = response.xpath('//*[@id="main"]/div[2]/ul/li')
        for li in li_list:
            img_src = li.xpath('./a/img/@src').extract_first()
            item = ImgproItem()
            item['img_src'] = img_src
            print(img_src)
            yield item

        # crawl only the first few pages: build the next page URL from the template
        if self.page <= 3:
            new_url = self.url % self.page
            self.page += 1
            yield scrapy.Request(url=new_url, callback=self.parse)
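Run the spider with scrapy crawl img. One defensive tweak worth noting (a sketch only, not part of the original code): if the listing page ever returns a relative src, or an li without a thumbnail, the loop can normalize the URL with response.urljoin() and skip empty values before handing the item to the pipeline.

        # Sketch of a defensive variant of the inner loop (assumption: some src
        # values may be relative or missing; the original yields the raw src as-is).
        for li in li_list:
            img_src = li.xpath('./a/img/@src').extract_first()
            if not img_src:
                # skip list entries that carry no thumbnail at all
                continue
            item = ImgproItem()
            # urljoin() makes a relative path absolute and leaves absolute URLs untouched
            item['img_src'] = response.urljoin(img_src)
            yield item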
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ImgproItem(scrapy.Item):
    # define the fields for your item here like:
    img_src = scrapy.Field()
pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
from scrapy.pipelines.images import ImagesPipeline


class ImgproPipeline(object):
    def process_item(self, item, spider):
        return item


class ImgPileLine(ImagesPipeline):
    # receive the item and issue a request for the img_src stored in it
    def get_media_requests(self, item, info):
        yield scrapy.Request(url=item['img_src'])

    # decide where the image is saved: folder (IMAGES_STORE in settings.py) + file name (returned here)
    def file_path(self, request, response=None, info=None):
        img_name = request.url.split('/')[-1]
        return img_name

    # hand the item on to the next pipeline class in line
    def item_completed(self, results, item, info):
        return item
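Here the file name is simply the last segment of the image URL. If the name should come from item data instead, a common ImagesPipeline pattern is to carry the item on the request via meta and read it back in file_path. The class below is a hypothetical variant (ImgPileLineNamed is not part of this project, and it would have to replace ImgPileLine in ITEM_PIPELINES):

# Hypothetical variant, shown only as a sketch of the request.meta pattern.
class ImgPileLineNamed(ImagesPipeline):
    def get_media_requests(self, item, info):
        # attach the item to the download request so file_path can see it
        yield scrapy.Request(url=item['img_src'], meta={'item': item})

    def file_path(self, request, response=None, info=None):
        item = request.meta['item']
        # save under a sub-folder of IMAGES_STORE, still named after the URL
        return 'meinv/' + item['img_src'].split('/')[-1]

Note that ImagesPipeline depends on the Pillow library; install it with pip install Pillow before running the crawl.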
settings.py
Note: switch ITEM_PIPELINES to the custom pipeline class and set the image storage path IMAGES_STORE.
# -*- coding: utf-8 -*-

# Scrapy settings for imgPro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'imgPro'

SPIDER_MODULES = ['imgPro.spiders']
NEWSPIDER_MODULE = 'imgPro.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'imgPro (+http://www.yourdomain.com)'
# UA spoofing
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36'

# Obey robots.txt rules
# do not obey the robots protocol
ROBOTSTXT_OBEY = False

# log level
LOG_LEVEL = 'ERROR'

# image storage directory
IMAGES_STORE = './imgs'

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'imgPro.middlewares.ImgproSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'imgPro.middlewares.ImgproDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# point ITEM_PIPELINES at the custom image pipeline class
ITEM_PIPELINES = {
    'imgPro.pipelines.ImgPileLine': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
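Besides IMAGES_STORE, ImagesPipeline understands a few optional settings. None of them are used in this project; the snippet below is only a reference sketch of documented Scrapy options you could add to settings.py:

# Optional ImagesPipeline settings (not used in this project).
IMAGES_EXPIRES = 90           # don't re-download images fetched within the last 90 days
IMAGES_MIN_HEIGHT = 110       # drop images smaller than 110x110 pixels
IMAGES_MIN_WIDTH = 110
IMAGES_THUMBS = {             # additionally generate thumbnails for each download
    'small': (270, 270),
}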