Spider-Man: The Scrapy Framework
1. Installation
On Windows, open a cmd prompt and run pip install scrapy (a Python environment must already be installed).
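To confirm the install worked, you can print the installed version; it outputs something like "Scrapy 2.x":
scrapy version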
2. Create a new project
scrapy startproject project_name
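For reference, startproject generates a layout roughly like this (shown for a project named myfirstPj; details vary slightly by Scrapy version):
myfirstPj/
    scrapy.cfg            # deploy configuration
    myfirstPj/
        __init__.py
        items.py          # item definitions (step 5)
        middlewares.py    # spider/downloader middlewares
        pipelines.py      # item pipelines (step 7)
        settings.py       # project settings (step 4)
        spiders/
            __init__.py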
3. Auto-generate a spider file that crawls Baidu
scrapy genspider baidu www.baidu.com
baidu.py under spiders/ is generated for us automatically by Scrapy.
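The generated baidu.py looks roughly like this (the exact template text varies by Scrapy version):
import scrapy

class BaiduSpider(scrapy.Spider):
    name = 'baidu'
    allowed_domains = ['www.baidu.com']
    start_urls = ['http://www.baidu.com/']

    def parse(self, response):
        pass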
4. The project configuration file, settings.py
# Scrapy settings for myfirstPj project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'myfirstPj'
SPIDER_MODULES = ['myfirstPj.spiders']
NEWSPIDER_MODULE = 'myfirstPj.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT: commented out by default, but very important. Without it your requests are easily flagged as coming from a script; at minimum write something like Mozilla/5.0.
#USER_AGENT = 'myfirstPj (+http://www.yourdomain.com)'
# Obey robots.txt rules
# ROBOTSTXT_OBEY: whether to obey robots.txt. Defaults to True; change it to False, otherwise many sites cannot be crawled.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS: the maximum number of concurrent requests, i.e. how many requests Scrapy is allowed to have in flight at the same time.
CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY: delay between downloads, in seconds; it throttles how fast the spider crawls. Tune it for your project, neither too fast nor too slow. Here it is set to 3 seconds (pause 3 seconds per request); 1 second is usually a good trade-off, and a fraction of a second is fine when there are many pages to fetch.
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED: whether to keep cookies. Enabled by default; when on, cookies picked up during the crawl are remembered between requests, which is very handy.
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS: the default request headers. The USER_AGENT above actually ends up inside these headers; adjust them to match whatever you are crawling.
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'myfirstPj.middlewares.MyfirstpjSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'myfirstPj.middlewares.MyfirstpjDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES: the item pipelines. 300 is the priority; the lower the number, the earlier the pipeline runs.
ITEM_PIPELINES = {
    'myfirstPj.pipelines.MyfirstpjPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Log level and log file
# 1. DEBUG    debugging messages
# 2. INFO     general information
# 3. WARNING  warnings
# 4. ERROR    ordinary errors
# 5. CRITICAL severe errors
LOG_LEVEL = "WARNING"
LOG_FILE = "logs/test.log"
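Any of these settings can also be overridden for a single spider via the custom_settings class attribute. A minimal sketch (the spider name here is made up for illustration):
import scrapy

class ExampleSpider(scrapy.Spider):
    name = 'example'  # hypothetical spider, for illustration only
    # custom_settings takes precedence over settings.py for this spider
    custom_settings = {
        'DOWNLOAD_DELAY': 1,  # override the project-wide 3-second delay
        'LOG_LEVEL': 'INFO',
    }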
5. Edit the item data structure (items.py)
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy

class MyfirstpjItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    description = scrapy.Field()
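Items behave like dicts but only accept declared fields, which catches typos early. A quick usage sketch (the values are made up):
item = MyfirstpjItem()
item['name'] = 'Demo title'
item['description'] = 'Demo description'
print(dict(item))       # {'name': 'Demo title', 'description': 'Demo description'}
# item['author'] = 'x'  # would raise KeyError: 'author' is not a declared field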
6. Write the spider program, in the spider file under spiders/
import scrapy
from ..items import MyfirstpjItem

class BaiduSpider(scrapy.Spider):
    # the spider keeps the generated name 'baidu' but actually crawls v.qq.com
    name = 'baidu'
    allowed_domains = ['v.qq.com']
    start_urls = ['https://v.qq.com/x/bu/pagesheet/list?append=1&channel=cartoon&iarea=1&listpage=2&offset=0&pagesize=30']
    offset = 0

    def parse(self, response):
        lists = response.xpath('//div[@class="list_item"]')
        for i in lists:
            # create a fresh item per entry so yielded items do not share state
            items = MyfirstpjItem()
            items['name'] = i.xpath('./a/@title').get()
            items['description'] = i.xpath('./div/div/@title').get()
            yield items
        # paginate: bump the offset by one page (30 entries) up to offset 120
        if self.offset < 120:
            self.offset += 30
            url = 'https://v.qq.com/x/bu/pagesheet/list?append=1&channel=cartoon&iarea=1&listpage=2&offset={}&pagesize=30'.format(
                str(self.offset))
            yield scrapy.Request(url=url, callback=self.parse)
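Since all the offsets are known up front, the same pagination can also be expressed without a mutable counter. A sketch of that alternative (the class name is made up; this variant is not from the original post):
from urllib.parse import urlencode
import scrapy
from ..items import MyfirstpjItem

class PaginatedSpider(scrapy.Spider):  # hypothetical name, for illustration
    name = 'paginated'
    allowed_domains = ['v.qq.com']

    def start_requests(self):
        base = 'https://v.qq.com/x/bu/pagesheet/list'
        for offset in range(0, 150, 30):  # offsets 0, 30, ..., 120
            params = {'append': 1, 'channel': 'cartoon', 'iarea': 1,
                      'listpage': 2, 'offset': offset, 'pagesize': 30}
            yield scrapy.Request('{}?{}'.format(base, urlencode(params)))

    def parse(self, response):
        for i in response.xpath('//div[@class="list_item"]'):
            item = MyfirstpjItem()
            item['name'] = i.xpath('./a/@title').get()
            item['description'] = i.xpath('./div/div/@title').get()
            yield item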
7. Hand items to the pipeline for output
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

class MyfirstpjPipeline:
    def process_item(self, item, spider):
        # print each item as it passes through; a real pipeline would persist it
        print(item)
        return item
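For something more useful than print, here is a sketch of a pipeline that persists items as JSON Lines (the class name and output file are made up); it uses Scrapy's open_spider/close_spider hooks and would be registered in ITEM_PIPELINES just like the class above:
import json

class JsonLinesPipeline:  # hypothetical pipeline, for illustration
    def open_spider(self, spider):
        # called once when the spider starts
        self.file = open('items.jl', 'w', encoding='utf-8')

    def close_spider(self, spider):
        # called once when the spider finishes
        self.file.close()

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item  # pass the item on to any later pipelines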
8. Create the log folder
Create a logs/ directory in the project root (mkdir logs): the logs/test.log path set by LOG_FILE in settings.py requires the directory to exist before Scrapy can write the log file.
9. The run script
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: 拾壹
@Time: 2021/8/30 16:35
"""
from scrapy import cmdline

# run the spider; -o exports scraped items to a file (format inferred from the extension)
# cmdline.execute('scrapy crawl baidu'.split())
cmdline.execute('scrapy crawl baidu -o baidu.csv'.split())
# cmdline.execute('scrapy crawl baidu -o baidu.json'.split())
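An equivalent way to launch the spider without cmdline, as a sketch: CrawlerProcess loads settings.py through get_project_settings.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('baidu')  # the spider's name attribute
process.start()         # blocks until the crawl finishes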
10. Run results