scrapy 一些设置和问题
2019-02-20 00:46:06来源:博客园 阅读 ()
scrapy设置ua池
设置后在setting启用
# Enable the custom downloader middlewares (settings.py).
# Lower order number = closer to the engine; middlewares run in order on the
# way out (process_request) and in reverse on the way back (process_response).
DOWNLOADER_MIDDLEWARES = {
    'laogou.middlewares.LaogouDownloaderMiddleware': 543,
    'laogou.middlewares.randomUserAgentMiddleware': 400,
    # FIX: the class defined in middlewares.py is randomHttpProxyMiddleware,
    # not randomProxyMiddleware; also give it its own order value instead of
    # duplicating 400.
    'laogou.middlewares.randomHttpProxyMiddleware': 410,
}
import random

from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware


class randomUserAgentMiddleware(UserAgentMiddleware):
    """Downloader middleware that sets a random User-Agent on each request.

    Enable it via DOWNLOADER_MIDDLEWARES in settings.py.
    """

    # Pool of desktop-browser User-Agent strings to rotate through.
    # FIX: the original list was missing the comma after the first entry, so
    # Python's implicit string concatenation silently merged the first two
    # strings into one bogus User-Agent value.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    ]

    def __init__(self, user_agent=''):
        # Keep the parent class's attribute; the actual UA is chosen
        # per request in process_request.
        self.user_agent = user_agent

    def process_request(self, request, spider):
        """Attach a randomly chosen User-Agent to the outgoing request.

        setdefault() leaves an explicitly provided User-Agent header intact.
        """
        ua = random.choice(self.user_agent_list)
        if ua:
            request.headers.setdefault('User-Agent', ua)
scrapy设置ip池
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware class randomHttpProxyMiddleware(HttpProxyMiddleware): def __init__(self,ip = ''): self.ip = ip def process_request(self, request, spider): ip = random.choice(self.ip_list) if ip: request.meta['proxy'] = ip ip_list = [ 'https://182.122.176.49:9999', 'https://125.123.141.20:9999' ]
# Scrapy with a custom cookie session (meta={'cookiejar': ...}):
class LaogouwangSpider(scrapy.Spider):
    """Spider for lagou.com job listings that carries cookies across requests.

    Requires COOKIES_ENABLED = True in settings.py. The 'cookiejar' meta key
    selects which cookie session each request belongs to, so the session
    obtained from the landing page is reused by the AJAX POST.
    """

    name = 'laogouwang'
    # allowed_domains = ['www.laogou.com']
    # start_urls = ['http://www.laogou.com/']

    def start_requests(self):
        # 'cookiejar': 1 starts cookie session #1; subsequent requests pass
        # the same jar id along so Scrapy keeps the session cookies updated.
        url = 'https://www.lagou.com/'
        yield scrapy.Request(url=url, callback=self.parse, meta={'cookiejar': 1})

    def parse(self, response):
        # Cookies we sent vs. cookies the server just set.
        print(response.request.headers.getlist('Cookie'))
        print(response.headers.getlist('Set-Cookie'))
        # NOTE(review): settings.keys / settings.cidy come from the project
        # settings module; 'cidy' looks like a typo for 'city' — confirm there.
        url = ('https://www.lagou.com/jobs/list_' + str(settings.keys)
               + '?city=' + str(settings.cidy)
               + '&cl=false&fromSearch=true&labelWords=&suginput=')
        print(response.meta['cookiejar'])
        yield scrapy.Request(
            url=url,
            callback=self.download,
            meta={'cookiejar': response.meta['cookiejar'], 'id': 1},
            dont_filter=True,  # bypass the duplicate-request filter
        )

    def download(self, response):
        """POST to Lagou's positionAjax endpoint using the page's session."""
        # print(response.text)
        print(response.request.headers.getlist('Cookie'))
        print(response.headers.getlist('Set-Cookie'))
        i = response.meta.get('id')
        # The endpoint expects "first" as the strings 'true'/'false'.
        # FIX: local was named `file`, shadowing the builtin.
        first = 'true' if i == 1 else 'false'
        data = {
            "first": first,
            "pn": str(i),
            "kd": str(settings.keys),
        }
        headers_post = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Content-Length': str(len(urllib.parse.urlencode(data))),
            'Connection': 'keep-alive',
            # Lagou rejects POSTs whose Referer is not the search page.
            'Referer': str(response.url),
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
        }
        print(headers_post)
        print(str(response.url))
        print(data)
        url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
        # dont_redirect + handle_httpstatus_list disable redirect handling for
        # this request only, so a 301/302 response reaches the callback.
        yield scrapy.FormRequest(
            url=url,
            formdata=data,
            headers=headers_post,
            callback=self.files,
            dont_filter=True,
            meta={'cookiejar': True,
                  'dont_redirect': True,
                  'handle_httpstatus_list': [301, 302]},
        )
meta={'cookiejar':1}这个是启用 cookie 记录,在后面的请求中使用'cookiejar':response.meta['cookiejar']可以更新cookie。
注意,需要在setting中设置COOKIES_ENABLED = True
获取请求cookies是response.request.headers.getlist('Cookie'),响应cookies是response.headers.getlist('Set-Cookie')。
dont_filter=True 的作用是禁用请求去重过滤(并不是禁止重定向);禁用重定向的方法见下一条。
在meta里使用'dont_redirect': True,'handle_httpstatus_list': [301,302]可以在当前scrapy请求里禁用重定向。
scrapy 使用日志
import datetime
import os

# Per-run log file name under logs/, e.g. logs/2019_02_20_00_46_06_laogou.log
# FIX: the original format '%Y_%m_%H_%M_%S' omitted the day ('%d'), producing
# misleading file names; `LOG_STDOUT = true` was also a NameError — Python's
# boolean literal is True.
time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
LOG_FILE = 'logs' + os.sep + str(time) + '_' + "laogou.log"
LOG_LEVEL = "DEBUG"
LOG_STDOUT = True  # redirect stdout/stderr (e.g. print()) into the log
scrapy提供五种日志级别。
1.CRITICAL -- 关键错误
2.ERROR -- 一般级别的错误
3.WARNING -- 警告信息
4.INFO -- 信息消息的日志(建议生产模式使用)
5.DEBUG -- 调试消息的日志(建议开发模式)
LOG_FILE 用于日志输出记录的文件名 默认None
LOG_LEVEL 要记录的最低级别 默认DEBUG
LOG_STDOUT 如果为 True,则进程的所有标准输出和错误都重定向到日志,例如 print() 的输出;默认 False
使用文件启动spider
#laogoustrart.py
from laogou.spiders.laogouwang import LaogouwangSpider from scrapy.crawler import CrawlerProcess from scrapy.utils.project import get_project_settings process = CrawlerProcess(get_project_settings()) process.crawl(LaogouwangSpider) process.start()
原文链接:https://www.cnblogs.com/dayouzi/p/10390873.html
如有疑问请与原作者联系
标签:
版权申明:本站文章部分自网络,如有侵权,请联系:west999com@outlook.com
特别注意:本站所有转载文章言论不代表本站观点,本站所提供的摄影照片,插画,设计作品,如需使用,请与原作者联系,版权归原作者所有
- 使用scrapy框架爬取全书网书籍信息。 2019-08-13
- pycharm查看函数用法,参数信息的设置方法 2019-07-24
- scrapy学习笔记(二)框架结构工作原理 2019-07-24
- linux python升级及全局环境变量设置 2019-05-24
- scrapy-redis 分布式哔哩哔哩网站用户爬虫 2019-05-22
IDC资讯: 主机资讯 注册资讯 托管资讯 vps资讯 网站建设
网站运营: 建站经验 策划盈利 搜索优化 网站推广 免费资源
网络编程: Asp.Net编程 Asp编程 Php编程 Xml编程 Access Mssql Mysql 其它
服务器技术: Web服务器 Ftp服务器 Mail服务器 Dns服务器 安全防护
软件技巧: 其它软件 Word Excel Powerpoint Ghost Vista QQ空间 QQ FlashGet 迅雷
网页制作: FrontPages Dreamweaver Javascript css photoshop fireworks Flash