Python crawler: log in and save the session to fetch content that only logged-in users can get…
2018-10-26 05:29:35  Source: 博客园 (cnblogs)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup


def is_form_hash(tag):
    # Matcher for the hidden <input name="formhash"> field in the login form.
    return tag.has_attr('name') and tag.get('name') == 'formhash'


def is_refer(tag):
    # Matcher for the hidden <input name="referer"> field in the login form.
    return tag.has_attr('name') and tag.get('name') == 'referer'


class haifeng_crawler:

    def __init__(self, user_name, pass_word):
        self.cookies = dict()
        self.username = user_name
        self.password = pass_word
        self.session = requests.session()

    def update_cookies(self, new_cookies):
        # Merge cookies returned by the latest response into the saved dict.
        for key in new_cookies:
            self.cookies[key] = new_cookies[key]

    def req_get(self, url):
        # Push the saved cookies onto the session, send the GET,
        # then pull any new cookies from the response back into self.cookies.
        self.session.cookies = requests.utils.cookiejar_from_dict(self.cookies)
        resp = self.session.get(url)
        self.update_cookies(requests.utils.dict_from_cookiejar(resp.cookies))
        print(self.cookies)
        return resp

    def req_post(self, url, data):
        self.session.cookies = requests.utils.cookiejar_from_dict(self.cookies)
        resp = self.session.post(url, data)
        self.update_cookies(requests.utils.dict_from_cookiejar(resp.cookies))
        # print(self.cookies)
        return resp

    def login(self):
        # Step 1: fetch the AJAX login form and parse formhash / referer out of it.
        url = 'http://www.96bbs.com/member.php?mod=logging&action=login&infloat=yes&handlekey=login&inajax=1&ajaxtarget=fwin_content_login'
        page_res = self.req_get(url)
        soup = BeautifulSoup(page_res.text, "html.parser")
        rt = soup.find('root')
        if rt:
            # The form HTML is wrapped inside a <root> element; parse its text again.
            soup = BeautifulSoup(rt.text, "html.parser")
        else:
            return None
        formhash = soup.find(is_form_hash).get("value")
        referer = soup.find(is_refer).get("value")
        print(formhash)
        print(referer)
        # Step 2: submit the login request with the extracted hidden fields.
        url = 'http://www.96bbs.com/member.php?mod=logging&action=login&loginsubmit=yes&handlekey=login&loginhash=LVCbx&inajax=1'
        data = {
            'formhash': formhash,
            'referer': referer,
            'username': self.username,
            # The site expects the already-encrypted password, not the plain text.
            'password': self.password,
            'questionid': 0,
            'answer': ''
        }
        resp = self.req_post(url, data)
        soup = BeautifulSoup(resp.text, "html.parser")
        rt = soup.find('root').text
        print(rt)

    def visit_home(self):
        url = 'http://www.96bbs.com/forum.php'
        self.req_get(url)

    def visit_attachment(self, url):
        # Attachments can only be downloaded with a logged-in session.
        resp = self.req_get(url)
        print(resp.status_code)
        print(resp.text)
        return resp


if __name__ == "__main__":
    haifeng = haifeng_crawler("your username", "the encrypted password taken from the login page")
    haifeng.login()
    haifeng.visit_attachment("http://www.96bbs.com/forum.php?mod=attachment&aid=MjI0NzQ5OHw3YjNkMWMwY3wxNTQwMzYxMzEwfDQ5NzM5OXwzNTM5NTgy")
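The cookies collected above live only in memory, so every run has to log in again. A minimal sketch (not part of the original post) of one way to persist the cookie dict to disk and reuse it on the next run; the file name haifeng_cookies.json and the helper names save_cookies / load_cookies are arbitrary choices for illustration:

# Sketch: persist the crawler's cookie dict between runs so a successful
# login can be reused. File name and helper names are illustrative only.
import json
import os

COOKIE_FILE = "haifeng_cookies.json"

def save_cookies(crawler, path=COOKIE_FILE):
    # Dump the plain dict kept in crawler.cookies to disk as JSON.
    with open(path, "w", encoding="utf-8") as f:
        json.dump(crawler.cookies, f)

def load_cookies(crawler, path=COOKIE_FILE):
    # Restore cookies from a previous run; returns True if anything was loaded.
    if not os.path.exists(path):
        return False
    with open(path, "r", encoding="utf-8") as f:
        crawler.update_cookies(json.load(f))
    return True

# Possible usage: only log in when no saved session is available.
# haifeng = haifeng_crawler("your username", "encrypted password from the login page")
# if not load_cookies(haifeng):
#     haifeng.login()
#     save_cookies(haifeng)
# haifeng.visit_attachment("http://www.96bbs.com/forum.php?mod=attachment&aid=...")

Note that saved cookies eventually expire, so a real crawler would still fall back to login() when a request comes back as not authenticated.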