python 爬虫登录保存会话去获取只有登录能获取的数据

2018-10-26 05:29:35来源:博客园 阅读 ()

新老客户大回馈,云服务器低至5折

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import ConfigParser

import datetime
import sys
import requests
from requests.cookies import RequestsCookieJar
from bs4 import BeautifulSoup
import log_config
import time
import random
import re


def is_form_hash(tag):
    """BeautifulSoup filter: True for a tag whose ``name`` attribute is ``formhash``."""
    if not tag.has_attr('name'):
        return False
    return tag.get('name') == 'formhash'


def is_refer(tag):
    """BeautifulSoup filter: True for a tag whose ``name`` attribute is ``referer``."""
    if not tag.has_attr('name'):
        return False
    return tag.get('name') == 'referer'


class haifeng_crawler:
    """Crawler for www.96bbs.com that logs in and keeps session cookies
    so it can fetch pages/attachments visible only to logged-in members.

    One persistent ``requests.Session`` carries the cookies between
    requests; ``self.cookies`` is a plain-dict mirror of the jar.
    """

    def __init__(self, user_name, pass_word):
        """Store credentials and create one persistent HTTP session.

        :param user_name: forum account name.
        :param pass_word: the *already encrypted* password the login form
            expects (the site hashes the password client-side — TODO confirm
            the exact scheme against the login page's JavaScript).
        """
        self.cookies = dict()  # dict mirror of the session's cookie jar
        self.username = user_name
        self.password = pass_word
        self.session = requests.session()

    def update_cookies(self, new_cookies):
        """Merge the ``new_cookies`` dict into the tracked cookie dict."""
        for key in new_cookies:
            self.cookies[key] = new_cookies[key]

    def req_get(self, url):
        """GET ``url`` sending the accumulated cookies; record any new ones.

        Bug fix: the original assigned the jar to a brand-new throwaway
        ``requests.session()`` instead of ``self.session``, so the saved
        cookies were never actually sent with the request.
        """
        self.session.cookies = requests.utils.cookiejar_from_dict(self.cookies)
        resp = self.session.get(url)
        self.update_cookies(requests.utils.dict_from_cookiejar(resp.cookies))
        return resp

    def req_post(self, url, data):
        """POST ``data`` to ``url`` sending the accumulated cookies; record any new ones.

        Same session-cookie fix as :meth:`req_get`.
        """
        self.session.cookies = requests.utils.cookiejar_from_dict(self.cookies)
        resp = self.session.post(url, data)
        self.update_cookies(requests.utils.dict_from_cookiejar(resp.cookies))
        return resp

    def login(self):
        """Log into the forum and store the authenticated session cookies.

        Fetches the AJAX login form to extract the hidden ``formhash`` and
        ``referer`` fields, then posts the credentials. Returns ``None``
        either way; on failure (no ``<root>`` element in the response) it
        bails out early.
        """
        url = 'http://www.96bbs.com/member.php?mod=logging&action=login&infloat=yes&handlekey=login&inajax=1&ajaxtarget=fwin_content_login'
        page_res = self.req_get(url)
        soup = BeautifulSoup(page_res.text, "html.parser")
        # The AJAX endpoint wraps the HTML form inside a <root> CDATA node;
        # parse that inner payload as HTML again.
        rt = soup.find('root')
        if rt is None:
            return None
        soup = BeautifulSoup(rt.text, "html.parser")
        formhash = soup.find(is_form_hash).get("value")
        referer = soup.find(is_refer).get("value")
        # NOTE(review): ``loginhash=LVCbx`` is hard-coded; it appears in the
        # form's action URL and may need to be scraped per-session — verify.
        url = 'http://www.96bbs.com/member.php?mod=logging&action=login&loginsubmit=yes&handlekey=login&loginhash=LVCbx&inajax=1'
        data = {
            'formhash': formhash,
            'referer': referer,
            'username': self.username,
            # Bug fix: the original posted the literal placeholder string
            # '加密后的密码' instead of the password given to __init__.
            'password': self.password,
            'questionid': 0,
            'answer': ''
        }
        resp = self.req_post(url, data)
        soup = BeautifulSoup(resp.text, "html.parser")
        rt = soup.find('root').text
        print(rt)

    def visit_home(self):
        """Warm up the session by visiting the forum home page."""
        url = 'http://www.96bbs.com/forum.php'
        self.req_get(url)

    def visit_attachment(self, url):
        """Fetch an attachment URL (requires a logged-in session) and return the response."""
        resp = self.req_get(url)
        print(resp.status_code)
        return resp


if __name__ == "__main__":
    # NOTE: the second argument must be the password *after* the site's
    # client-side encryption, not the plain-text password.
    crawler = haifeng_crawler("你的用户名", "密码需要根据页面取获取加密后的密码")
    crawler.login()
    # Fetch an attachment that is only served to logged-in members.
    crawler.visit_attachment("http://www.96bbs.com/forum.php?mod=attachment&aid=MjI0NzQ5OHw3YjNkMWMwY3wxNTQwMzYxMzEwfDQ5NzM5OXwzNTM5NTgy")

 

标签:

版权申明:本站文章部分自网络,如有侵权,请联系:west999com@outlook.com
特别注意:本站所有转载文章言论不代表本站观点,本站所提供的摄影照片,插画,设计作品,如需使用,请与原作者联系,版权归原作者所有

上一篇:Python全栈学习_day009知识点

下一篇:第八天- 基础数据操作补充 集合set 深浅拷贝