python爬虫----(1. 基本模块)
python爬虫,web spider。爬取网站获取网页数据,并进行分析提取。
基本模块使用的是 urllib、urllib2、re 等模块
(一)基本用法,例子
(1)进行基本GET请求,获取网页html
#!coding=utf-8
"""Basic GET request: fetch a page's HTML (ported from urllib2 to Python 3).

Bug fixed from the original: `response.read()` ran unconditionally after the
try/except, crashing with NameError when urlopen raised (response unbound).
The success path now lives in the `else` clause.
"""
import urllib.request
import urllib.error

url = 'http://www.baidu.com/'
# Build the request object for the target URL.
request = urllib.request.Request(url)
try:
    # Send the request and obtain the response.
    response = urllib.request.urlopen(request)
except urllib.error.HTTPError as e:
    if hasattr(e, 'reason'):
        print(e.reason)
else:
    # Body of the response (bytes).
    html = response.read()
    # Response headers (an email.message.Message-like object).
    headers = response.info()
(2)表单提交
#!coding=utf-8
"""Form submission: send a urlencoded POST request (ported to Python 3).

In Python 3, urlopen requires the POST body as bytes, so the urlencoded
string is explicitly encoded — the original Python 2 code passed a str.
"""
import urllib.request
import urllib.parse

post_url = ''
# Form fields, urlencoded and encoded to bytes for the request body.
post_data = urllib.parse.urlencode({
    'username': 'username',
    'password': 'password',
}).encode('utf-8')
post_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0',
}
# Passing `data` makes this a POST request.
request = urllib.request.Request(
    url=post_url,
    data=post_data,
    headers=post_headers,
)
response = urllib.request.urlopen(request)
html = response.read()
(3)正则提取:抓取百度贴吧楼主发言并保存到文件
#!coding=utf-8
"""Scrape the thread-starter's posts from a Baidu Tieba page and append them
to baidu.txt (ported to Python 3).

Bugs fixed from the original:
- `text.replace('\\n', '').replace(' ', '') + '\\n'` discarded its result
  (str is immutable), so newlines/spaces were never stripped and no trailing
  newline was added before writing. `clean_post` now returns the cleaned text.
- The `reload(sys); sys.setdefaultencoding('utf-8')` hack is gone; the file
  is opened with an explicit encoding instead.
- The output file is managed with `with`, so it closes even if a write fails.
"""
import re
import urllib.request

# Matches the body of each post div; DOTALL lets '.' span newlines.
POST_RE = re.compile(r'class="d_post_content j_d_post_content ">(.*?)</div>',
                     re.DOTALL)


def clean_post(item):
    """Strip <br> tags, newlines and spaces from one post; append a newline."""
    text = item.replace('<br>', '')
    return text.replace('\n', '').replace(' ', '') + '\n'


def main(page_num=1):
    """Fetch one thread page (see_lz=1 → poster only) and save its posts."""
    url = 'http://tieba.baidu.com/p/3238280985?see_lz=1&pn=' + str(page_num)
    # The page is served GBK-encoded.
    page = urllib.request.urlopen(url).read().decode('gbk')
    items = POST_RE.findall(page)
    with open('baidu.txt', 'a+', encoding='utf-8') as f:
        for i, item in enumerate(items, 1):
            print(i)
            text = clean_post(item)
            print(text)
            f.write(text)


if __name__ == '__main__':
    main()
(4)模拟登录:登录163邮箱并下载邮件内容(需配合 Cookie 处理)
#coding:utf-8
"""Simulate logging in to 163 webmail and downloading message bodies.

Ported from Python 2 (urllib2/cookielib) to Python 3. Changes from the
original:
- `json.JSONDecoder(encoding='utf8')` no longer exists in Python 3; plain
  `json.loads` is used, and the response is decoded only once (the original
  decoded the same JSON twice).
- The demo credentials have been redacted — the original post leaked a real
  e-mail address and password in cleartext.
NOTE(review): the scraping regexes and login URL were written for an old
webmail layout — verify against the current site before relying on them.
"""
import http.cookiejar
import json
import re
import time
import urllib.parse
import urllib.request


class Email163:
    """Minimal 163 webmail client: login, list the inbox, fetch a message."""

    header = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
    user = ''
    cookie = None
    sid = None  # session id parsed from the login response
    mailBaseUrl = 'http://twebmail.mail.163.com'

    def __init__(self):
        # Install a global opener so session cookies persist across requests.
        self.cookie = http.cookiejar.CookieJar()
        cookie_proc = urllib.request.HTTPCookieProcessor(self.cookie)
        urllib.request.install_opener(urllib.request.build_opener(cookie_proc))

    def login(self, user, pwd):
        """Log in; on success stores the session id in self.sid."""
        postdata = urllib.parse.urlencode({
            'username': user,
            'password': pwd,
            'type': 1,
        }).encode('utf-8')
        # NOTE: the login URL differs between webmail versions.
        req = urllib.request.Request(
            url='https://ssl.mail.163.com/entry/coremail/fcg/ntesdoor2?funcid=loginone&language=-1&passtype=1&iframe=1&product=mail163&from=web&df=email163&race=-2_45_-2_hz&module=&uid=' + user + '&style=10&net=t&skinid=null',
            data=postdata,
            headers=self.header,
        )
        res = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
        patt = re.compile('sid=([^"]+)', re.I).search(res)
        uname = user.split('@')[0]
        self.user = user
        if patt:
            self.sid = patt.group(1).strip()
            print('%s Login Successful.....' % uname)
        else:
            print('%s Login failed....' % uname)

    def getInBox(self):
        """Return the inbox as a list of {'from', 'url', 'subject'} dicts."""
        print('\nGet mail lists.....\n')
        url = (self.mailBaseUrl + '/jy3/list/list.do?sid=' + self.sid
               + '&fid=1&fr=folder')
        res = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
        patt = re.compile(
            '<div\\s+class="tdLike Ibx_Td_From"[^>]+>.*?href="([^"]+)"[^>]+>'
            '(.*?)<\\/a>.*?<div\\s+class="tdLike Ibx_Td_Subject"[^>]+>'
            '.*?href="[^>]+>(.*?)<\\/a>',
            re.I | re.S)
        mail_list = []
        for link, sender, subject in patt.findall(res):
            mail_list.append({
                'from': sender,
                'url': self.mailBaseUrl + link,
                'subject': subject,
            })
        return mail_list

    def getMailMsg(self, url):
        """Download and return one message body; '' when none is found."""
        content = ''
        print('\n Download.....%s\n' % url)
        res = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
        patt = re.compile('contentURL:"([^"]+)"', re.I).search(res)
        if patt is None:
            return content
        url = '%s%s' % (self.mailBaseUrl, patt.group(1))
        time.sleep(1)
        res = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
        json_res = json.loads(res)
        if 'resultVar' in json_res:
            content = json_res['resultVar']
        time.sleep(3)
        return content


'''
Demo — credentials redacted (the original leaked a real password here).
'''
if __name__ == '__main__':
    mail163 = Email163()
    mail163.login('[email protected]', 'your-password')
    time.sleep(2)
    elist = mail163.getInBox()
    for i in elist:
        print('主题:%s 来自:%s 内容:\n%s'
              % (i['subject'], i['from'], mail163.getMailMsg(i['url'])))
郑重声明:本站内容如果来自互联网及其他传播媒体,其版权均属原媒体及文章作者所有。转载目的在于传递更多信息及用于网络分享,并不代表本站赞同其观点和对其真实性负责,也不构成任何其他建议。