Date: 2023-04-24 14:48:02 | Source: Website Operations
Some tips for scraping websites with Python

I've been using Python for a little over three months now, and what I use it for most is all kinds of crawler scripts: I've written a script that grabs proxies and verifies them locally, scripts that log in and post automatically on Discuz forums, a script that collects mail automatically, and a simple CAPTCHA-recognition script. I had also meant to write a Google Music scraper, but then the excellent gmbox appeared, so there was no need.

1. The most basic page fetch:

import urllib2
content = urllib2.urlopen('http://XXXX').read()
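If the fetch can fail, it is worth catching urllib2's exceptions instead of letting the script die; a minimal sketch (Python 2.6+ for getcode(), the URL is a placeholder as above):

import urllib2

try:
    response = urllib2.urlopen('http://XXXX')
    print response.getcode()     # HTTP status code, e.g. 200
    print response.info()        # response headers
    content = response.read()
except urllib2.HTTPError, e:     # the server answered with an error status
    print 'HTTP error:', e.code
except urllib2.URLError, e:      # network-level failure (DNS, connection refused, ...)
    print 'URL error:', e.reason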
2. Using a proxy server. This is useful when a site limits how often a single IP may visit, or when your IP has been blocked outright:

import urllib2
proxy_support = urllib2.ProxyHandler({'http': 'http://XX.XX.XX.XX:XXXX'})
opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
content = urllib2.urlopen('http://XXXX').read()
3. Handling cookies, for sites that require a session:

import urllib2, cookielib
cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
content = urllib2.urlopen('http://XXXX').read()

To use a proxy and cookie handling at the same time, simply pass both handlers to build_opener:

opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
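Put together, a self-contained sketch (the proxy address and URL are placeholders, as above):

import urllib2, cookielib

cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
proxy_support = urllib2.ProxyHandler({'http': 'http://XX.XX.XX.XX:XXXX'})
opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
# every urlopen call now goes through the proxy and carries the cookie jar
content = urllib2.urlopen('http://XXXX').read()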
4. Handling forms for logins. To log in you usually need to POST form data. Taking VeryCD as an example, first build the POST body with urllib.urlencode, then submit it with a Request:

import urllib, urllib2
postdata = urllib.urlencode({
    'username': 'XXXXX',
    'password': 'XXXXX',
    'continueURI': 'http://www.verycd.com/',
    'fk': fk,   # fk is generated by the server; scrape it from the login page first (see the sketch below)
    'login_submit': '登录'
})
req = urllib2.Request(
    url = 'http://secure.verycd.com/signin/*/http://www.verycd.com/',
    data = postdata
)
result = urllib2.urlopen(req).read()
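The fk field above is not a constant: the server generates it and embeds it in the login page, so it has to be scraped out before posting. A hedged sketch; the regular expression assumes a hidden-input markup that I have not verified against the real page:

import re, urllib2

login_page = urllib2.urlopen('http://secure.verycd.com/signin/*/http://www.verycd.com/').read()
# assumed markup: <input type="hidden" name="fk" value="..."> -- adjust to the real page
m = re.search(r'name="fk"[^>]*value="([^"]*)"', login_page)
fk = m.group(1) if m else ''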
5. Masquerading as a browser. Some sites dislike crawlers and reject their requests outright, typically with an HTTP 403. Sending a browser-like User-Agent header gets around this:

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
}
req = urllib2.Request(
    url = 'http://secure.verycd.com/signin/*/http://www.verycd.com/',
    data = postdata,
    headers = headers
)
6. Defeating anti-leeching checks. Some sites inspect the Referer header and only serve requests that claim to come from one of their own pages; the counter is simply to set Referer to a page on the target site, as in the combined example below:

headers = {
    'Referer': 'http://www.cnbeta.com/articles'
}
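Both headers can of course travel on the same request; a minimal sketch (the article URL is a placeholder):

import urllib2

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
    'Referer': 'http://www.cnbeta.com/articles'
}
req = urllib2.Request(url='http://www.cnbeta.com/articles/XXXX', headers=headers)
content = urllib2.urlopen(req).read()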
7. Multithreaded concurrent fetching. A simple worker-pool pattern built on Queue:

from threading import Thread
from Queue import Queue
from time import sleep

# q is the task queue
# NUM is the number of concurrent worker threads
# JOBS is the number of tasks
q = Queue()
NUM = 2
JOBS = 10

# the handler that processes a single task
def do_something_using(arguments):
    print arguments

# the worker: keeps pulling tasks off the queue and processing them
def working():
    while True:
        arguments = q.get()
        do_something_using(arguments)
        sleep(1)
        q.task_done()

# spawn NUM worker threads waiting on the queue
for i in range(NUM):
    t = Thread(target=working)
    t.setDaemon(True)
    t.start()

# enqueue the JOBS tasks
for i in range(JOBS):
    q.put(i)

# block until all tasks are done
q.join()
8. gzip/deflate support. Servers that compress their responses can cut the bytes on the wire dramatically, but urllib2 neither requests nor decodes compressed content by itself. The handler below advertises gzip/deflate on every request and transparently decompresses the response:

import urllib2
from gzip import GzipFile
from StringIO import StringIO

class ContentEncodingProcessor(urllib2.BaseHandler):
    """A handler to add gzip capabilities to urllib2 requests"""

    # add the Accept-Encoding header to outgoing requests
    def http_request(self, req):
        req.add_header("Accept-Encoding", "gzip, deflate")
        return req

    # decode incoming responses
    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get("content-encoding") == "gzip":
            gz = GzipFile(
                fileobj=StringIO(resp.read()),
                mode="r"
            )
            resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get("content-encoding") == "deflate":
            gz = StringIO(deflate(resp.read()))
            resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp

# deflate support
import zlib
def deflate(data):   # zlib only provides the zlib compress format, not the deflate format;
    try:             # so on top of all there's this workaround:
        return zlib.decompress(data, -zlib.MAX_WBITS)
    except zlib.error:
        return zlib.decompress(data)

Then build an opener with the handler:

encoding_support = ContentEncodingProcessor
opener = urllib2.build_opener(encoding_support, urllib2.HTTPHandler)
# open pages with this opener directly; gzip/deflate responses are decompressed automatically
content = opener.open(url).read()
9. Asynchronous fetching with Twisted. If you would rather not manage threads yourself, Twisted's event loop can drive a batch of downloads:

from twisted.web.client import getPage
from twisted.internet import reactor

links = ['http://www.verycd.com/topics/%d/' % i for i in range(5420, 5430)]

def parse_page(data, url):
    print len(data), url

def fetch_error(error, url):
    print error.getErrorMessage(), url

# fetch the links in one batch
for url in links:
    d = getPage(url, timeout=5)
    d.addCallback(parse_page, url)   # called on success
    d.addErrback(fetch_error, url)   # called on failure

reactor.callLater(5, reactor.stop)   # tell the reactor to stop after 5 seconds
reactor.run()
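Stopping the reactor on a fixed five-second timer is crude: slow downloads get cut off and fast runs wait longer than necessary. A sketch of a cleaner shutdown with DeferredList, which fires once every download has either succeeded or failed (my addition, not part of the original recipe):

from twisted.web.client import getPage
from twisted.internet import reactor
from twisted.internet.defer import DeferredList

links = ['http://www.verycd.com/topics/%d/' % i for i in range(5420, 5430)]

def parse_page(data, url):
    print len(data), url

def fetch_error(error, url):
    print error.getErrorMessage(), url

deferreds = []
for url in links:
    d = getPage(url, timeout=5)
    d.addCallback(parse_page, url)
    d.addErrback(fetch_error, url)
    deferreds.append(d)

# stop the reactor only after every deferred has fired
DeferredList(deferreds).addBoth(lambda _: reactor.stop())
reactor.run()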
10. A reusable multithreaded Fetcher. For bulk downloads it is convenient to hide the threading details behind a small class with a push/pop interface. The intended usage looks like this (do_with stands for whatever processing you need):

f = Fetcher(threads=10)     # use 10 download threads
for url in urls:
    f.push(url)             # push every url onto the download queue
while f.taskleft():         # while some downloads are still pending
    url, content = f.pop()  # take a finished (url, content) pair off the result queue
    do_with(content)        # process the content
The implementation: a task queue feeding worker threads, which push results onto a completion queue:

import urllib2
from threading import Thread, Lock
from Queue import Queue
import time

class Fetcher:
    def __init__(self, threads):
        self.opener = urllib2.build_opener(urllib2.HTTPHandler)
        self.lock = Lock()    # thread lock
        self.q_req = Queue()  # task queue
        self.q_ans = Queue()  # completed queue
        self.threads = threads
        for i in range(threads):
            t = Thread(target=self.threadget)
            t.setDaemon(True)
            t.start()
        self.running = 0

    def __del__(self):  # wait for both queues to drain before tearing down
        time.sleep(0.5)
        self.q_req.join()
        self.q_ans.join()

    def taskleft(self):
        return self.q_req.qsize() + self.q_ans.qsize() + self.running

    def push(self, req):
        self.q_req.put(req)

    def pop(self):
        ans = self.q_ans.get()
        self.q_ans.task_done()  # keeps q_ans.join() in __del__ from blocking forever
        return ans

    def threadget(self):
        while True:
            req = self.q_req.get()
            with self.lock:  # the counter update must be atomic: enter the critical section
                self.running += 1
            try:
                ans = self.opener.open(req).read()
            except Exception, what:
                ans = ''
                print what
            self.q_ans.put((req, ans))
            with self.lock:
                self.running -= 1
            self.q_req.task_done()
            time.sleep(0.1)  # don't spam

if __name__ == "__main__":
    links = ['http://www.verycd.com/topics/%d/' % i for i in range(5420, 5430)]
    f = Fetcher(threads=10)
    for url in links:
        f.push(url)
    while f.taskleft():
        url, content = f.pop()
        print url, len(content)
Finally, a few smaller tips.

11. Thread stack size. Each new thread reserves its own stack, and the per-thread default is fairly large on most platforms; shrinking it lets a single process run many more threads:

from threading import stack_size
stack_size(32768 * 16)
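stack_size only affects threads created after the call, so it has to run before the workers are spawned; a minimal sketch (the worker body is a placeholder):

from threading import Thread, stack_size

stack_size(32768 * 16)  # set a 512KB stack before any thread is created

def worker():
    pass  # the fetch loop would go here

threads = [Thread(target=worker) for i in range(100)]
for t in threads:
    t.setDaemon(True)
    t.start()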
12. Automatic retry on failure. A small recursive wrapper retries a fetch a few times before giving up; written as a method, it slots straight into the Fetcher class above:

def get(self, req, retries=3):
    try:
        response = self.opener.open(req)
        data = response.read()
    except Exception, what:
        print what, req
        if retries > 0:
            return self.get(req, retries-1)
        else:
            print 'GET Failed', req
            return ''
    return data
13. Setting a timeout. Up to Python 2.5 the only handle is the module-wide socket timeout:

import socket
socket.setdefaulttimeout(10)  # give up on connections after 10 seconds
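From Python 2.6 on, urllib2.urlopen also accepts a timeout argument, which is scoped to the single request and is usually the nicer option:

import urllib2
content = urllib2.urlopen('http://XXXX', timeout=10).read()  # Python 2.6+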
14. Logging in. Wrapping the form post in a method makes it reusable; here is the VeryCD login again, as a method on a class that owns a cookie-aware opener:

def login(self, username, password):
    import urllib
    data = urllib.urlencode({'username': username,
                             'password': password,
                             'continue': 'http://www.verycd.com/',
                             'login_submit': u'登录'.encode('utf-8'),  # the literal button text the form expects
                             'save_cookie': 1,})
    url = 'http://www.verycd.com/signin'
    self.opener.open(url, data).read()
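A sketch of how this might be wired up; the class name is my own invention, and the opener mirrors the cookie handling from tip 3:

# -*- coding: utf-8 -*-
import urllib2, cookielib

class VerycdClient:
    def __init__(self):
        # the cookie jar keeps the session alive after login
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(cookielib.CookieJar()))

    def login(self, username, password):
        import urllib
        data = urllib.urlencode({'username': username,
                                 'password': password,
                                 'continue': 'http://www.verycd.com/',
                                 'login_submit': u'登录'.encode('utf-8'),
                                 'save_cookie': 1,})
        self.opener.open('http://www.verycd.com/signin', data).read()

client = VerycdClient()
client.login('XXXXX', 'XXXXX')  # placeholder credentials
page = client.opener.open('http://www.verycd.com/').read()  # fetched with the login cookies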