This article presents a worked code example of scraping Baidu search results with a Python crawler. The sample code is covered in detail and should be a useful reference for study or work; interested readers can follow along.

We use search engines constantly. With a Python crawler we can extract Baidu's search results directly, and further parsing of the extracted content can streamline the whole search process. A detailed example follows.

The code is as follows:
# coding=utf8
import urllib2
import string
import urllib
import re
import random
# Rotate among several user agents so Baidu is less likely to block our IP
user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
               'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
               'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ (KHTML, like Gecko) Element Browser 5.0',
               'IBM WebExplorer /v0.94', 'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)',
               'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
               'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
               'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
               'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
               'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']
def baidu_search(keyword, pn):
    # Fetch one page of Baidu results: rn=100 results per page, pn is the result offset
    p = {'wd': keyword}
    res = urllib2.urlopen(("http://www.baidu.com/s?" + urllib.urlencode(p) + "&pn={0}&cl=3&rn=100").format(pn))
    html = res.read()
    return html
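# Illustrative example of the request this builds (values derived from the code above):
#   baidu_search('python', 1)  ->  GET http://www.baidu.com/s?wd=python&pn=1&cl=3&rn=100
# i.e. 100 results per page (rn=100), with pn selecting the starting result.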
def getList(regex, text):
    # Return every match of regex in text as a list
    arr = []
    res = re.findall(regex, text)
    if res:
        for r in res:
            arr.append(r)
    return arr
def getMatch(regex, text):
    # Return the first match of regex in text, or "" if there is none
    res = re.findall(regex, text)
    if res:
        return res[0]
    return ""
def clearTag(text):
    # Strip HTML tags from text
    p = re.compile(u'<[^>]+>')
    retval = p.sub("", text)
    return retval
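# Illustrative usage of the helpers above (hypothetical inputs, not from the article):
#   getMatch(u'href="(.*?)"', u'<a href="http://example.com">x</a>')  ->  u'http://example.com'
#   clearTag(u'<b>Python</b> tutorial')                               ->  u'Python tutorial'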
def geturl(keyword):
    for page in range(10):
        pn = page * 100 + 1
        html = baidu_search(keyword, pn)
        content = unicode(html, 'utf-8', 'ignore')
        # On the old Baidu result page, each hit sits in a <table class="result"> block
        arrList = getList(u"<table.*?class=\"result\".*?>.*?</table>", content)
        for item in arrList:
            # Title cell: <h3 class="t"><a href="...">title</a>; group 1 = url, group 2 = title
            regex = u"<h3.*?class=\"t\".*?><a.*?href=\"(.*?)\".*?>(.*?)</a>"
            link = getMatch(regex, item)
            if not link:
                continue
            url = link[0]
            # Grab the title
            #title = clearTag(link[1]).encode('utf8')
            try:
                domain = urllib2.Request(url)
                r = random.randint(0, len(user_agents) - 1)  # stay inside the list; a fixed randint(0, 11) could overrun it
                domain.add_header('User-agent', user_agents[r])
                domain.add_header('connection', 'keep-alive')
                response = urllib2.urlopen(domain)
                uri = response.geturl()  # follow Baidu's redirect to the real target URL
                print uri
            except:
                continue

if __name__ == '__main__':
    geturl('python')
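A note for readers on Python 3: the listing above is Python 2 only (urllib2 and the print statement no longer exist). A minimal standard-library sketch of the fetch step in Python 3, assuming the same wd/pn/cl/rn query parameters as the code above, could look like this:

# coding=utf8
# Minimal Python 3 sketch of baidu_search(), assuming the same query parameters as above.
import random
import urllib.parse
import urllib.request

def baidu_search(keyword, pn, user_agents):
    params = urllib.parse.urlencode({'wd': keyword, 'pn': pn, 'cl': 3, 'rn': 100})
    req = urllib.request.Request("http://www.baidu.com/s?" + params)
    # random.choice can never run past the end of the list, unlike a hand-written randint
    req.add_header('User-Agent', random.choice(user_agents))
    with urllib.request.urlopen(req) as res:
        return res.read().decode('utf-8', 'ignore')

The parsing side would change in the same spirit: the decoded string returned here replaces the unicode(html, ...) call. Note also that Baidu's current markup differs from the class="result"/class="t" structure assumed in the article, so the regular expressions would need to be updated against the live page.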
That is all for this article. We hope it helps with your study, and please continue to support 脚本之家.