Author: 北斗盖全球 | Source: Internet | 2023-01-03 13:02
I've been working through 廖雪峰's Python 3 tutorial and wanted a project to practice on. A web crawler looked like a good one, so I set out to learn how to build it. At first I had no clue where to start, until I found the Python crawler course on 慕课网 (imooc). That course is based on Python 2, but it gave me the basic idea of how a crawler works, and I reimplemented its (admittedly dumb) Baidu Baike spider in Python 3.
Baidu Baike (dumb) spider, in Python 3
What it does
It crawls a few hundred Baike pages reachable from a starting entry. Pending URLs are popped from an unordered set, so the crawl order is essentially arbitrary, which is why it turns up plenty of seemingly unrelated entries. The spider has no practical use; it's purely a learning exercise.
Crawler architecture
There is one main program and four modules:
- URL manager: tracks which URLs are pending and which have already been crawled
- HTML downloader: fetches a page's HTML with urllib.request
- HTML parser: parses the downloaded HTML with BeautifulSoup 4 to pull out each entry's title and summary
- HTML outputer: writes the collected data to an HTML file as a table
Crawler code
Main program
from baike_spider import url_manager
from baike_spider import html_downloader
from baike_spider import html_parser
from baike_spider import html_outputer


class SpiderMain(object):
    def __init__(self):
        # Wire the four modules together.
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def crawl(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('crawling %d : %s' % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 300:  # stop after 300 pages
                    break
                count = count + 1
            except Exception:
                # one bad page shouldn't kill the whole crawl
                print('crawl failed.')
        self.outputer.output_html()


if __name__ == '__main__':
    root_url = 'http://baike.baidu.com/view/1927.htm'
    obj_spider = SpiderMain()
    obj_spider.crawl(root_url)
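A caveat if you try to run this today: Baidu Baike has since moved entry URLs from /view/<id>.htm to /item/<name>, so the seed URL above and the /view/ pattern in the parser below will likely match nothing. A hypothetical adjustment (my guess, not part of the original tutorial) might look like this:

import re
from bs4 import BeautifulSoup

# hypothetical present-day Baike link: /item/<name>, sometimes with a numeric id
html = '<a href="/item/Python/407313">Python</a>'
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('a', href=re.compile(r'^/item/'))
print([link['href'] for link in links])  # ['/item/Python/407313']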
URL manager
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        # only queue URLs we haven't seen before
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # pop an arbitrary pending URL and mark it as crawled
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
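A quick sanity check of the deduplication logic, with made-up URLs (assuming the UrlManager class above is in scope):

manager = UrlManager()
manager.add_new_url('http://example.com/a')
manager.add_new_url('http://example.com/a')  # duplicate: ignored
url = manager.get_new_url()                  # moves the URL to old_urls
manager.add_new_url(url)                     # already crawled: ignored
print(manager.has_new_url())                 # False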
HTML downloader
from urllib import request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = request.urlopen(url)
        if response.status != 200:
            return None
        data = response.read()
        return data.decode('utf-8')
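One thing to note: urlopen raises HTTPError or URLError on failed requests rather than returning a non-200 response, so the status check above rarely triggers. A variant that catches those errors and adds a timeout might look like this sketch (the 10-second timeout is my own choice, not from the tutorial):

from urllib import request
from urllib.error import URLError


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        try:
            # a timeout keeps one stuck request from hanging the whole crawl
            response = request.urlopen(url, timeout=10)
        except URLError as e:  # HTTPError is a subclass of URLError
            print('download failed:', e)
            return None
        if response.status != 200:
            return None
        return response.read().decode('utf-8')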
HTML parser
from bs4 import BeautifulSoup
import re
from urllib.parse import urljoin


class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None
        # html_cont is already a decoded str, so no from_encoding is needed
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r'/view/\d+\.htm'))
        for link in links:
            new_url = link['href']
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls
    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # the <h1> inside <dd class="lemmaWgt-lemmaTitle-title"> holds the entry title
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # the 'lemma-summary' div holds the entry's summary (the selector used in the imooc course)
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data
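To check the selectors without touching the network, you can feed the parser a minimal, made-up HTML snippet that mimics a Baike page (assuming the HtmlParser class above is in scope):

sample = '''
<html><body>
  <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
  <div class="lemma-summary">Python is a programming language.</div>
  <a href="/view/10812319.htm">Guido van Rossum</a>
</body></html>
'''
parser = HtmlParser()
urls, data = parser.parse('http://baike.baidu.com/view/21087.htm', sample)
print(urls)                     # {'http://baike.baidu.com/view/10812319.htm'}
print(data['title'])            # Python
print(data['summary'].strip())  # Python is a programming language.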