This article shows how to use Python to scrape the details of every novel in the xianxia (仙侠) category of the Miaobige novel site (妙笔阁小说网) and save them in both TXT and CSV formats. First, open the xianxia category page of the site to see the list of novels to be scraped.
For each novel we want the title, author, latest update, and synopsis. To keep the site from blocking the crawler, we construct request headers so the requests look like they come from a regular browser.
Constructing the Request Headers
On the page you want to scrape, press F12 to open the browser's developer tools, switch to the Network tab, and press F5 to refresh. Click into any request and find the User-Agent field in its request headers.
user_agent = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
headers = {'User-Agent': user_agent}
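Every request should then carry these headers so the site sees a browser-like client. A minimal check (reusing the headers defined above):

import requests

# headers= makes requests send our User-Agent instead of its default one
response = requests.get('http://www.mbige.com/bookstore/xianxia/default-0-0-0-0-0-0-1.html', headers=headers)
print(response.status_code)  # 200 means the page was served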
Sending the Request and Parsing the HTML
Use the requests library to send the HTTP request, then parse the response body into an HTML document with lxml.
response = requests.get("http://www.mbige.com/bookstore/xianxia/default-0-0-0-0-0-0-1.html", headers=headers)
response.encoding = response.apparent_encoding
html = lxml.html.fromstring(response.text)
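As a quick sanity check that the request succeeded and the text decoded correctly, print the page title (this assumes the page has a title element):

# Should print the category page's title if the fetch and decode worked
print(''.join(html.xpath('//title/text()')))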
Parsing the HTML and Saving to a TXT File
Use XPath to extract the fields we need from the HTML and append them to a TXT file.
with open("E:/xiaoshuo.txt", 'a+', encoding='utf-8') as f:
    # Each novel sits in its own <div id="alistbox"> block
    divlst = html.xpath('//div[@id="alistbox"]')
    for i in divlst:
        title = ''.join(i.xpath('.//h2/a/text()'))
        author = ''.join(i.xpath('.//div[@class="title"]/span/text()'))
        newest = ''.join(i.xpath('.//li/a/text()'))
        intro = ''.join([x.strip('\n') for x in i.xpath('.//div[@class="intro"]/text()') if x])
        f.write(title + '\n' + author + '\n' + newest + '\n' + intro + '\n\n')
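Each record in xiaoshuo.txt then takes four lines plus a blank separator line; the values here are placeholders, not real output:

<title>
<author>
<latest update>
<synopsis>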
Complete Code (Saving as TXT)
import requests
import lxml.html

class myspider:
    def __init__(self):
        user_agent = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
        self.headers = {'User-Agent': user_agent}

    def gethtml(self, url):
        try:
            response = requests.get(url, headers=self.headers)
        except requests.RequestException:
            return None
        # The site serves Chinese text; decode it as GB18030
        response.encoding = 'gb18030'
        return lxml.html.fromstring(response.text)

    def getpage(self, url):
        html = self.gethtml(url)
        if html is None:
            return
        with open("E:/xiaoshuo.txt", 'a+', encoding='utf-8') as f:
            divlst = html.xpath('//div[@id="alistbox"]')
            for i in divlst:
                title = ''.join(i.xpath('.//h2/a/text()'))
                author = ''.join(i.xpath('.//div[@class="title"]/span/text()'))
                newest = ''.join(i.xpath('.//li/a/text()'))
                intro = ''.join([x.strip('\n') for x in i.xpath('.//div[@class="intro"]/text()') if x])
                f.write(title + '\n' + author + '\n' + newest + '\n' + intro + '\n\n')

if __name__ == '__main__':
    sp = myspider()
    url1 = 'http://www.mbige.com/bookstore/xianxia/default-0-0-0-0-0-0-{0}.html'
    for num in range(1, 161):
        url = url1.format(num)
        sp.getpage(url)
        print(str(num) + " page")
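Requesting 160 pages back to back may trigger the anti-scraping measures mentioned earlier. As a hedge, the main loop can sleep briefly between pages; this is a sketch, and the one-second delay is an arbitrary choice:

import time

if __name__ == '__main__':
    sp = myspider()
    url1 = 'http://www.mbige.com/bookstore/xianxia/default-0-0-0-0-0-0-{0}.html'
    for num in range(1, 161):
        sp.getpage(url1.format(num))
        print(str(num) + " page")
        time.sleep(1)  # pause between pages to stay polite to the server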
Saving as CSV
Saving as CSV only requires changing a few lines of the code above: import the csv module, write each record with csv.writer, and add a header row.
import requests
import lxml.html
import csv

class myspider:
    def __init__(self):
        user_agent = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
        self.headers = {'User-Agent': user_agent}

    def gethtml(self, url):
        try:
            response = requests.get(url, headers=self.headers)
        except requests.RequestException:
            return None
        response.encoding = 'gb18030'
        return lxml.html.fromstring(response.text)

    def getpage(self, url):
        html = self.gethtml(url)
        if html is None:
            return
        # newline='' stops the csv module from writing extra blank lines on Windows
        with open("E:/xiaoshuo.csv", 'a+', newline='', encoding='utf-8') as f:
            f1 = csv.writer(f)
            # Write the header row only once, while the file is still empty
            if f.tell() == 0:
                f1.writerow(['书名', '作者', '最新更新', '简介'])
            divlst = html.xpath('//div[@id="alistbox"]')
            for i in divlst:
                title = ''.join(i.xpath('.//h2/a/text()'))
                author = ''.join(i.xpath('.//div[@class="title"]/span/text()'))
                newest = ''.join(i.xpath('.//li/a/text()'))
                intro = ''.join([x.strip('\n') for x in i.xpath('.//div[@class="intro"]/text()') if x])
                f1.writerow([title, author, newest, intro])

if __name__ == '__main__':
    sp = myspider()
    url1 = 'http://www.mbige.com/bookstore/xianxia/default-0-0-0-0-0-0-{0}.html'
    for num in range(1, 161):
        url = url1.format(num)
        sp.getpage(url)
        print(str(num) + " page")
The resulting xiaoshuo.csv file contains one row per novel, with the 书名 (title), 作者 (author), 最新更新 (latest update), and 简介 (synopsis) columns.
If the CSV file shows garbled characters when you open it (Excel, for example, does not always detect UTF-8 without a byte order mark), open the file in Notepad++, convert its encoding, and open it again.
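Alternatively, avoid the problem at write time: Python's utf-8-sig codec prepends a byte order mark, which lets Excel detect UTF-8 automatically. A minimal sketch:

import csv

# utf-8-sig prepends a byte order mark so Excel detects UTF-8 automatically
with open("E:/xiaoshuo.csv", 'w', newline='', encoding='utf-8-sig') as f:
    csv.writer(f).writerow(['书名', '作者', '最新更新', '简介'])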