Sending high-frequency requests to one server within a short time will be flagged as abnormal traffic, and the current IP gets blacklisted, so route requests through a proxy.
import requests
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
url = "https://www.sogou.com/web?query=ip"
# the proxy mechanism is simply the proxies parameter of the get or post method
page_text = requests.get(url=url, headers=headers, proxies={"https": '221.1.200.242:38652'}).text
tree = etree.HTML(page_text)
# the tbody tag must not appear in an XPath expression
ip = tree.xpath('//*[@id="ipsearchresult"]/strong/text()')[0]
print(ip)
Crawling one site with too many requests gets the IP banned, so use a proxy pool.
import requests
import random
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
all_ips = []
api_url = "URL of the HTML page that the paid proxy service (代理精灵) generates for the purchased IPs"
api_text = requests.get(url=api_url, headers=headers).text
api_tree = etree.HTML(api_text)
datas = api_tree.xpath('//body//text()')  # extract all text nodes, one proxy address per node
for data in datas:
    dic = {
        'https': data
    }
    all_ips.append(dic)  # stored as a list of dicts: [{}, {}, ...]
url = "https://www.xicidaili.com/nn/%d"  # generic url template
ip_datas = []  # all parsed data
for page_num in range(1, 50):  # more pages means more IPs in the pool, and the pool stays usable a bit longer
    new_url = format(url % page_num)
    page_text = requests.get(url=new_url, headers=headers, proxies=random.choice(all_ips)).text
    tree = etree.HTML(page_text)
    tr_lst = tree.xpath('//*[@id="ip_list"]//tr')[1:]
    for tr in tr_lst:  # parse each table row
        ip = tr.xpath('./td[2]/text()')[0]
        port = tr.xpath('./td[3]/text()')[0]
        dic_ = {
            "https": ip + ":" + port
        }
        ip_datas.append(dic_)
print(len(ip_datas))  # number of proxies scraped
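Free or cheap proxies die quickly, so it can help to weed out dead ones before using the pool. Below is a minimal sketch, not part of the original notes: filter_alive is a hypothetical helper, and https://httpbin.org/ip is only an assumed test endpoint.
import requests

def filter_alive(proxies_list, test_url='https://httpbin.org/ip', timeout=5):
    """Return only the proxies that can complete a simple test request."""
    alive = []
    for proxy in proxies_list:  # proxy is a dict like {'https': 'ip:port'}
        try:
            requests.get(test_url, proxies=proxy, timeout=timeout)
            alive.append(proxy)
        except requests.exceptions.RequestException:
            pass  # dead or banned proxy, drop it
    return alive

# usage sketch: keep only working proxies before starting the real crawl
# ip_datas = filter_alive(ip_datas)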
Handling captchas
Use a captcha-solving platform for dynamic captcha recognition.
Captcha-solving platform
Chaojiying (超级鹰) workflow
Register an account (choose the "用户中心" / User Center role)
Log in
Create a software entry
User Center → Software ID → Add Software
Download the sample code
#!/usr/bin/env python
# coding:utf-8
import requests
from hashlib import md5

class Chaojiying_Client(object):
    def __init__(self, username, password, soft_id):
        self.username = username
        password = password.encode('utf8')
        self.password = md5(password).hexdigest()
        self.soft_id = soft_id
        self.base_params = {
            'user': self.username,
            'pass2': self.password,
            'softid': self.soft_id,
        }
        self.headers = {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
        }

    def PostPic(self, im, codetype):
        """
        im: image bytes
        codetype: captcha type, see http://www.chaojiying.com/price.html
        """
        params = {
            'codetype': codetype,
        }
        params.update(self.base_params)
        files = {'userfile': ('ccc.jpg', im)}
        r = requests.post('http://upload.chaojiying.net/Upload/Processing.php', data=params, files=files, headers=self.headers)
        return r.json()

    def ReportError(self, im_id):
        """
        im_id: image id of a misrecognized captcha
        """
        params = {
            'id': im_id,
        }
        params.update(self.base_params)
        r = requests.post('http://upload.chaojiying.net/Upload/ReportError.php', data=params, headers=self.headers)
        return r.json()

# wrap captcha recognition in a helper function
def transform_code(imgpath, imgtype):
    chaojiying = Chaojiying_Client('Chaojiying username', 'Chaojiying password', 'User Center >> Software ID')
    im = open(imgpath, 'rb').read()
    return chaojiying.PostPic(im, imgtype)['pic_str']
Task: scrape the news titles and content from Xueqiu (雪球网).
import requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    # cookie handled manually: copied straight from the browser's request headers
    'Cookie': 'aliyungf_tc=aqaaanpdtcedngqa0evi33fxpcso1bvs; acw_tc=2760823015846080972884781e8f986f089c7939870e775a86ffb898ca91d4; xq_a_token=a664afb60c7036c7947578ac1a5860c4cfb6b3b5; xqat=a664afb60c7036c7947578ac1a5860c4cfb6b3b5; xq_r_token=01d9e7361ed17caf0fa5eff6465d1c90dbde9ae2; xq_id_token=eyj0exaioijkv1qilcjhbgcioijsuzi1nij9.eyj1awqioi0xlcjpc3mioij1yyisimv4cci6mtu4ntm2mjywnywiy3rtijoxntg0nja4mdcwnzy4lcjjawqioijkowqwbjrbwnvwin0.gwlwgxwjdywungniatqxswjjo6nkjy9pcj0acif9vuhvsuxei7iw7_wivbhdc1wtk86j8ayj_bzd-kxyshad1z8kym6tv80l931tmestgj1i6up66wsauz3pydbc4ko1chueqmw_nca1uhsjwrc-4mokmmbbll6ryvpsocfrxrvrqy-dx_1ubcs_bsrcaakyoecwxo01tgfqqovebd9apgudaxtqc3hajptlzpqyh62cyyijzwhgsbi0emf1k1wmp_539girzempne7ngk6n1i8tqtdh_xadtfffk07g177w84nvujfsb8hpca6rzydugpamawqqjcpeusdzdkhkda; u=301584608097293; Hm_lvt_1db88642e346389874251b5a1eded6e3=1584608100; device_id=24700f9f1986800ab4fcc880530dd0ed; cookiesu=901584608234987; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1584608235'
}
url = 'https://xueqiu.com/v4/statuses/public_timeline_by_category.json?'
params = {
    'since_id': '-1',
    'max_id': '20369159',
    'count': '15',
    'category': '-1',
}
page_json = requests.get(url=url, headers=headers, params=params).json()
print(page_json)
Copying the cookie by hand is brittle because it expires; the better approach is a Session object, which captures the cookie and carries it on every later request automatically.
import requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
# instantiate a Session object
session = requests.Session()
# first request: let the session capture and store the cookie (finding which url sets it takes some trial and error)
session.get('https://xueqiu.com/', headers=headers)
# target url
url = 'https://xueqiu.com/v4/statuses/public_timeline_by_category.json?'
# parameters carried with the request
params = {
    'since_id': '-1',
    'max_id': '20369159',
    'count': '15',
    'category': '-1',
}
# send the request with the cookie attached
page_json = session.get(url=url, headers=headers, params=params).json()
print(page_json)
import requests
from lxml import etree
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
# the site relies on cookies, so instantiate a Session object up front and send every request through it
session = requests.Session()
# captcha recognition
first_url = "https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx"
page_text = session.get(first_url, headers=headers).text
tree = etree.HTML(page_text)
code_img_src = "https://so.gushiwen.org/" + tree.xpath('//*[@id="imgCode"]/@src')[0]
# requesting the captcha image url is what produces the cookie
code_img_data = session.get(code_img_src, headers=headers).content
with open('./code.jpg', 'wb') as f:
    f.write(code_img_data)
# recognize the captcha with Chaojiying; in Jupyter the Chaojiying_Client class and the transform_code helper above are already in scope, with PyCharm import them instead
# from <path to the Chaojiying .py file> import transform_code
code_img_text = transform_code('./code.jpg', 1902)
# recognition is not 100% reliable, so print it and check
print(code_img_text)
url = 'https://so.gushiwen.org/user/login.aspx'
# dynamic parameters
data = {
    '__VIEWSTATE': 'ldci9gbqvef2rdrer42gqu3m7xrls5iibh9mpop+qc1oncwpo9eqczsxuhhinxi26x0x19nb1l6gw26sc8qi4q/xnapck67ogf/fgdofhuewfpnrlznjctqf/no=',
    '__VIEWSTATEGENERATOR': 'c93be1ae',
    'from': 'http://so.gushiwen.org/user/collect.aspx',
    'email': 'site username',
    'pwd': 'password for that username',
    'code': code_img_text,  # captcha
    'denglu': '登录',
}
page_text = session.post(url=url, headers=headers, data=data).text
with open('./古诗文.html', 'w', encoding='utf-8') as f:
    # persist the page returned after logging in
    f.write(page_text)
How do we handle request parameters that change dynamically, such as __VIEWSTATE above? Instead of hard-coding them, parse them out of the page source on every run, as in the sketch below.
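A minimal sketch of that idea, reusing session, first_url and headers from the gushiwen example above. It assumes the hidden inputs carry the standard ASP.NET ids __VIEWSTATE and __VIEWSTATEGENERATOR; the XPath expressions are assumptions about the page structure, not taken from the original notes.
from lxml import etree
page_text = session.get(first_url, headers=headers).text
tree = etree.HTML(page_text)
# hidden form fields whose values change on every page load
viewstate = tree.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
generator = tree.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')[0]
data = {
    '__VIEWSTATE': viewstate,
    '__VIEWSTATEGENERATOR': generator,
    # ...the rest of the form data stays the same as above
}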
Task: scrape one-liner jokes from duanziwang.com and turn them into audio. This is based on the Baidu AI open platform, and everything used here is free.
import requests
import os
from lxml import etree
from aip import AipSpeech
# request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
# instantiate an AipSpeech object
""" your APPID AK SK """
app_id = '18937972'
api_key = 'zdkcbvifz6trove2txdelcgs'
secret_key = 'owrrlm2iebwa2vf8hu4um8q5oezwuanp'
client = AipSpeech(app_id, api_key, secret_key)
# create the folder that will hold the audio files
dirname = 'static'
if not os.path.exists(dirname):
    os.mkdir(dirname)
# generic url template
all_url = 'https://duanziwang.com/category/%e4%b8%80%e5%8f%a5%e8%af%9d%e6%ae%b5%e5%ad%90/{}/'
# crawl the whole category; 10 pages is enough here
for num in range(1, 11):
    url = all_url.format(num)
    page_text = requests.get(url=url, headers=headers).text
    tree = etree.HTML(page_text)
    article_list = tree.xpath('/html/body/section/div/div/main/article')
    for art in article_list:
        # storage path, file name and extension for the audio file
        title = "./static/" + art.xpath('./div[1]/h1/a/text()')[0] + ".mp3"
        content = art.xpath('./div[2]/p/text()')[0]
        # call the Baidu AI text-to-speech interface
        result = client.synthesis(content, 'zh', 1, {
            # volume, 1-15
            'vol': 5,
            # voice: 0 female, 1 male
            'per': 0,
            # speed, 1-9
            'spd': 5,
        })
        # on success the audio is returned as bytes; on failure a dict with an error code is returned
        if not isinstance(result, dict):
            with open(title, 'wb') as f:
                f.write(result)
print("over!")
To be continued...