# Five ways to get all the links on the current page with Python, shared here for reference; details below:
# Method 1: requests_html — the HTMLSession response object exposes every
# link on the page directly through its ``links`` property.
from requests_html import HTMLSession

url = 'https://www.baidu.com'
session = HTMLSession()
r = session.get(url)
print(r.html.links)
print('*' * 100)
# Method 2: requests + BeautifulSoup — parse the HTML and collect the
# href attribute of every anchor tag.
import requests
from bs4 import BeautifulSoup
url = 'http://www.baidu.com'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'lxml')
# href=True filters out <a> tags that have no href attribute, which would
# otherwise make a['href'] raise KeyError.
for a in soup.find_all('a', href=True):
    print(a['href'])
print('*'*100)
# Method 3: re — not recommended; hand-rolling regexes for HTML is tedious
# and error-prone, so no example is given.
# Method 4: lxml.etree — the XPath expression '//@href' selects every href
# attribute anywhere in the document.
# NOTE(review): this parses ``r.text`` from the requests_html response in
# Method 1 (not ``res`` from Method 2) — confirm that is intentional.
from lxml import etree
tree = etree.HTML(r.text)
for href in tree.xpath('//@href'):
    print(href)
print('*' * 100)
# Method 5: selenium — drive a headless Chrome browser and read the fully
# resolved href of each anchor element on the rendered page.
from selenium import webdriver
from selenium.webdriver.common.by import By
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
# Selenium 4 removed the ``chrome_options=`` keyword; use ``options=``.
browser = webdriver.Chrome(options=chrome_options)
try:
    browser.get(url)
    # find_elements_by_tag_name() was removed in Selenium 4; the By locator
    # API is the supported way to find elements.
    for link in browser.find_elements(By.TAG_NAME, 'a'):
        print(link.get_attribute('href'))
finally:
    # Always quit the driver so the chromedriver/browser processes don't leak.
    browser.quit()