作者:庾承泓_689 | 来源:互联网 | 2023-07-17 20:53
很遗憾,部分数据有些问题,不过还是可以进行爬取出来的。
先贴上源代码:
import re
import time

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
driver = webdriver.PhantomJS()
def getdata_byQQ(QQ):
    """Scrape and print the "说说" (shuoshuo) feed of a Qzone profile.

    Loads ``https://user.qzone.qq.com/<QQ>/311``, logs in via the embedded
    login iframe if one is shown, then prints each visible post's text and
    timestamp followed by the session cookies, and finally shuts down the
    shared module-level ``driver``.

    Args:
        QQ: numeric QQ account whose Qzone feed should be scraped.

    Side effects:
        Prints scraped data and cookies to stdout; closes and quits the
        module-level ``driver``, so the function can only be called once
        per driver instance.
    """
    driver.get('https://user.qzone.qq.com/{}/311'.format(QQ))
    # Scroll down twice with pauses so lazily-loaded feed entries render.
    driver.execute_script("window.scrollBy(0,3000)")
    time.sleep(3)
    driver.execute_script("window.scrollBy(0,5000)")
    time.sleep(3)

    # A 'login_div' element means we are not authenticated yet.
    # BUG FIX: narrow the bare except to NoSuchElementException so real
    # errors (e.g. a dead driver) are not silently swallowed.
    try:
        driver.find_element_by_id('login_div')
        need_login = True
    except NoSuchElementException:
        need_login = False
    if need_login:
        # switch_to_frame() is deprecated; switch_to.frame() is current.
        driver.switch_to.frame('login_frame')
        driver.find_element_by_id('switcher_plogin').click()
        driver.find_element_by_id('u').clear()
        driver.find_element_by_id('u').send_keys('QQ号')
        driver.find_element_by_id('p').clear()
        driver.find_element_by_id('p').send_keys('QQ密码')
        driver.find_element_by_id('login_button').click()
        time.sleep(3)
    driver.implicitly_wait(3)

    # The feed is only readable when the owner-info icon is present
    # (i.e. we have permission to view this profile).
    try:
        driver.find_element_by_id('QM_OwnerInfo_Icon')
        feed_readable = True
    except NoSuchElementException:
        feed_readable = False
    if feed_readable:
        driver.switch_to.frame('app_canvas_frame')
        # BUG FIX: use find_elements_* (plural). The singular form returns
        # a single WebElement, which is not iterable, so the original
        # zip() over it raised TypeError.
        contents = driver.find_elements_by_css_selector('.content')
        stimes = driver.find_elements_by_css_selector('.c_tx.c_tx3.goDetail')
        for con, sti in zip(contents, stimes):
            data = {
                'time': sti.text,
                'shuos': con.text,
            }
            print(data)
        # BUG FIX: BeautifulSoup has no ``.get`` constructor classmethod;
        # the correct call is BeautifulSoup(markup, parser).
        # NOTE(review): ``soup`` is currently unused — kept to preserve
        # the original intent of post-processing the page.
        soup = BeautifulSoup(driver.page_source, 'lxml')

    # BUG FIX: the WebDriver API is get_cookies(); 'get_COOKIEs' does
    # not exist and raised AttributeError.
    cookies = driver.get_cookies()
    # Build the cookie header with a single join instead of += in a loop.
    cookie_str = ''.join(
        '{0}={1};'.format(c['name'], c['value']) for c in cookies
    )
    print('COOKIEs:', cookie_str)
    print("==========完成================")
    driver.close()
    driver.quit()
getdata_byQQ(643435675)
再贴上学习blog:http://zmister.com/archives/98.html