标签:vedio name get python dic 爬取 url 网页 page
#coding=gbk
from lxml import etree
import requests
from multiprocessing.dummy import Pool
import random
#@starttime:2021/11/25 10:21
#@endtime:2021/11/25 15:20
if __name__ == '__main__':
    # Scrape the pearvideo.com homepage, resolve each listed video's real
    # download URL via the videoStatus.jsp AJAX endpoint, then download all
    # of them concurrently with a thread pool.
    #
    # Fixes vs. original: removed the pointless `.text.encode('utf-8')`
    # round-trip (requests already decodes to str and etree.HTML accepts it),
    # stopped shadowing the builtin `id`, hoisted the constant User-Agent out
    # of the loop, and added the missing pool close()/join().
    home_url = 'https://www.pearvideo.com/'
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/95.0.4638.69 Safari/537.36 Edg/95.0.1020.53')
    header = {'User-Agent': user_agent}

    home_page = requests.get(url=home_url, headers=header).text
    tree = etree.HTML(home_page)
    # Each homepage video card lives in a div.vervideo-bd element.
    video_nodes = tree.xpath('//div[@class="vervideo-bd"]')

    urls = []
    for node in video_nodes:
        rel_href = ''.join(node.xpath('./a//@href'))     # e.g. 'video_1581126'
        detail_url = 'https://www.pearvideo.com/' + rel_href
        # The status endpoint rejects requests whose Referer is not the
        # video's own detail page, so it must be set per video.
        detail_header = {
            'User-Agent': user_agent,
            'referer': detail_url,
        }
        cont_id = rel_href.split('_')[-1]  # numeric video id (don't shadow builtin `id`)
        # Observed request shape:
        # videoStatus.jsp?contId=<id>&mrd=<random float>
        params = {
            'contId': cont_id,
            'mrd': str(random.random()),
        }
        video_name = ''.join(node.xpath('./a/div[2]/text()'))
        status = requests.get(
            url='https://www.pearvideo.com/videoStatus.jsp',
            params=params,
            headers=detail_header,
        ).json()
        fake_url = status['videoInfo']['videos']['srcUrl']
        # srcUrl is a decoy: its last path segment starts with a timestamp
        # where the real CDN URL has 'cont-<id>'. Swap it in.
        key = 'cont-' + cont_id
        real_url = fake_url.replace(fake_url.split('/')[-1].split('-')[0], key)
        urls.append({'name': video_name, 'url': real_url})

    def get_vedio_data(dic):
        """Download one video described by {'name': ..., 'url': ...} and
        write it to <name>.mp4 in the current directory."""
        data = requests.get(url=dic['url'], headers=header).content
        with open(dic['name'] + '.mp4', 'wb') as fp:
            fp.write(data)
        print(dic['name'], '下载成功')

    # multiprocessing.dummy.Pool is a thread pool — appropriate here since
    # the work is I/O bound. The original never released the pool.
    pool = Pool(5)
    try:
        pool.map(get_vedio_data, urls)
    finally:
        pool.close()
        pool.join()
献给还在梨视频爬取中苦苦挣扎的小伙伴,我先往前走了。
标签:vedio,name,get,python,dic,爬取,url,网页,page 来源: https://blog.csdn.net/csdn_drinker/article/details/121545226
本站声明: 1. iCode9 技术分享网(下文简称本站)提供的所有内容,仅供技术学习、探讨和分享; 2. 关于本站的所有留言、评论、转载及引用,纯属内容发起人的个人观点,与本站观点和立场无关; 3. 关于本站的所有言论和文字,纯属内容发起人的个人观点,与本站观点和立场无关; 4. 本站文章均是网友提供,不完全保证技术分享内容的完整性、准确性、时效性、风险性和版权归属;如您发现该文章侵犯了您的权益,可联系我们第一时间进行删除; 5. 本站为非盈利性的个人网站,所有内容不会用来进行牟利,也不会利用任何形式的广告来间接获益,纯粹是为了广大技术爱好者提供技术内容和技术思想的分享性交流网站。