标签:status img cc await Wallhaven 高清 url async asyncio
import time
from pyquery import PyQuery
import os
import asyncio
import aiohttp
import warnings
# 获取一个请求里的所有图片页面详情链接
# Collect every wallpaper detail-page link from one toplist page.
async def url_pages(page):
    """Fetch toplist page *page* and append each detail URL to img_url."""
    async with semaphore:
        page_url = 'https://wallhaven.cc/toplist?page={}'.format(page)
        async with session.get(page_url) as response:
            body = await response.text()
            code = response.status
        # Back off and retry while the server rate-limits us (HTTP 429).
        while code == 429:
            await asyncio.sleep(2)
            async with session.get(page_url) as retry:
                body = await retry.text()
                code = retry.status
        for thumb in PyQuery(body)('.thumb-listing-page li').items():
            async with lock:
                img_url.append(thumb('.preview').attr('href'))
# 获取图片昵称、链接
# Resolve a detail page into the full-size image URL and its file name.
async def get_img_link(_url):
    """Fetch the wallpaper detail page and return (image_url, file_name)."""
    async with session.get(_url) as response:
        page_html = await response.text()
        code = response.status
    # Back off and retry while the server rate-limits us (HTTP 429).
    while code == 429:
        await asyncio.sleep(2)
        async with session.get(_url) as retry:
            page_html = await retry.text()
            code = retry.status
    image_url = PyQuery(page_html)('#wallpaper').attr('src')
    return image_url, os.path.basename(image_url)
# 下载图片
# Worker: drain img_url, resolving and downloading each wallpaper.
async def img_save():
    """Consume queued detail-page URLs and save the full-size images to disk.

    A worker gives up after three consecutive empty-queue checks (5 s
    apart), so it terminates once the producers have finished.
    """
    global number
    async with semaphore:
        patience = 3
        while patience:
            if not img_url:
                await asyncio.sleep(5)
                patience -= 1
                continue
            detail_url = img_url.pop(0)
            # Bug fix: restore the patience counter after a successful pop;
            # the original never reset it, so three empty checks anywhere in
            # the run (even non-consecutive) killed the worker for good.
            patience = 3
            url, name = await get_img_link(detail_url)
            # Bug fix: skip already-downloaded files BEFORE counting and
            # printing; the original inflated `number` and logged a download
            # for files it then skipped.
            if os.path.exists(name):
                continue
            number += 1
            print("下载第{}张 --> {}".format(number, name))
            async with session.get(url) as res:
                with open(name, 'wb') as f:
                    f.write(await res.read())
# 主方法
# Entry coroutine: start download workers and page scrapers, then clean up.
async def scrape_main():
    """Launch 20 downloader tasks plus one scraper task per toplist page,
    wait for all of them, then close the shared HTTP session."""
    global session
    session = aiohttp.ClientSession()
    tasks = [asyncio.ensure_future(img_save()) for _ in range(20)]
    for page in range(1, pages + 1):
        tasks.append(asyncio.ensure_future(url_pages(page)))
    await asyncio.wait(tasks)
    await session.close()
    print("任务爬取结束O(∩_∩)O~ 共爬取{}张图片".format(number))
if __name__ == '__main__':
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    # At most 30 coroutines may hold the semaphore concurrently.
    semaphore = asyncio.Semaphore(30)
    start_time = time.time()
    # Bug fix: the original built the path as os.getcwd() + "\wallpaper",
    # where "\w" is an invalid escape sequence and the backslash separator
    # only works on Windows; os.path.join is correct on every platform.
    save_path = os.path.join(os.getcwd(), "wallpaper")
    # makedirs(..., exist_ok=True) replaces the racy exists()+mkdir() pair.
    os.makedirs(save_path, exist_ok=True)
    os.chdir(save_path)
    number = 0        # total images downloaded (updated by the workers)
    img_url = []      # shared queue of wallpaper detail-page URLs
    session = None    # aiohttp session, created inside scrape_main()
    lock = asyncio.Lock()
    # Number of toplist pages to scrape (site max is 201 pages, 24 images each).
    pages = 3
    asyncio.get_event_loop().run_until_complete(scrape_main())
    print(f"累计耗时{time.time()-start_time:.2f}s")
标签:status,img,cc,await,Wallhaven,高清,url,async,asyncio 来源: https://www.cnblogs.com/echohye/p/16054292.html
本站声明: 1. iCode9 技术分享网(下文简称本站)提供的所有内容,仅供技术学习、探讨和分享; 2. 关于本站的所有留言、评论、转载及引用,纯属内容发起人的个人观点,与本站观点和立场无关; 3. 关于本站的所有言论和文字,纯属内容发起人的个人观点,与本站观点和立场无关; 4. 本站文章均是网友提供,不完全保证技术分享内容的完整性、准确性、时效性、风险性和版权归属;如您发现该文章侵犯了您的权益,可联系我们第一时间进行删除; 5. 本站为非盈利性的个人网站,所有内容不会用来进行牟利,也不会利用任何形式的广告来间接获益,纯粹是为了广大技术爱好者提供技术内容和技术思想的分享性交流网站。