标签: baidu utf list 采集 cookie 收录 print 百度
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: 么么哒
"""Scrape Baidu search results for every keyword in test.txt, collect the
scheme+host of each result URL into ./baidu-today.txt, then write an
order-preserving de-duplicated copy to ./baidu-today去重后.txt."""
import re

# Baidu embeds result metadata as JSON inside the page; this pulls each
# "urlDisplay" value out of that blob. Compiled once instead of per page.
_URL_DISPLAY_RE = re.compile(r'","urlDisplay":"(.*?)","urlEncoded":"')
# Matches the scheme + host prefix of a URL; the path is cut off at the
# first character outside [\w?.-]. Compiled once instead of per result.
_HOST_RE = re.compile(r'http(.*?)://([A-Za-z0-9]+[\-]?[A-Za-z0-9]+\.|[A-Za-z0-9]+\.)((\w|\?|\.|-)*)')


def extract_urls(html):
    """Return the scheme+host portion of every urlDisplay entry in *html*.

    :param html: raw text of a Baidu result page.
    :return: list of strings such as 'https://www.example.com'.
    """
    hosts = []
    for candidate in _URL_DISPLAY_RE.findall(html):
        match = _HOST_RE.search(candidate)
        # Entries whose display URL has no recognisable host are skipped
        # (the original raised AttributeError and swallowed it).
        if match:
            hosts.append(match.group(0))
    return hosts


def Reptile():
    """Query Baidu (75 result pages per keyword, pn=0..740 step 10) for every
    keyword in test.txt and append each matched host to ./baidu-today.txt."""
    # Third-party dependency imported lazily so the module can be imported
    # (e.g. for testing extract_urls) without requests installed.
    import requests

    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36'}
    cookie = "你的cookie"
    # Split on the FIRST '=' only, so cookie values that themselves contain
    # '=' survive intact (the original used split("=")[-1] and truncated them).
    cookie_dict = {}
    for item in cookie.split("; "):
        name, _, value = item.partition("=")
        cookie_dict[name] = value

    # Read the keyword list once, not once per results page as before.
    with open('test.txt', 'r', encoding='utf-8') as f:
        keywords = f.read().splitlines()

    # Open the output once; the original reopened it for every page.
    with open('./baidu-today.txt', 'a+', encoding='utf-8') as out:
        for num in range(0, 750, 10):  # pn paginates 10 results per page
            for text in keywords:
                target = ('https://www.baidu.com/s?wd={}&pn={}&ie=utf-8'
                          '&gpc=stf%3D1658043774%2C1658130174%7Cstftype%3D1').format(text, num)
                print(target)
                try:
                    # timeout added: the original could hang forever on a stall
                    r = requests.get(url=target, headers=headers,
                                     cookies=cookie_dict, timeout=15)
                except requests.RequestException as e:
                    # Best-effort crawl: report the failure and keep going.
                    print(e)
                    continue
                for host in extract_urls(r.text):
                    print(host)
                    # '\n' is a real line separator; the original wrote '\r',
                    # which only worked via universal-newline translation.
                    out.write(host + '\n')


def filter():
    # NOTE: shadows the builtin `filter`; the name is kept so existing
    # callers of this module keep working.
    """De-duplicate ./baidu-today.txt into ./baidu-today去重后.txt,
    preserving first-seen order."""
    try:
        # The file was written as utf-8; read it back the same way instead of
        # relying on the platform default encoding.
        with open('./baidu-today.txt', 'r', encoding='utf-8') as src:
            lines = src.readlines()
        # dict.fromkeys de-duplicates in O(n) while preserving insertion
        # order; the original's list(set(...)) + sort(key=list.index) was
        # O(n^2) and reopened the output file once per line.
        unique = dict.fromkeys(lines)
        with open('./baidu-today去重后.txt', 'a+', encoding='utf-8') as dst:
            dst.writelines(unique)
    except OSError as e:
        print(e)
    finally:
        print("恭喜你 去重复结束!")


if __name__ == "__main__":
    Reptile()
    filter()
标签: baidu,utf,list,采集,cookie,收录,print,百度 来源: https://www.cnblogs.com/chrales/p/16490745.html
本站声明: 1. iCode9 技术分享网(下文简称本站)提供的所有内容,仅供技术学习、探讨和分享; 2. 关于本站的所有留言、评论、转载及引用,纯属内容发起人的个人观点,与本站观点和立场无关; 3. 关于本站的所有言论和文字,纯属内容发起人的个人观点,与本站观点和立场无关; 4. 本站文章均是网友提供,不完全保证技术分享内容的完整性、准确性、时效性、风险性和版权归属;如您发现该文章侵犯了您的权益,可联系我们第一时间进行删除; 5. 本站为非盈利性的个人网站,所有内容不会用来进行牟利,也不会利用任何形式的广告来间接获益,纯粹是为了广大技术爱好者提供技术内容和技术思想的分享性交流网站。