标签:MongoDb selenium EC element 爬取 item SELECTOR page browser
selenium_taobao_com.py
#!/usr/bin/env python3
# coding=utf-8
# Version: python3.6.1
# File: selenium_taobao_com.py
# Author: LGSP_Harold
"""Scrape Taobao search-result pages with Selenium and store each item in MongoDB.

Flow: log in via QR-code scan (Taobao's anti-bot check blocks scripted
username/password login), then fetch each requested result page, parse the
product cards with pyquery, and insert one document per product.
"""

from urllib.parse import quote

import pymongo
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

# firefox_options = webdriver.FirefoxOptions()
# firefox_options.add_argument('--headless')
# browser = webdriver.Firefox(firefox_options=firefox_options)
browser = webdriver.Firefox()
wait = WebDriverWait(browser, 10)
client = pymongo.MongoClient('mongodb://admin:admin@localhost:27017')
db = client.db_taobao_com

# Cap retries so a persistently timing-out page cannot recurse forever.
MAX_RETRIES = 3


def index_page(page, _retries=0):
    """Crawl one search-result index page.

    :param page: 1-based page number to fetch.
    :param _retries: internal retry counter for timeout recovery;
        callers should not pass it explicitly.
    """
    print('正在爬取第', page, '页')
    try:
        if page > 1:
            # Jump straight to the requested page through the pager's input box.
            # (Renamed from `input`, which shadowed the builtin.)
            page_input = wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, '#mainsrp-pager div.form > input')))
            submit = WebDriverWait(browser, 60, 3).until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, '#mainsrp-pager div.form span.btn.J_Submit')))
            page_input.clear()
            page_input.send_keys(page)
            submit.click()
        # Wait until the pager highlights the requested page number...
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, '#mainsrp-pager li.item.active > span'), str(page)))
        # ...and the product list has rendered.
        wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, '.m-itemlist .items .item')))
        # Scroll the "next page" control into view so lazy-loaded content resolves.
        # find_element_by_xpath() was removed in Selenium 4; use find_element(By.XPATH, ...).
        next_page = browser.find_element(By.XPATH, '//li[@class="item next"]')
        browser.execute_script('arguments[0].scrollIntoView();', next_page)
        get_products()
    except TimeoutException:
        # Bounded retry instead of the original unbounded recursion.
        if _retries < MAX_RETRIES:
            index_page(page, _retries + 1)
        else:
            print('正在爬取第', page, '页')
            print('登录超时')


def get_products():
    """Parse the current page source and persist every product card found."""
    doc = pq(browser.page_source)
    for item in doc('#mainsrp-itemlist .items .item').items():
        product = {
            'image': item.find('.pic .img').attr('data-src'),
            'price': item.find('.price').text(),
            'deal': item.find('.deal-cnt').text(),
            'title': item.find('.title').text(),
            'shop': item.find('.shop').text(),
            'location': item.find('.location').text(),
        }
        print(product)
        save_to_mongo(product)


def save_to_mongo(result):
    """Insert one product dict into MongoDB.

    :param result: product dict produced by get_products().
    """
    try:
        # insert_one() raises on failure; its result object is always truthy,
        # so the original `if` guard was a no-op and has been dropped.
        db.collection_product.insert_one(result)
        print('存储到MongoDB成功')
    except Exception as e:
        print('存储到MongoDB失败')
        print(e)


def login():
    """Open the Taobao login page and wait for the user to log in by QR scan."""
    url = 'https://login.taobao.com/member/login.jhtml'
    browser.get(url=url)
    # Taobao's anti-bot check detects Selenium, so scripted username/password
    # login fails (captcha validation error). Workaround: scan the QR code
    # with the Taobao mobile app.
    try:
        qr_code = WebDriverWait(browser, 30, 5).until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, 'i.iconfont.icon-qrcode')))
        qr_code.click()
        print('请用淘宝APP扫码登录')
        # Login has succeeded once the nickname element appears in the nav bar.
        if WebDriverWait(browser, 60, 5).until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, '.site-nav-login-info-nick'))):
            url_index = WebDriverWait(browser, 60, 3).until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, 'div.site-nav-menu-hd > a > span')))
            url_index.click()
    except TimeoutException:
        print('登录超时')
        browser.quit()


def main():
    """Log in, then crawl every requested search-result page."""
    login()
    goods = input('输入您要搜索的商品:')
    page = int(input('输入您要爬取的总页数:'))
    url = 'https://s.taobao.com/search?q=' + quote(goods)
    browser.get(url=url)
    try:
        for i in range(1, page + 1):
            index_page(i)
    finally:
        # Always release the browser, even if a page raises mid-crawl.
        browser.quit()


if __name__ == '__main__':
    main()
标签:MongoDb,selenium,EC,element,爬取,item,SELECTOR,page,browser 来源: https://www.cnblogs.com/Harold-Hua/p/15169285.html
本站声明: 1. iCode9 技术分享网(下文简称本站)提供的所有内容,仅供技术学习、探讨和分享; 2. 关于本站的所有留言、评论、转载及引用,纯属内容发起人的个人观点,与本站观点和立场无关; 3. 关于本站的所有言论和文字,纯属内容发起人的个人观点,与本站观点和立场无关; 4. 本站文章均是网友提供,不完全保证技术分享内容的完整性、准确性、时效性、风险性和版权归属;如您发现该文章侵犯了您的权益,可联系我们第一时间进行删除; 5. 本站为非盈利性的个人网站,所有内容不会用来进行牟利,也不会利用任何形式的广告来间接获益,纯粹是为了广大技术爱好者提供技术内容和技术思想的分享性交流网站。