
Using Scrapy to crawl the src, name, and img of every book in Dangdang's suspense category, with MySQL ingestion

2020-09-24 19:32:39


Target URL: http://category.dangdang.com/cp01.01.04.00.00.00.html
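
Page 1 of the category is served at cp01.01.04.00.00.00.html; from page 2 onward the path gains a pgN- prefix (pg2-cp01.01.04.00.00.00.html, pg3-cp01.01.04.00.00.00.html, and so on). The spider below exploits this pattern to paginate.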

items.py

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class DangdangItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    src = scrapy.Field()
    name = scrapy.Field()
    price = scrapy.Field()
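
A scrapy.Item behaves like a dict, but only the fields declared with scrapy.Field() can be assigned; setting anything else raises a KeyError. A quick sketch:

item = DangdangItem()
item['name'] = 'example title'   # declared field: OK
# item['author'] = 'x'           # not declared above, so this raises KeyError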

dang.py

import scrapy
from ..items import DangdangItem

class DangSpider(scrapy.Spider):
    name = 'dang'
    allowed_domains = ['category.dangdang.com']
    start_urls = ['http://category.dangdang.com/cp01.01.04.00.00.00.html']

    # http://category.dangdang.com/cp01.01.04.00.00.00.html
    # http://category.dangdang.com/pg2-cp01.01.04.00.00.00.html
    # http://category.dangdang.com/pg3-cp01.01.04.00.00.00.html
    base_url = 'http://category.dangdang.com/pg'
    page = 1 

    def parse(self, response):
        # XPath cheat sheet for this page:
        #   src:   //ul[@id="component_59"]/li/a/img/@data-original
        #   price: //ul[@id="component_59"]/li/p[@class="price"]/span[1]/text()
        #   name:  //ul[@id="component_59"]/li/p[@class="name"]/a/text()

        li_list = response.xpath('//ul[@id="component_59"]/li')
        # The first book's <img> uses a plain src attribute; every later one
        # lazy-loads via data-original, so grab the first src as a fallback
        first_src = response.xpath('//ul[@id="component_59"]/li[@class="line1"]/a/img/@src').extract_first()
        for li in li_list:
            src = li.xpath('./a/img/@data-original').extract_first()
            if not src:
                src = first_src
            # Some name <a> tags contain nested tags; string() concatenates
            # all of their text
            name = li.xpath('./p[@class="name"]/a').xpath('string()').extract_first()
            price = li.xpath('./p[@class="price"]/span[1]/text()').extract_first()
            print(src, name, price)

            dang = DangdangItem(src=src,name=name,price=price)

            # yield each dang item to the pipelines
            yield dang
        
        # Paginate up to page 80 of the category
        if self.page < 80:
            self.page = self.page + 1
            url = self.base_url + str(self.page) + '-cp01.01.04.00.00.00.html'
            # Debugging note from the original post: the URL printed here, but
            # at first the callback never ran to crawl the next page
            print(url)
            # Pass the method itself as the callback -- no parentheses
            yield scrapy.Request(url=url, callback=self.parse)
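
When an XPath comes back empty, it is faster to test it interactively than to re-run the whole crawl. Scrapy's built-in shell works well for this; the selectors below are the same ones the spider uses (getall() is the newer alias for extract()):

scrapy shell "http://category.dangdang.com/cp01.01.04.00.00.00.html"
>>> response.xpath('//ul[@id="component_59"]/li/a/img/@data-original').getall()
>>> response.xpath('//ul[@id="component_59"]/li/p[@class="name"]/a').xpath('string()').getall()

Note that the site may serve different markup to a non-browser User-Agent, so what the shell sees can differ from what browser DevTools shows.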

pipelines.py

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class DangdangPipeline:

    # Runs once when the spider starts: open the file and keep the handle open
    def open_spider(self, spider):
        self.fp = open('dang.json', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        # Re-opening the file for every item would also work, but is much slower:
        # with open('dang.json','a',encoding='utf-8') as f:
        #     f.write(str(item))
        self.fp.write(str(item))
        return item

    # Runs once after the crawl finishes
    def close_spider(self, spider):
        self.fp.close()
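
One caveat: str(item) writes a Python repr, not valid JSON, so dang.json will not actually parse as JSON. If real JSON output is wanted, a minimal variant of process_item using the already-imported ItemAdapter could write one JSON object per line instead (assuming import json at the top of the file):

    def process_item(self, item, spider):
        # json.dumps produces valid JSON; ensure_ascii=False keeps Chinese readable
        line = json.dumps(ItemAdapter(item).asdict(), ensure_ascii=False) + '\n'
        self.fp.write(line)
        return item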

        
import urllib.request

class DangdangImagePipeline(object):
    def process_item(self, item, spider):
        url = item['src']
        print("========= downloading: {} =========".format(item['name']))
        print(url)
        # Use the first 14 characters of the title as the file name;
        # the ./img/ directory must already exist
        prefix = item['name'][0:14]
        filename = './img/' + prefix + '.jpg'
        urllib.request.urlretrieve(url=url, filename=filename)

        return item
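
As an aside, Scrapy ships a built-in ImagesPipeline (it requires Pillow) that handles downloading, deduplication, and file naming by itself. A rough sketch of how it could replace the class above, still reading the URL from item['src']:

from scrapy.pipelines.images import ImagesPipeline
import scrapy

class DangdangImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # hand the cover URL to Scrapy's own downloader
        yield scrapy.Request(item['src'])

This variant would also need IMAGES_STORE = './img' in settings.py and an entry in ITEM_PIPELINES.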



import pymysql
from scrapy.utils.project import get_project_settings

class DangMysqlPipeline(object):

    def open_spider(self, spider):
        # Read the connection parameters from settings.py
        settings = get_project_settings()
        self.conn = pymysql.Connect(host=settings['DB_HOST'],
                        user=settings['DB_USER'],
                        password=settings['DB_PASSWORD'],
                        database=settings['DB_DATABASE'],
                        # with pymysql, port must be an int and the charset is
                        # 'utf8' -- no dash
                        port=settings['DB_PORT'],
                        charset=settings['DB_CHARSET'])

        # cursor object used to execute SQL statements
        self.cursor = self.conn.cursor()
        
    def process_item(self, item, spider):
        # A parameterized query keeps quotes in titles from breaking the SQL
        sql = 'insert into dang(src, name, price) values (%s, %s, %s)'
        self.cursor.execute(sql, (item['src'], item['name'], item['price']))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        # self.cursor is an object, not a method; calling self.cursor() was
        # the "mysterious error" noted in the original post
        self.cursor.close()
        self.conn.close()
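
This pipeline assumes the dang table already exists in the community database. A table definition matching the three fields could look like this (column sizes are guesses, not taken from the original post):

CREATE TABLE dang (
    id INT AUTO_INCREMENT PRIMARY KEY,
    src VARCHAR(255),
    name VARCHAR(255),
    price VARCHAR(32)
) DEFAULT CHARSET=utf8;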

settings.py

# Scrapy settings for dangdang project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'dangdang'

SPIDER_MODULES = ['dangdang.spiders']
NEWSPIDER_MODULE = 'dangdang.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
  'Referer': 'http://category.dangdang.com/',
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'dangdang.middlewares.DangdangSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'dangdang.middlewares.DangdangDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'dangdang.pipelines.DangdangPipeline': 300,
  # 'dangdang.pipelines.DangMysqlPipeline': 300,
  #  'dangdang.pipelines.DangdangImagePipeline': 299,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


# MySQL connection parameters read by DangMysqlPipeline

DB_HOST='127.0.0.1'
DB_USER='root'
DB_PASSWORD='123456'
DB_DATABASE='community'
DB_PORT=3306
DB_CHARSET='utf8'
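
With the settings in place, the crawl is started from the project root with scrapy crawl dang. Items then flow through every pipeline enabled in ITEM_PIPELINES, lower numbers running first.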
