
Getting started with Scrapy: scraping a movie ranking board and saving it to JSON, CSV, and MySQL



1. Install the package

pip install scrapy
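To confirm the install worked, you can print the installed version (the exact version number will vary with your environment):

scrapy version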

2. From a terminal in the directory where the project should live, create the project (scrapy startproject <project-name>) and generate a spider:

scrapy startproject maoyan

cd maoyan

scrapy genspider maoyan_spider maoyan.com

(Note: the spider name must differ from the project name, and genspider expects a domain rather than a full URL.)

The generated directory layout is roughly:

|-ProjectName             # project folder

  |-ProjectName           # project package

    |-items.py            # data structure definitions

    |-middlewares.py      # middlewares

    |-pipelines.py        # item processing

    |-settings.py         # global configuration

    |-spiders

      |-__init__.py

      |-maoyan_spider.py  # the spider file

  |-scrapy.cfg            # basic project configuration

3. Configure settings.py as follows:

# Project name
BOT_NAME = 'maoyan'

SPIDER_MODULES = ['maoyan.spiders']
NEWSPIDER_MODULE = 'maoyan.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'

# ROBOTSTXT_OBEY: whether to respect robots.txt. It defaults to True;
# set it to False here, or many pages cannot be crawled.
ROBOTSTXT_OBEY = False

# CONCURRENT_REQUESTS: the maximum number of requests handled concurrently.
#CONCURRENT_REQUESTS = 32

# Download delay in seconds; throttles how fast the spider crawls.
DOWNLOAD_DELAY = 3

# DEFAULT_REQUEST_HEADERS: default request headers. The USER_AGENT above is
# really just one of these headers; adjust them to suit the site you crawl.
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# ITEM_PIPELINES: item pipelines. 300 is the priority; lower values run first.
# The class path must match this project's pipeline class (see pipelines.py below).
ITEM_PIPELINES = {
   'maoyan.pipelines.MaoyanPipeline': 300,
}
# Export encoding; without this, JSON output comes out garbled.
FEED_EXPORT_ENCODING = 'utf-8'

A suitable USER_AGENT value can be copied from your browser's developer tools (Network tab).

4. Define the item in items.py:

import scrapy

class MaoyanItem(scrapy.Item):
    move_name = scrapy.Field()    # movie title
    peaple_name = scrapy.Field()  # lead actors
    move_time = scrapy.Field()    # release date
    describe = scrapy.Field()     # description (declared but unused below)
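As a quick aside (not part of the project files), a scrapy.Item behaves like a dict restricted to its declared fields; the value below is purely illustrative:

from maoyan.items import MaoyanItem

item = MaoyanItem()
item['move_name'] = 'Example Title'  # illustrative value only
print(dict(item))                    # {'move_name': 'Example Title'}
# item['director'] = 'X'  # would raise KeyError: undeclared field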

5. Make sure a maoyan_spider.py file exists under the spiders directory (the genspider command above creates it; you can also create it by hand).

6. Write maoyan_spider.py as follows:

import re

import scrapy
from maoyan.items import MaoyanItem

class MaoyanSpiderSpider(scrapy.Spider):
    name = 'maoyan_spider'  # spider name (used by "scrapy crawl")
    allowed_domains = ['maoyan.com']  # allowed domains
    # entry URL
    start_urls = ['https://www.maoyan.com/board/4?timeStamp=1638539026134&channelId=40011&index=1&signKey=a675982b76014e4a8b7f3beb5afe7441&sVersion=1&webdriver=false']

    def parse(self, response):
        # Each entry lives under //*[@id="app"]/div/div/div[1]/dl/dd/div
        move_list = response.xpath("//*[@id='app']/div/div/div[1]/dl//div/div/div[1]")
        for d in move_list:
            maoyan_item = MaoyanItem()  # item object holding one scraped entry
            # Relative XPaths within one entry:
            #   .//p[1]/a  -> movie name
            #   .//p[2]    -> lead actors
            #   .//p[3]    -> release date
            n_list = []
            p_list = []
            # movie name: split the raw <a ...> markup on '"' and take the
            # title attribute's value (index 3)
            aaa = d.xpath(".//p[1]/a").extract_first().split('"')
            for aa in aaa:
                n_list.append(aa)
            maoyan_item['move_name'] = n_list[3]
            # lead actors: second line of the <p> text, minus the "主演:" label
            bbb = d.xpath(".//p[2]").extract_first().split('\n')
            for bb in bbb:
                p_list.append(bb)
            maoyan_item['peaple_name'] = p_list[1].replace('主演:', '').strip()
            # release date: pull a YYYY-MM-DD date out of the <p> markup
            move_time1 = d.xpath(".//p[3]").extract()
            for t in move_time1:
                ccc = re.search(r"(\d{4}-\d{1,2}-\d{1,2})", t).group(0)
                maoyan_item['move_time'] = ccc
            print(maoyan_item)
            yield maoyan_item  # hand the item over to the scheduler/pipelines
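When these XPaths stop matching (Maoyan changes its markup and may serve an anti-bot page instead of the board), scrapy shell is handy for testing selectors interactively; a minimal session, assuming the page is reachable:

scrapy shell 'https://www.maoyan.com/board/4'
# at the resulting Python prompt:
>>> entries = response.xpath("//*[@id='app']/div/div/div[1]/dl//div/div/div[1]")
>>> entries[0].xpath(".//p[1]/a").extract_first()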

7. Create a main.py by hand to run the spider from your IDE (you can also just use the command line):

from scrapy import cmdline
cmdline.execute('scrapy crawl maoyan_spider'.split())
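An alternative sketch, if you would rather not go through cmdline: CrawlerProcess runs the spider in-process (this assumes main.py sits next to scrapy.cfg so the project settings can be found):

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('maoyan_spider')  # spider name from maoyan_spider.py
process.start()                 # blocks until the crawl finishes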

8. Run main.py.

9. Storage options: JSON, CSV, MySQL

1) Save to JSON (run from the project root; mind the output path):

scrapy crawl maoyan_spider -o test.json

2) Save to CSV (run from the project root; mind the output path):

scrapy crawl maoyan_spider -o test.csv
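On Scrapy 2.1 or newer you can also configure exports once in settings.py via the FEEDS setting instead of passing -o on every run; a sketch (the file names are arbitrary):

# settings.py
FEEDS = {
    'test.json': {'format': 'json', 'encoding': 'utf8'},
    'test.csv': {'format': 'csv'},
}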

 

3) Save to MySQL

 

# settings.py
# MongoDB alternative (for reference):
# mongo_host = '192.168.x.xxx'
# mongo_port = 27017
# mongo_db_name = 'maoyan'
# mongo_db_collection = 'maoyan_movie'

MYSQL_HOST = 'localhost'
MYSQL_DBNAME = 'maoyan_sql'
MYSQL_USER = 'root'
MYSQL_PASSWD = '1234'
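The pipeline below assumes the maoyan_sql database and a move table already exist; a minimal setup sketch using pymysql (the column types are assumptions, adjust to taste):

import pymysql

# one-off setup script: create the database and table the pipeline writes to
conn = pymysql.connect(host='localhost', user='root', passwd='1234', charset='utf8mb4')
cur = conn.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS maoyan_sql DEFAULT CHARACTER SET utf8mb4")
cur.execute("""
    CREATE TABLE IF NOT EXISTS maoyan_sql.move (
        id INT AUTO_INCREMENT PRIMARY KEY,
        move_name VARCHAR(255),   -- movie title
        peaple_name VARCHAR(255), -- lead actors
        move_time VARCHAR(32)     -- release date string
    )
""")
conn.commit()
conn.close()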


# pipelines.py
import pymysql

# Read the connection constants defined in settings.py
from maoyan import settings

# dbHandle() is only used by the commented-out alternative pipeline below
def dbHandle():
    conn = pymysql.connect(
        host="localhost",
        user="root",
        passwd="1234",
        charset="utf8mb4",
        use_unicode=False
    )
    return conn
class MaoyanPipeline:
    def __init__(self):
        # connect to the database
        self.connect = pymysql.connect(
            host=settings.MYSQL_HOST,
            db=settings.MYSQL_DBNAME,
            user=settings.MYSQL_USER,
            passwd=settings.MYSQL_PASSWD,
            charset='utf8',
            use_unicode=True)
        # cursor used for inserts/updates/queries
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        try:
            # insert one row
            self.cursor.execute(
                """INSERT INTO move(move_name, peaple_name, move_time) VALUES (%s, %s, %s)""",
                (item['move_name'],
                 item['peaple_name'],
                 item['move_time']))
            # commit the statement
            self.connect.commit()
        except BaseException as e:
            # roll back and log the error
            self.connect.rollback()
            print("error:------------", e, "-----------------")
        return item

    def close_spider(self, spider):
        # close the connection when the spider finishes
        self.connect.close()
# MongoDB version, for reference:
    # def __init__(self):
    #     host = mongo_host
    #     port = mongo_port
    #     dbname = mongo_db_name
    #     sheetname = mongo_db_collection
    #     client = pymongo.MongoClient(host=host, port=port)
    #     mydb = client[dbname]
    #     self.post = mydb[sheetname]  # collection handle used for reads/writes
    # def process_item(self, item, spider):
    #     data = dict(item)  # convert the item to a dict, then insert it
    #     self.post.insert(data)
    #     return item

# Alternative pipeline using dbHandle(), for reference:
# class HellospiderPipeline(object):
#     def process_item(self, item, spider):
#         dbObject = dbHandle()
#         cursor = dbObject.cursor()
#         cursor.execute("USE maoyan_sql")
#         # insert into the database
#         sql = "INSERT INTO move(move_name,peaple_name,move_time) VALUES(%s,%s,%s)"
#         try:
#             cursor.execute(sql,
#                            (item['move_name'], item['peaple_name'], item['move_time']))
#             cursor.connection.commit()
#         except BaseException as e:
#             print("error here >>>>>>>>>>>>>", e, "<<<<<<<<<<<<<")
#             dbObject.rollback()
#         return item

Querying the move table in MySQL should now show the inserted rows.

Appendix: the following is for reference only.

# middlewares.py
# An IP proxy middleware:
import base64

class my_proxy(object):
    def process_request(self, request, spider):
        request.meta['proxy'] = 'http-xxx.com:port'   # proxy address (placeholder)
        proxy_name_pass = b'username:password'        # proxy credentials (placeholder)
        encode_pass_name = base64.b64encode(proxy_name_pass)  # base64-encode them
        request.headers['Proxy-Authorization'] = 'Basic ' + encode_pass_name.decode()

A middleware must be enabled in settings.py after it is defined:

# settings.py
DOWNLOADER_MIDDLEWARES = {
    # 'maoyan.middlewares.MaoyanDownloaderMiddleware': 543,
    'maoyan.middlewares.my_proxy': 543,
}

# middlewares.py
# A random user-agent middleware:
import random

class my_useragent(object):
    def process_request(self, request, spider):
        USER_AGENT_LIST = [...]  # fill in a list of user-agent strings
        agent = random.choice(USER_AGENT_LIST)
        request.headers['User-Agent'] = agent

# settings.py -- the priorities must not be equal
DOWNLOADER_MIDDLEWARES = {
    'maoyan.middlewares.my_proxy': 543,
    'maoyan.middlewares.my_useragent': 544,
}

Source: https://www.cnblogs.com/97hong/p/15659448.html