Scrapy / Pipeline not inserting data into MySQL database

Asked: 2015-03-23 22:32:45

Tags: mysql scrapy mysql-python pipeline

I am creating a pipeline in Scrapy to store the scraped data in a MySQL database. The spider runs fine in the terminal, and the pipeline is even opened, but the data is never sent to the database. Any help appreciated! :)

Here is the pipeline code:

import sys
import MySQLdb
import hashlib
from scrapy.exceptions import DropItem
from scrapy.http import Request
from tutorial.items import TutorialItem

class MySQLTest(object):
    def __init__(self):
        # Keep the connection and cursor on self so process_item can reach them.
        self.db = MySQLdb.connect(user='root', passwd='', host='localhost', db='python')
        self.cursor = self.db.cursor()

    def process_item(self, item, spider):  # Scrapy passes (item, spider), in that order.
        try:
            self.cursor.execute("INSERT INTO info (venue, datez) VALUES (%s, %s)",
                                (item['artist'], item['date']))
            self.db.commit()

        except MySQLdb.Error, e:
            print "Error %d: %s" % (e.args[0], e.args[1])

        return item  # Always return the item so later pipelines still receive it.
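As a side note, the connection above is never closed. Scrapy pipelines also support a close_spider hook that runs once when the crawl finishes; a minimal sketch, assuming the self.db attribute from __init__ above:

    def close_spider(self, spider):
        # Called once when the spider finishes; release the MySQL connection.
        self.db.close()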

And here is the spider code:

import scrapy # Import required libraries.
from scrapy.selector import HtmlXPathSelector # Allows for path detection in a websites code.
from scrapy.spider import BaseSpider # Used to create a simple spider to extract data.
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor # Needed for the extraction of href links in HTML to crawl further pages.
from scrapy.contrib.spiders import CrawlSpider # Needed to make the crawl spider.
from scrapy.contrib.spiders import Rule # Allows specified rules to affect what the link extractor crawls.
import spotipy
import soundcloud
import mysql.connector

from tutorial.items import TutorialItem

class AllGigsSpider(CrawlSpider):
    name = "allGigs" # Name of the Spider. In command promt, when in the correct folder, enter "scrapy crawl Allgigs".
    allowed_domains = ["www.allgigs.co.uk"] # Allowed domains is a String NOT a URL. 
    start_urls = [
        "http://www.allgigs.co.uk/whats_on/London/clubbing-1.html",
        "http://www.allgigs.co.uk/whats_on/London/festivals-1.html",
        "http://www.allgigs.co.uk/whats_on/London/comedy-1.html",
        "http://www.allgigs.co.uk/whats_on/London/theatre_and_opera-1.html",
        "http://www.allgigs.co.uk/whats_on/London/dance_and_ballet-1.html"
    ] # Specify the starting points for the web crawler.  

    rules = [
        Rule(SgmlLinkExtractor(restrict_xpaths='//div[@class="more"]'), # Follow the "more" links found on the start URLs.
        callback="parse_me", 
        follow=True),
    ]

    def parse_me(self, response):
        for info in response.xpath('//div[@class="entry vevent"]|//div[@class="resultbox"]'):
            item = TutorialItem() # Create a new item, as defined in items.py.
            item ['artist'] = info.xpath('.//span[@class="summary"]//text()').extract() # Extract artist information.
            item ['date'] = info.xpath('.//span[@class="dates"]//text()').extract() # Extract date information.
            #item ['endDate'] = info.xpath('.//abbr[@class="dtend"]//text()').extract() # Extract end date information.         
            #item ['startDate'] = info.xpath('.//abbr[@class="dtstart"]//text()').extract() # Extract start date information.
            item ['genre'] = info.xpath('.//div[@class="header"]//text()').extract()
            yield item # Yield the populated item.
            client = soundcloud.Client(client_id='401c04a7271e93baee8633483510e263')
            tracks = client.get('/tracks', limit=1, license='cc-by-sa', q=item['artist'])
            for track in tracks:
                print(track)
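Note that .extract() returns a list of strings, so item['artist'] and item['date'] are lists by the time they reach the pipeline, and binding a list to a %s placeholder in the INSERT can fail or store an unexpected value. A minimal sketch of normalizing the fields inside parse_me before yielding (the joining logic is an assumption, not part of the original code):

            # Join the extracted text fragments into single plain strings
            # so the MySQL pipeline receives scalar values, not lists.
            item['artist'] = ' '.join(item['artist']).strip()
            item['date'] = ' '.join(item['date']).strip()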

1 Answer:

Answer 0 (score: 0)

I believe the problem was in my settings.py file: I had missed a comma... yawn.

ITEM_PIPELINES = {
    'tutorial.pipelines.MySQLTest': 300,
}
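For context, ITEM_PIPELINES is an ordinary Python dict, so when more than one pipeline is registered, a missing comma between entries is a SyntaxError and the pipeline never gets registered. A sketch with a hypothetical second pipeline:

ITEM_PIPELINES = {
    'tutorial.pipelines.MySQLTest': 300,      # this comma is required...
    'tutorial.pipelines.OtherPipeline': 400,  # ...when another entry follows (hypothetical pipeline)
}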