Scrapy does not crawl all links recursively

Date: 2018-12-18 15:50:40

Tags: python-3.x scrapy scrapy-spider

I need all the internal links from every page of a website for analysis. I searched and found many similar questions, and found this code via Mithu, which gave a possible answer. However, it does not return all of the links available at the second level of page depth: the generated csv file contains only 676 records, while the website has 1000.

Working code:

import csv  # newline='' below avoids blank line gaps in the generated csv file
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from eylinks.items import LinkscrawlItem
outfile = open("data.csv", "w", newline='')
writer = csv.writer(outfile)
class ToscrapeSpider(scrapy.Spider):

    name = "toscrapesp"
    start_urls = ["http://books.toscrape.com/"]

    rules = ([Rule(LinkExtractor(allow=r".*"), callback='parse', follow=True)])


    def parse(self, response):
        extractor = LinkExtractor(allow_domains='toscrape.com')
        links = extractor.extract_links(response)
        for link in links:
            yield scrapy.Request(link.url, callback=self.collect_data)

    def collect_data(self, response):
        global writer                                  
        for item in response.css('.product_pod'):
            product = item.css('h3 a::text').extract_first()
            value = item.css('.price_color::text').extract_first()
            lnk = response.url
            stats = response.status
            print(lnk)
            yield {'Name': product, 'Price': value,"URL":lnk,"Status":stats}  
            writer.writerow([product,value,lnk,stats]) 
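
A likely reason the crawl stops short: rules are only processed by CrawlSpider, while the spider above subclasses scrapy.Spider, so the rule is ignored and only the links found on the start page are followed (collect_data never yields any further requests). Below is a minimal sketch of the same crawl written as a CrawlSpider; the class and callback names are assumptions, and the selectors are taken from collect_data above:

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

class ToscrapeCrawlSpider(CrawlSpider):
    name = "toscrape_crawl"
    allowed_domains = ["toscrape.com"]
    start_urls = ["http://books.toscrape.com/"]

    # follow=True keeps extracting links from every visited page,
    # so category sub-pages and pagination are reached as well
    rules = (
        Rule(LinkExtractor(allow_domains="toscrape.com"),
             callback="parse_item", follow=True),
    )

    def parse_item(self, response):
        # same fields as collect_data() above; CrawlSpider reserves
        # parse() for its own link handling, hence the different name
        for item in response.css(".product_pod"):
            yield {
                "Name": item.css("h3 a::text").extract_first(),
                "Price": item.css(".price_color::text").extract_first(),
                "URL": response.url,
                "Status": response.status,
            }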

1 Answer:

Answer 0 (score: 1)

To extract the links, try the following:

# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
import csv 

outfile = open("data.csv", "w", newline='')
writer = csv.writer(outfile)
class BooksScrapySpider(scrapy.Spider):
    name = 'books'
    allowed_domains = ['books.toscrape.com']
    start_urls = ['http://books.toscrape.com/']

    def parse(self, response):
        # follow every book link on the current listing page
        books = response.xpath('//h3/a/@href').extract()
        for book in books:
            url = response.urljoin(book)
            yield Request(url, callback=self.parse_book)

        # then follow the pagination link, if there is one
        next_page_url = response.xpath(
            "//a[text()='next']/@href").extract_first()
        if next_page_url:
            absolute_next_page = response.urljoin(next_page_url)
            yield Request(absolute_next_page)

    def parse_book(self, response):
        # scrape a single book detail page
        title = response.css("h1::text").extract_first()
        price = response.xpath(
            "//*[@class='price_color']/text()").extract_first()
        url = response.request.url

        yield {'title': title,
               'price': price,
               'url': url,
               'status': response.status}
        writer.writerow([title,price,url,response.status])
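
A note on the design: outfile is opened at module level and never closed, so buffered rows can be lost if the process exits abnormally. Since the spider already yields each record as a dict, Scrapy's built-in feed export can write the csv instead, e.g. by running the spider with scrapy crawl books -o items.csv (the output filename here is just an example); that makes the module-level csv writer unnecessary.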