
I'm scraping a real-estate website with Scrapy, using an XMLFeedSpider.

Each URL request generated by my spider (via start_urls) returns a page of XML containing a set of listings and a link to the next page (search results are limited to 50 listings per page).
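(To give an idea of the structure, here is a rough sketch of what one result page looks like. Only the <annonce> and <pageSuivante> element names are taken from the XPath expressions used further down; the wrapper elements and the values are invented for illustration.)

from scrapy.selector import XmlXPathSelector

# Hypothetical excerpt of one result page; wrapper elements and values are made up.
sample = '''
<recherche>
  <annonces>
    <annonce>
      <idAnnonce>12345</idAnnonce>
      <titre>Appartement 3 pieces</titre>
      <prix>150000</prix>
    </annonce>
    <!-- ... up to 50 <annonce> elements per page ... -->
  </annonces>
  <pageSuivante>http://ws.seloger.com/search.xml?cp=72&amp;idtt=2&amp;SEARCHpg=2</pageSuivante>
</recherche>
'''

xxs = XmlXPathSelector(text=sample)
print xxs.select('//pageSuivante/text()').extract()
# ['http://ws.seloger.com/search.xml?cp=72&idtt=2&SEARCHpg=2']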

So how can I add that extra page as a new request for my spider to follow?

I've been searching Stack Overflow for a while now and can't find a simple answer to my problem!

Below is the code I have in my spider. I updated it with the parse_nodes() method Paul mentioned, but for some reason the next URL is never picked up.

Could I yield the extra requests from the adapt_response method instead?

from scrapy.spider import log
from scrapy.selector import XmlXPathSelector
from scrapy.contrib.spiders import XMLFeedSpider
from crawler.items import RefItem, PicItem
from crawler.seloger_helper import urlbuilder
from scrapy.http import Request

class Seloger_spider_XML(XMLFeedSpider):
    name = 'Seloger_spider_XML'
    allowed_domains = ['seloger.com']
    iterator = 'iternodes' # This is actually unnecessary, since it's the default value
    itertag = 'annonce'  

    def __init__(self, departement=None, *args, **kwargs):
        '''Spider initialized with the departement as argument'''
        super(Seloger_spider_XML, self).__init__(*args, **kwargs)
        #self.start_urls = urlbuilder(departement)  # helper function which generates start_urls
        self.start_urls = ['http://ws.seloger.com/search.xml?cp=72&idtt=2&tri=d_dt_crea&SEARCHpg=1']

    def parse_node(self, response, node):
        items = []
        item = RefItem()

        item['ref'] = int(''.join(node.select('//annonce/idAnnonce/text()').extract()))
        item['desc'] = ''.join(node.select('//annonce/descriptif/text()').extract()).encode('utf-8')
        item['libelle'] = ''.join(node.select('//annonce/libelle/text()').extract()).encode('utf-8')
        item['titre'] = ''.join(node.select('//annonce/titre/text()').extract()).encode('utf-8')
        item['ville'] = ''.join(node.select('//annonce/ville/text()').extract()).encode('utf-8')
        item['url'] = ''.join(node.select('//annonce/permaLien/text()').extract()).encode('utf-8')
        item['prix'] = ''.join(node.select('//annonce/prix/text()').extract())
        item['prixunite'] = ''.join(node.select('//annonce/prixUnite/text()').extract())
        item['datemaj'] = ''.join(node.select('//annonce/dtFraicheur/text()').extract())[:10]
        item['datecrea'] = ''.join(node.select('//annonce/dtCreation/text()').extract())[:10]
        item['lati'] = ''.join(node.select('//annonce/latitude/text()').extract())
        item['longi'] = ''.join(node.select('//annonce/longitude/text()').extract())
        item['surface'] = ''.join(node.select('//annonce/surface/text()').extract())
        item['surfaceunite'] = ''.join(node.select('//annonce/surfaceUnite/text()').extract())
        item['piece'] = ''.join(node.select('//annonce/nbPiece/text()').extract()).encode('utf-8')
        item['ce'] = ''.join(node.select('//annonce/dbilanEmissionGES/text()').extract()).encode('utf-8')

        items.append(item)

        # one PicItem per thumbnail URL, linked back to the listing through its ref
        for photos in node.select('//annonce/photos'):
            for link in photos.select('photo/thbUrl/text()').extract():
                pic = PicItem()
                pic['pic'] = link.encode('utf-8')
                pic['refpic'] = item['ref']
                items.append(pic)

        return items

    def parse_nodes(self, response, nodes):
        # call the built-in method that itself calls parse_node()
        # and yield whatever it returns
        for n in super(Seloger_spider_XML, self).parse_nodes(response, nodes):
            yield n

        # once the items/nodes are done, look for the next page link using XPath
        # these lines are borrowed from
        # https://github.com/scrapy/scrapy/blob/master/scrapy/contrib/spiders/feed.py#L73
        selector = XmlXPathSelector(response)
        self._register_namespaces(selector)
        for link_url in selector.select('//pageSuivante/text()').extract():
            yield Request(link_url)
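(To rule out the XPath itself, the Scrapy shell can help — in Scrapy versions of this era it exposes the fetched response wrapped in an XmlXPathSelector named xxs:)

# scrapy shell 'http://ws.seloger.com/search.xml?cp=72&idtt=2&tri=d_dt_crea&SEARCHpg=1'
xxs.select('//pageSuivante/text()').extract()
# if this returns the next-page URL, the selector is fine and the problem lies elsewhere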

Thank you, Gilles


1 Answer


You can override the parse_nodes() method and hook the "next page" URL extraction into it.

The example below is based on the XMLFeedExample from the Scrapy docs:

from scrapy import log
from scrapy.contrib.spiders import XMLFeedSpider
from myproject.items import TestItem
from scrapy.selector import XmlXPathSelector
from scrapy.http import Request

class MySpider(XMLFeedSpider):
    name = 'example.com'
    allowed_domains = ['example.com']
    start_urls = ['http://www.example.com/feed.xml']
    iterator = 'iternodes' # This is actually unnecessary, since it's the default value
    itertag = 'item'

    def parse_node(self, response, node):
        log.msg('Hi, this is a <%s> node!: %s' % (self.itertag, ''.join(node.extract())))

        item = TestItem()
        item['id'] = node.select('@id').extract()
        item['name'] = node.select('name').extract()
        item['description'] = node.select('description').extract()
        return item

    def parse_nodes(self, response, nodes):
        # call built-in method that itself calls parse_node()
        # and yield whatever it returns
        for n in super(MySpider, self).parse_nodes(response, nodes):
            yield n

        # once you're done with item/nodes
        # look for the next page link using XPath
        # these lines are borrowed from
        # https://github.com/scrapy/scrapy/blob/master/scrapy/contrib/spiders/feed.py#L73
        selector = XmlXPathSelector(response)
        self._register_namespaces(selector)
        for link_url in selector.select('//pageSuivante/text()').extract():
            print "link_url", link_url
            yield Request(link_url)
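One follow-up note (an assumption, not something stated by the feed or the answer above): Request() expects an absolute URL. If pageSuivante ever holds a relative path, the link can be defensively joined against the response URL; urljoin leaves absolute URLs untouched. With import urlparse added at the top of the module, the final loop becomes:

        for link_url in selector.select('//pageSuivante/text()').extract():
            # urlparse.urljoin() is a no-op when link_url is already absolute
            yield Request(urlparse.urljoin(response.url, link_url))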
Answered 2013-10-06 at 21:48