
Main file:

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from bloggerx.items import BloggerxItem
from scrapy.spider import BaseSpider

class BloggerxSpider(BaseSpider):
    name = 'bloggerx'
    allowed_domains = ['abcr.com']
    start_urls = ['http://www.abcr.com/profile/07372831905432746031']
    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        item = BloggerxItem()
        item['gender'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Gender")]/following-sibling::node()/text()').extract()
        item['blogger_since'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[2]/text()').re('\d+')
        item['profile_views'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[3]/text()').re('\d+')
        item['industry'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Industry")]/following-sibling::node()/span/a/text()').extract()
        item['occupation'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Occupation")]/following-sibling::node()/span/a/text()').extract()
        item['locality'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="locality"]/a/text()').extract()
        item['region'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="region"]/a/text()').extract()
        item['country'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="country-name"]/a/text()').extract()
        item['introduction'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Introduction")]/following-sibling::node()/text()').extract()
        item['interests'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Interests")]/following-sibling::node()/span/a/text()').extract()
        item['email1'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li/script/text()').re('[\w.]+@[\w.]+[com]')
        item['email2'] = hxs.select('/html/body/div[2]/div/div[2]/div/ul/li[3]/div/text()').extract()
        item['website'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li[2]/a/@href').extract()
        item['films'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Films")]/following-sibling::node()/span/a/text()').extract()
        item['music'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Music")]/following-sibling::node()/span/a/text()').extract()
        item['books'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Books")]/following-sibling::node()/span/a/text()').extract()
        item['blogs_follow'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/text()').extract()
        item['blogs_follow_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/@href').extract()
        item['author_blogs'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/text()').extract()
        item['author_blogs_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/@href').extract()
        return item

Items file:

from scrapy.item import Item, Field

class BloggerxItem(Item):
    # define the fields for your item here like:
    # name = Field()
    gender = Field()
    blogger_since = Field()
    profile_views = Field()
    industry = Field()
    occupation = Field()
    locality = Field()
    introduction = Field()
    interests = Field()
    email1 = Field()
    website = Field()
    films = Field()
    music = Field()
    books = Field()
    region = Field()
    country = Field()
    email2 = Field()
    blogs_follow = Field()
    blogs_follow_link = Field()
    author_blogs = Field()
    author_blogs_link = Field()
    pass

Output when I run: scrapy crawl bloggerx -o items.json -t json

2013-03-07 16:39:24+0530 [scrapy] INFO: Scrapy 0.16.4 started (bot: bloggerx)
2013-03-07 16:39:24+0530 [scrapy] DEBUG: Enabled extensions: FeedExporter, LogStats, TelnetConsole, CloseSpider, WebService, CoreStats, SpiderState
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, RedirectMiddleware, CookiesMiddleware, HttpCompressionMiddleware, ChunkedTransferMiddleware, DownloaderStats
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Enabled item pipelines: 
2013-03-07 16:39:25+0530 [bloggerx] INFO: Spider opened
2013-03-07 16:39:25+0530 [bloggerx] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Telnet console listening on 0.0.0.0:6028
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Web service listening on 0.0.0.0:6085
2013-03-07 16:39:27+0530 [bloggerx] DEBUG: Crawled (200) <GET http://www.abcr.com/profile/07372831905432746031> (referer: None)
2013-03-07 16:39:27+0530 [bloggerx] INFO: Closing spider (finished)
2013-03-07 16:39:27+0530 [bloggerx] INFO: Dumping Scrapy stats:
    {'downloader/request_bytes': 249,
     'downloader/request_count': 1,
     'downloader/request_method_count/GET': 1,
     'downloader/response_bytes': 13459,
     'downloader/response_count': 1,
     'downloader/response_status_count/200': 1,
     'finish_reason': 'finished',
     'finish_time': datetime.datetime(2013, 3, 7, 11, 9, 27, 320389),
     'log_count/DEBUG': 7,
     'log_count/INFO': 4,
     'response_received_count': 1,
     'scheduler/dequeued': 1,
     'scheduler/dequeued/memory': 1,
     'scheduler/enqueued': 1,
     'scheduler/enqueued/memory': 1,
     'start_time': datetime.datetime(2013, 3, 7, 11, 9, 25, 967450)}
2013-03-07 16:39:27+0530 [bloggerx] INFO: Spider closed (finished)

The generated output file is empty, yet each individual hxs.select statement works fine when I try it in the scrapy shell. Am I doing something silly?


4 Answers


This is probably late, but I only learned Scrapy recently, as far as my own research has taken me...

In your imports you pull in CrawlSpider, but you declare the class against BaseSpider; that mismatch is the error:

from scrapy.contrib.spiders import CrawlSpider, Rule
class BloggerxSpider(BaseSpider):

After the fix:

from scrapy.contrib.spiders import CrawlSpider, Rule

class BloggerxSpider(CrawlSpider):

or

from scrapy.spider import BaseSpider

class BloggerxSpider(BaseSpider):
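
For completeness, a minimal sketch of what the CrawlSpider variant could look like with an explicit rule. The allow pattern and the parse_profile callback name here are illustrative, not from the question; note that a CrawlSpider callback must not be named parse:

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class BloggerxSpider(CrawlSpider):
    name = 'bloggerx'
    allowed_domains = ['blogger.com']
    start_urls = ['http://www.blogger.com/profile/07372831905432746031']

    # Follow profile links and hand each matched page to a custom
    # callback. CrawlSpider reserves parse() for itself, so the
    # callback must use a different name.
    rules = (
        Rule(SgmlLinkExtractor(allow=(r'/profile/\d+',)),
             callback='parse_profile', follow=True),
    )

    def parse_profile(self, response):
        # extraction logic as in the question goes here
        pass
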
Answered 2013-06-03T07:16:20.363

Instead of def parse_blogger, you need to have def parse.

def parse is the framework's default parsing callback. If you want to name it something else, you have to send the response to that new method yourself.

Here is an example of building your own request when you want your own parse method called back:

from scrapy.http import Request

request = Request("http://something", callback=self.parse_blogger)
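
Put in context, that might look something like this (a sketch only; parse_blogger is just the custom callback name being illustrated, and the profile URL is the one from the question on the working blogger.com domain):

from scrapy.http import Request
from scrapy.spider import BaseSpider

class BloggerxSpider(BaseSpider):
    name = 'bloggerx'
    allowed_domains = ['blogger.com']
    start_urls = ['http://www.blogger.com/']

    def parse(self, response):
        # parse() is the default callback for start_urls responses;
        # from here, route further requests to the custom callback.
        yield Request('http://www.blogger.com/profile/07372831905432746031',
                      callback=self.parse_blogger)

    def parse_blogger(self, response):
        # item extraction happens here
        pass
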
Answered 2013-03-07T11:28:23.000

If you are not explicitly defining rules and don't care about following links, use BaseSpider instead, but name the callback parse:

from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from bloggerx.items import BloggerxItem

class BloggerxSpider(BaseSpider):
    ...

Note that for CrawlSpiders, the documentation explicitly says you should not name the callback parse: doing so overrides a core CrawlSpider method, and the spider will no longer crawl correctly.

Answered 2013-03-07T11:49:06.630

The log output looks odd to me: there is no entry for your start_urls. The server responds with a 404, which Scrapy ignores by default, so no items come back. Also, the spider as posted doesn't declare BaseSpider properly, so this code won't compile; it looks like there's a copy/paste problem here.
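
As an aside, if you want those 404 responses delivered to your callback instead of silently dropped, you can opt in per spider. A sketch using the handle_httpstatus_list attribute honored by HttpErrorMiddleware, assuming the Scrapy version in use supports it:

from scrapy.spider import BaseSpider

class BloggerxSpider(BaseSpider):
    name = 'bloggerx'
    allowed_domains = ['abcr.com']
    start_urls = ['http://www.abcr.com/profile/07372831905432746031']

    # Let 404 responses through HttpErrorMiddleware so the failure
    # is visible in parse() rather than vanishing from the output.
    handle_httpstatus_list = [404]

    def parse(self, response):
        if response.status == 404:
            self.log('Got 404 for %s' % response.url)
            return
        # normal extraction continues here for 200 responses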

EDIT:

I changed the domain to blogger.com, and now one item comes back:

2013-03-08 09:02:28-0600 [scrapy] INFO: Scrapy 0.17.0 started (bot: oneoff)
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Overridden settings: {'NEWSPIDER_MODULE': 'oneoff.spiders', 'SPIDER_MODULES': ['oneoff.spiders'], 'USER_AGENT': 'Chromium OneOff 24.0.1312.56 Ubuntu 12.04 (24.0.1312.56-0ubuntu0.12.04.1)', 'BOT_NAME': 'oneoff'}
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled extensions: LogStats, TelnetConsole, CloseSpider, WebService, CoreStats, SpiderState
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, RedirectMiddleware, CookiesMiddleware, ChunkedTransferMiddleware, DownloaderStats
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled item pipelines:
2013-03-08 09:02:28-0600 [bloggerx] INFO: Spider opened
2013-03-08 09:02:28-0600 [bloggerx] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Telnet console listening on 0.0.0.0:6024
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Web service listening on 0.0.0.0:6081
2013-03-08 09:02:28-0600 [bloggerx] DEBUG: Crawled (200) <GET http://www.blogger.com/profile/07372831905432746031> (referer: None)
2013-03-08 09:02:28-0600 [bloggerx] DEBUG: Scraped from <200 http://www.blogger.com/profile/07372831905432746031>
    {'author_blogs': [u'Inserire comunicati stampa per il turismo',
                      u'Inserire Comunicati stampa e Article Marketing',
                      u'Video Quacos'],
     'author_blogs_link': [u'http://comunicati-stampa-per-il-turismo.blogspot.com/',
                           u'http://comunicati-stampa-vendita-online.blogspot.com/',
                           u'http://quacos.blogspot.com/'],
     'blogger_since': [u'2008'],
     'blogs_follow': [u'Abandonware Time',
                      u'AltroSeo.com',
                      u'ANSIMA notizie',
                      u'Cinnamon Girl',
                      u'enigmamigarun',
                      u'Fake Books - Libri di una riga.',
                      u'FM - COSMETICA E NON SOLO ',
                      u'GS BARBARIANS',
                      u'Il Disinformatico',
                      u'Linus&#39; blog',
                      u'Montefeltro Nuoto Master',
                      u'Nella Tana del Coniglio',
                      u'PHP and tips'],
     'blogs_follow_link': [u'http://squakenet.blogspot.com/',
                           u'http://www.altroseo.com/',
                           u'http://ansima.blogspot.com/',
                           u'http://cinnamongirl82.blogspot.com/',
                           u'http://enigmaamigarun.blogspot.com/',
                           u'http://fake-books.blogspot.com/',
                           u'http://valeriacosmeticafm.blogspot.com/',
                           u'http://gsbarbarians.blogspot.com/',
                           u'http://attivissimo.blogspot.com/',
                           u'http://torvalds-family.blogspot.com/',
                           u'http://montefeltronuotomaster.blogspot.com/',
                           u'http://anonimoconiglio.blogspot.com/',
                           u'http://phpntips.blogspot.com/'],
     'books': [],
     'country': [],
     'email1': [u'bloggiovanni.cappellini@gmail.com'],
     'email2': [u'cappogio@hotmail.com'],
     'films': [],
     'gender': [],
     'industry': [],
     'interests': [],
     'introduction': [],
     'locality': [],
     'music': [],
     'occupation': [],
     'profile_views': [u'553'],
     'region': [],
     'website': [u'http://www.quacos.com']}
2013-03-08 09:02:28-0600 [bloggerx] INFO: Closing spider (finished)
2013-03-08 09:02:28-0600 [bloggerx] INFO: Dumping Scrapy stats:
    {'downloader/request_bytes': 288,
     'downloader/request_count': 1,
     'downloader/request_method_count/GET': 1,
     'downloader/response_bytes': 13615,
     'downloader/response_count': 1,
     'downloader/response_status_count/200': 1,
     'finish_reason': 'finished',
     'finish_time': datetime.datetime(2013, 3, 8, 15, 2, 28, 948533),
     'item_scraped_count': 1,
     'log_count/DEBUG': 9,
     'log_count/INFO': 4,
     'response_received_count': 1,
     'scheduler/dequeued': 1,
     'scheduler/dequeued/memory': 1,
     'scheduler/enqueued': 1,
     'scheduler/enqueued/memory': 1,
     'start_time': datetime.datetime(2013, 3, 8, 15, 2, 28, 379242)}
2013-03-08 09:02:28-0600 [bloggerx] INFO: Spider closed (finished)

Spider:

from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from bloggerx.items import BloggerxItem

class BloggerxSpider(BaseSpider):
    name = 'bloggerx'
    allowed_domains = ['blogger.com']
    start_urls = ['http://www.blogger.com/profile/07372831905432746031']

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        item = BloggerxItem()
        item['gender'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Gender")]/following-sibling::node()/text()').extract()
        item['blogger_since'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[2]/text()').re('\d+')
        item['profile_views'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[3]/text()').re('\d+')
        item['industry'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Industry")]/following-sibling::node()/span/a/text()').extract()
        item['occupation'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Occupation")]/following-sibling::node()/span/a/text()').extract()
        item['locality'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="locality"]/a/text()').extract()
        item['region'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="region"]/a/text()').extract()
        item['country'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="country-name"]/a/text()').extract()
        item['introduction'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Introduction")]/following-sibling::node()/text()').extract()
        item['interests'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Interests")]/following-sibling::node()/span/a/text()').extract()
        item['email1'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li/script/text()').re('[\w.]+@[\w.]+[com]')
        item['email2'] = hxs.select('/html/body/div[2]/div/div[2]/div/ul/li[3]/div/text()').extract()
        item['website'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li[2]/a/@href').extract()
        item['films'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Films")]/following-sibling::node()/span/a/text()').extract()
        item['music'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Music")]/following-sibling::node()/span/a/text()').extract()
        item['books'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Books")]/following-sibling::node()/span/a/text()').extract()
        item['blogs_follow'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/text()').extract()
        item['blogs_follow_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/@href').extract()
        item['author_blogs'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/text()').extract()
        item['author_blogs_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/@href').extract()

        return item
Answered 2013-03-08T02:52:48.710