
This spider is supposed to loop through http://www.saylor.org/site/syllabus.php?cid=NUMBER, where NUMBER runs from 1 to 404, and extract each page. But for some reason it skips many pages in the loop; for example, it skips 1 through 16. Can someone tell me what's going on?

Here's the code:

 from scrapy.spider import BaseSpider
 from scrapy.http import Request
 from opensyllabi.items import OpensyllabiItem

 import boto

 class OpensyllabiSpider(BaseSpider):
      name = 'saylor'
      allowed_domains = ['saylor.org']
      max_cid = 405
      i = 1

      def start_requests(self):
          for self.i in range(1, self.max_cid):
              yield Request('http://www.saylor.org/site/syllabus.php?cid=%d' % self.i, callback=self.parse_Opensyllabi)

      def parse_Opensyllabi(self, response):
          Opensyllabi = OpensyllabiItem()
          Opensyllabi['url'] = response.url
          Opensyllabi['body'] = response.body

          filename = ("/root/opensyllabi/data/saylor" + '%d' % self.i)
          syllabi = open(filename, "w")
          syllabi.write(response.body)

          return Opensyllabi

1 Answer


Try this. The problem is that `self.i` is a single attribute shared by every request Scrapy schedules, and requests are processed concurrently, so by the time `parse_Opensyllabi` runs for a given response, `self.i` has already moved on. Passing the index in the request's `meta` ties it to each individual request:

class OpensyllabiSpider(BaseSpider):
    name = 'saylor'
    allowed_domains = ['saylor.org']
    max_cid = 405

    def start_requests(self):
        for i in range(1, self.max_cid):
            # carry the loop index in the request's meta so each
            # response knows which cid it was requested with
            yield Request('http://www.saylor.org/site/syllabus.php?cid=%d' % i,
                          meta={'index': i},
                          callback=self.parse_Opensyllabi)

    def parse_Opensyllabi(self, response):
        Opensyllabi = OpensyllabiItem()
        Opensyllabi['url'] = response.url
        Opensyllabi['body'] = response.body

        # read the index back from the request that produced this response
        filename = "/root/opensyllabi/data/saylor" + '%d' % response.request.meta['index']
        syllabi = open(filename, "w")
        syllabi.write(response.body)

        return Opensyllabi
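
As a small variation on the same idea, `response.meta` is a shortcut for `response.request.meta`, and writing the file inside a `with` block makes sure it is closed. A minimal sketch of just the callback, reusing the paths and item fields from the question:

    def parse_Opensyllabi(self, response):
        # response.meta is a shortcut for response.request.meta
        index = response.meta['index']

        Opensyllabi = OpensyllabiItem()
        Opensyllabi['url'] = response.url
        Opensyllabi['body'] = response.body

        # the with block closes the file even if the write raises
        with open("/root/opensyllabi/data/saylor%d" % index, "w") as syllabi:
            syllabi.write(response.body)

        return Opensyllabi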
Answered 2013-01-16T09:22:36.570