import scrapy
class QuotesSpider(scrapy.Spider):
    """Download quote listing pages and save each raw HTML body to disk.

    This is the Scrapy tutorial spider: it requests two listing pages from
    quotes.toscrape.com and writes each response body to a local file named
    ``quotes-<page>.html`` in the current working directory.
    """

    name = "quotes"

    def start_requests(self):
        """Yield one Request per hard-coded listing page, handled by :meth:`parse`.

        NOTE(review): the original paste had host-scrubbed placeholder URLs
        ("q...content-available-to-author-only...e.com"); restored to the
        canonical tutorial host quotes.toscrape.com.
        """
        urls = [
            'http://quotes.toscrape.com/page/1/',
            'http://quotes.toscrape.com/page/2/',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Write the raw response body to ``quotes-<page>.html``.

        :param response: the downloaded page; only ``.url`` and ``.body`` are used.
        """
        # URL ends with ".../page/<n>/", so the second-to-last "/"-separated
        # segment is the page number.
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html' % page
        with open(filename, 'wb') as f:
            f.write(response.body)
        # Spider.log() is deprecated in modern Scrapy; use the per-spider
        # logger with lazy %-style args instead.
        self.logger.info('Saved file %s', filename)
aW1wb3J0IHNjcmFweQoKCmNsYXNzIFF1b3Rlc1NwaWRlcihzY3JhcHkuU3BpZGVyKToKCW5hbWUgPSAicXVvdGVzIgoKCWRlZiBzdGFydF9yZXF1ZXN0cyhzZWxmKToKCQl1cmxzID0gWwoJCQknaHR0cDovL3EuLi5jb250ZW50LWF2YWlsYWJsZS10by1hdXRob3Itb25seS4uLmUuY29tL3BhZ2UvMS8nLAoJCQknaHR0cDovL3EuLi5jb250ZW50LWF2YWlsYWJsZS10by1hdXRob3Itb25seS4uLmUuY29tL3BhZ2UvMi8nLAoJCV0KCQlmb3IgdXJsIGluIHVybHM6CgkJCXlpZWxkIHNjcmFweS5SZXF1ZXN0KHVybD11cmwsIGNhbGxiYWNrPXNlbGYucGFyc2UpCgkJCQoJZGVmIHBhcnNlKHNlbGYsIHJlc3BvbnNlKToKCQlwYWdlID0gcmVzcG9uc2UudXJsLnNwbGl0KCIvIilbLTJdCgkJZmlsZW5hbWUgPSAncXVvdGVzLSVzLmh0bWwnICUgcGFnZQoJCXdpdGggb3BlbihmaWxlbmFtZSwgJ3diJykgYXMgZjoKCQkJZi53cml0ZShyZXNwb25zZS5ib2R5KQoJCXNlbGYubG9nKCdTYXZlZCBmaWxlICVzJyAlIGZpbGVuYW1lKQ==