Web Scraping in Python
Thomas Laetsch
Data Scientist, NYU
import scrapy
from scrapy.crawler import CrawlerProcess


class SpiderClassName(scrapy.Spider):
    """Template spider: replace the placeholder body with crawling logic."""

    name = "spider_name"
    # the code for your spider
    ...


# Build a crawler process, register the spider class with it, and run.
process = CrawlerProcess()
process.crawl(SpiderClassName)
process.start()
import scrapy
from scrapy.crawler import CrawlerProcess


class SpiderClassName(scrapy.Spider):
    """Skeleton spider: fill in start_requests/parse with your crawl logic."""

    name = "spider_name"
    # the code for your spider
    ...


# initiate a CrawlerProcess
process = CrawlerProcess()
# tell the process which spider to use
# FIX: the original passed the undefined name `YourSpider`, which raises
# NameError at runtime; the class defined above is SpiderClassName.
process.crawl(SpiderClassName)
# start the crawling process
process.start()
class DCspider(scrapy.Spider):
    """Spider that fetches the DataCamp course listing and dumps the raw HTML."""

    name = 'dc_spider'

    def start_requests(self):
        # Single seed URL; every response is routed to self.parse.
        for url in ['https://www.datacamp.com/courses/all']:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # simple example: write out the html
        with open('DC_courses.html', 'wb') as fh:
            fh.write(response.body)
The `start_requests` method
Web Scraping in Python