I have been looking for a better way to scrape external website from another a main source website. To better explain it let me use an example with yelp.com to explain what am trying to do (though my target is not yelp).
- I would scrape title and address
- visit link that titles leads to to get company website
- I would like to extract emails from source code of the main website. (I know its difficult, but i am not crawling all pages i am assuming most site have contact in their url e.g site.com/contact.php)
- The point is while scraping from yelp and storing data in a field, I want to get external data from a companies main website.
Below is my code can't figure out how to do it using scrapy.
# -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from comb.items import CombItem, SiteItem
class ComberSpider(CrawlSpider):
name = "comber"
allowed_domains = ["example.com"]
query = 'shoe'
page = 'http://www.example.com/corp/' + query + '/1.html'
start_urls = (
page,
)
rules = (Rule(LinkExtractor(allow=(r'corp/.+/\d+\.html'), restrict_xpaths=("//a[@class='next']")),
callback="parse_items", follow=True),
)
def parse_items(self, response):
for sel in response.xpath("//div[@class='item-main']"):
item = CombItem()
item['company_name'] = sel.xpath("h2[@class='title']/a/text()").extract()
item['contact_url'] = sel.xpath("div[@class='company']/a/@href").extract()[0]
item['gold_supplier'] = sel.xpath("div[@class='item-title']/a/@title").extract()[0]
company_details = sel.xpath("div[@class='attrs']/div[@class='attr']/span['name']/text()").extract()
item = self.parse_meta(sel, item, company_details)
request = scrapy.Request(item['contact_url'], callback=self.parse_site)
request.meta['item'] = item
yield request
def parse_meta(self, sel, item, company_details):
if (company_details):
if "Products:" in company_details:
item['products'] = sel.xpath("./div[@class='value']//text()").extract()
if "Country/Region:" in company_details:
item['country'] = sel.xpath("./div[@class='right']"
+ "/span[@data-coun]/text()").extract()
if "Revenue:" in company_details:
item['revenue'] = sel.xpath("./div[@class='right']/"
+ "span[@data-reve]/text()").extract()
if "Markets:" in company_details:
item['markets'] = sel.xpath("./div[@class='value']/span[@data-mark]/text()").extract()
return item
def parse_site(self, response):
item = response.meta['item']
# this value of item['websites'] would be http://target-company.com, http://any-other-website.com
# my aim is to jump to the http://company.com and scrap data from it's contact page and
# store it as an item like item['emails'] = [[email protected], [email protected]]
# Please how can this be done in this same project
# the only thing i can think of is store the item['websites'] and other values of item and make another project
# even with that it would still not work because of the allowed_domains and start_urls
item['websites'] = response.xpath("//div[@class='company-contact-information']/table/tr/td/a/@href").extract()
print(item)
print('*'* 50)
yield item
"""
from scrapy.item import Item, Field
class CombItem(Item):
company_name = Field()
main_products = Field()
contact_url = Field()
revenue = Field()
gold_supplier = Field()
country = Field()
markets= Field()
Product_Home = Field()
websites = Field()
"""
#emails = Field() not implemented because emails need to be extracted from websites which is different from start_url