I'm trying to learn how to use asyncio to build an asynchronous web crawler. The following is a crude crawler to test out the framework:
import asyncio, aiohttp
from bs4 import BeautifulSoup

@asyncio.coroutine
def fetch(url):
    with (yield from sem):
        print(url)
        response = yield from aiohttp.request('GET', url)
        response = yield from response.read_and_close()
        return response.decode('utf-8')

@asyncio.coroutine
def get_links(url):
    page = yield from fetch(url)
    soup = BeautifulSoup(page)
    links = soup.find_all('a', href=True)
    return [link['href'] for link in links if link['href'].find('www') != -1]

@asyncio.coroutine
def crawler(seed, depth, max_depth=3):
    while True:
        if depth > max_depth:
            break
        links = yield from get_links(seed)
        depth += 1
        coros = [asyncio.Task(crawler(link, depth)) for link in links]
        yield from asyncio.gather(*coros)

sem = asyncio.Semaphore(5)
loop = asyncio.get_event_loop()
loop.run_until_complete(crawler("http://www.bloomberg.com", 0))
Whilst asyncio seems to be documented quite well, aiohttp has very little documentation, so I'm struggling to work some things out for myself.
Firstly, is there a way for us to detect the encoding of the page response?
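At the moment I'm just hard-coding UTF-8 in fetch(). What I'd like is to read the charset out of the Content-Type header and fall back to UTF-8 only if it's missing, something like this rough sketch (the header parsing here is just my own guess, I don't know whether aiohttp exposes anything nicer):

@asyncio.coroutine
def fetch(url):
    with (yield from sem):
        response = yield from aiohttp.request('GET', url)
        raw = yield from response.read_and_close()
        # Guess the charset from a header like "text/html; charset=ISO-8859-1",
        # otherwise fall back to UTF-8.
        content_type = response.headers.get('Content-Type', '')
        charset = 'utf-8'
        if 'charset=' in content_type:
            charset = content_type.split('charset=')[-1].split(';')[0].strip()
        return raw.decode(charset, errors='replace')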
Secondly, can we request that connections are kept alive within a session? Or is this on by default, as it is in requests?
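Coming from requests, my guess would be to create some kind of connector once and pass it into every request so the connection pool gets reused, roughly along these lines (the connector argument is an assumption on my part, and I don't know whether keep-alive is already the default):

# Share one connector across all requests, the way a requests.Session
# reuses its connection pool.
connector = aiohttp.TCPConnector()

@asyncio.coroutine
def fetch(url):
    with (yield from sem):
        response = yield from aiohttp.request('GET', url, connector=connector)
        body = yield from response.read_and_close()
        return body.decode('utf-8')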