I'm using Scrapy with scrapy-playwright. Here are my errback and start_requests:

import logging
from datetime import datetime

import scrapy


async def errback_close_page(self, failure):
    page = failure.request.meta["playwright_page"]
    await page.close()
def start_requests(self):
    if not self.start_urls and hasattr(self, "start_url"):
        raise AttributeError(
            "Crawling could not start: 'start_urls' not found "
            "or empty (but found 'start_url' attribute instead, "
            "did you miss an 's'?)"
        )
    for url in self.start_urls:
        npo = self.npos[url]
        logging.info("### crawl: %s", url)
        yield scrapy.Request(
            url,
            callback=self.my_parse,
            dont_filter=True,
            meta={
                "playwright": True,
                "playwright_include_page": True,
                "start_time": datetime.utcnow(),
            },
            cb_kwargs={"npo": npo},
            errback=self.errback_close_page,
        )
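For context, the start_time stashed in meta above can be read back in the callback to time the fetch; a quick sketch of that (self.logger is the standard Scrapy spider logger):

# Inside the callback:
elapsed = datetime.utcnow() - response.meta["start_time"]
self.logger.info("rendered %s in %s", response.url, elapsed)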
Why am I getting the error below, and how can I fix it? I have included the parse callback as well:
async def my_parse(self, response, npo):
    page = response.meta["playwright_page"]
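For reference, a minimal sketch of the usual scrapy-playwright callback shape I'm following (the extraction logic here is illustrative, not my real code):

async def my_parse(self, response, npo):
    page = response.meta["playwright_page"]
    try:
        # Illustrative extraction: read the rendered page title.
        title = await page.title()
        return {"npo": npo, "url": response.url, "title": title}
    finally:
        # Close the page when parsing is done; the errback is supposed
        # to close it when the request fails instead.
        await page.close()

Here is the traceback: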
Traceback (most recent call last):
  File "/home/ec2-user/anaconda3/envs/JupyterSystemEnv/lib/python3.10/site-packages/twisted/internet/defer.py", line 1065, in adapt
    extracted = result.result()
  File "/home/ec2-user/SageMaker/xx", line 50, in errback_close_page
    page = failure.request.meta["playwright_page"]
KeyError: 'playwright_page'
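From the traceback it looks like "playwright_page" was never added to the failing request's meta. As far as I understand, scrapy-playwright only injects the page object once a page has actually been created, so a request that fails before that point reaches the errback without the key. A defensive sketch of the errback under that assumption:

async def errback_close_page(self, failure):
    # "playwright_page" is only present if Playwright actually created
    # a page before the failure, so guard with .get() instead of [].
    page = failure.request.meta.get("playwright_page")
    if page is not None:
        await page.close()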