# ImmoScrap/ImmoScrap/spiders/Leboncoin.py
import json

import scrapy
# Search criteria for the Leboncoin "adfinder" API.
# Category 9 is real estate; types 1/2/5 restrict the estate kind; only
# "offer" ads (no wanted ads) priced between 0 and 300 000 are matched,
# within the given radius of the lat/lng point (unit not shown here —
# presumably metres; confirm against the API contract).
filters = {
    "category": {"id": "9"},
    "enums": {
        "real_estate_type": ["1", "2", "5"],
        "ad_type": ["offer"],
    },
    "ranges": {
        "rooms": {},
        "square": {},
        "price": {"min": 0, "max": 300000},
    },
    "location": {
        "area": {"lat": 45.521971, "lng": 4.869926, "radius": 1000},
        "city_zipcodes": [],
        "departments": [],
        "disable_region": False,
        "locations": [],
        "regions": [],
    },
    "keywords": {"type": "all"},
    "owner": {},
}

# Full request payload: newest ads first, up to 100 results per page.
data = {
    "pivot": "0,0,0",
    "limit": 100,
    "limit_alu": 1,
    "filters": filters,
    "sort_by": "time",
    "sort_order": "desc",
}

# Request headers impersonating the official Android app, including its
# API key; the endpoint expects a JSON body.
headers = {
    "User-Agent": "LBC;Android;6.0;Android SDK built for x86;phone;616a1ca77ca70180;wwan;4.30.4.0;70400;3",
    "api_key": "ba0c2dad52b3ec",
    "Content-Type": "application/json; charset=UTF-8",
    "Accept-Encoding": "gzip, deflate",
}
class LeboncoinSpider(scrapy.Spider):
    """Spider querying the Leboncoin ad-search API for real-estate offers.

    Fix over the original: the module-level ``data`` payload and ``headers``
    were built but never attached to any request — the API URL was fetched
    with a bare GET.  The search endpoint takes a JSON POST body, so the
    request now carries ``json.dumps(data)`` and the app-spoofing headers.
    """

    name = "leboncoin"

    def start_requests(self):
        """Issue the API search as a POST, plus the original tutorial GET."""
        # The adfinder endpoint expects the search criteria as a JSON body.
        yield scrapy.Request(
            url='https://api.leboncoin.fr/api/adfinder/v1/search',
            method='POST',
            body=json.dumps(data),
            headers=headers,
            callback=self.parse,
        )
        # Leftover tutorial request, kept so existing output is unchanged.
        yield scrapy.Request(
            url='http://quotes.toscrape.com/page/2/',
            callback=self.parse,
        )

    def parse(self, response):
        """Dump the raw response body to a local HTML file for inspection."""
        # Second-to-last URL path segment names the output file
        # (e.g. ".../page/2/" -> "quotes-page.html" is NOT produced;
        # it yields "quotes-2.html" because of the trailing slash).
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html' % page
        # 'wb' because response.body is bytes.
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file %s' % filename)