Refactoring for readability
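Collapse the three copies of the album/image scraper dispatch into a single fallback() classmethod, restrict the image-endpoint retry to gallery pages, log and fall back when the API returns a bad status or an empty data payload, and tighten the comments.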
@@ -157,39 +157,31 @@ class ImgurApiMIMEParser(BaseMIMEParser):
                 domain = 'album'
             else:
                 # This could be a gallery or a single image, but there doesn't
-                # seem to be a way to reliably distinguish between the two just
-                # from the URL. So we assume a gallery, which appears to be more
-                # common, and fallback to an image request upon failure.
+                # seem to be a way to reliably distinguish between the two.
+                # Assume a gallery, which appears to be more common, and fallback
+                # to an image request upon failure.
                 domain = 'gallery'
 
         if not cls.CLIENT_ID:
-            # Use the old page scraper if no API key was provided
-            if domain == 'album':
-                return ImgurScrapeAlbumMIMEParser.get_mimetype(url)
-            else:
-                return ImgurScrapeMIMEParser.get_mimetype(url)
+            return cls.fallback(url, domain)
 
         api_url = endpoint.format(domain=domain, page_hash=page_hash)
         r = requests.get(api_url, headers=headers)
 
-        if r.status_code != 200 or 'error' in r.json():
-            # Fallback and try to download using the image endpoint
+        if domain == 'gallery' and r.status_code != 200:
+            # Not a gallery, try to download using the image endpoint
             api_url = endpoint.format(domain='image', page_hash=page_hash)
             r = requests.get(api_url, headers=headers)
 
-        if r.status_code != 200 or 'error' in r.json():
-            if r.status_code != 200:
-                _logger.warning('Imgur API request failed, status %s', r.status_code)
-            else:
-                _logger.warning('Imgur API request failed, resp %s', r.json())
-
-            # Fallback to using the old page scraper
-            if domain == 'album':
-                return ImgurScrapeAlbumMIMEParser.get_mimetype(url)
-            else:
-                return ImgurScrapeMIMEParser.get_mimetype(url)
+        if r.status_code != 200:
+            _logger.warning('Imgur API failure, status %s', r.status_code)
+            return cls.fallback(url, domain)
 
-        data = r.json()['data']
+        data = r.json().get('data')
+        if not data:
+            _logger.warning('Imgur API failure, resp %s', r.json())
+            return cls.fallback(url, domain)
+
         if 'images' in data:
             # TODO: handle imgur albums with mixed content, i.e. jpeg and gifv
             link = ' '.join([d['link'] for d in data['images'] if not d['animated']])
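For review context, the control flow produced by this hunk reads roughly as follows. This is a minimal sketch outside the class, assuming the Imgur API v3 URL template and Client-ID header that the method's endpoint and headers variables are built from; resolve() is a hypothetical stand-in for the relevant part of get_mimetype().

import logging

import requests

_logger = logging.getLogger(__name__)

# Assumed shape of the endpoint template (Imgur API v3)
ENDPOINT = 'https://api.imgur.com/3/{domain}/{page_hash}'


def resolve(page_hash, domain, client_id):
    # Hypothetical condensation of get_mimetype() after this change
    headers = {'Authorization': 'Client-ID {0}'.format(client_id)}
    r = requests.get(ENDPOINT.format(domain=domain, page_hash=page_hash),
                     headers=headers)
    if domain == 'gallery' and r.status_code != 200:
        # Not a gallery, retry against the image endpoint
        r = requests.get(ENDPOINT.format(domain='image', page_hash=page_hash),
                         headers=headers)
    if r.status_code != 200:
        _logger.warning('Imgur API failure, status %s', r.status_code)
        return None  # the real method falls back to the HTML scrapers here
    data = r.json().get('data')
    if not data:
        _logger.warning('Imgur API failure, resp %s', r.json())
        return None  # likewise falls back
    return data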
@@ -201,6 +193,16 @@ class ImgurApiMIMEParser(BaseMIMEParser):
         link = link.replace('http://', 'https://')
         return link, mime
 
+    @classmethod
+    def fallback(cls, url, domain):
+        """
+        Attempt to use one of the scrapers if the API doesn't work
+        """
+        if domain == 'album':
+            return ImgurScrapeAlbumMIMEParser.get_mimetype(url)
+        else:
+            return ImgurScrapeMIMEParser.get_mimetype(url)
+
 
 class ImgurScrapeMIMEParser(BaseMIMEParser):
     """
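The new helper is plain dispatch, so a cheap regression check is to assert that each domain routes to the expected scraper. A sketch of such a test, assuming these classes live in rtv.mime_parsers and mocking the scraper so nothing touches the network (the URL and return value are made up):

from unittest import mock

from rtv.mime_parsers import (ImgurApiMIMEParser,
                              ImgurScrapeAlbumMIMEParser)


def test_fallback_routes_albums_to_album_scraper():
    url = 'https://imgur.com/a/fake123'          # hypothetical album URL
    fake_result = (url, 'image/x-imgur-album')   # made-up return value
    with mock.patch.object(ImgurScrapeAlbumMIMEParser, 'get_mimetype',
                           return_value=fake_result) as scraper:
        assert ImgurApiMIMEParser.fallback(url, 'album') == fake_result
        scraper.assert_called_once_with(url)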