PyLint and PEP8 formatting

This commit is contained in:
Michael Lazar
2018-04-02 17:54:15 -04:00
parent ec951cf163
commit 100515769c
19 changed files with 103 additions and 85 deletions

View File

@@ -59,7 +59,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W"
disable=execfile-builtin,zip-builtin-not-iterating,range-builtin-not-iterating,hex-method,old-division,file-builtin,long-builtin,input-builtin,no-absolute-import,invalid-name,delslice-method,suppressed-message,coerce-builtin,buffer-builtin,import-star-module-level,round-builtin,old-ne-operator,apply-builtin,missing-final-newline,basestring-builtin,xrange-builtin,getslice-method,filter-builtin-not-iterating,map-builtin-not-iterating,raw_input-builtin,indexing-exception,dict-iter-method,metaclass-assignment,setslice-method,next-method-called,intern-builtin,using-cmp-argument,missing-docstring,oct-method,backtick,print-statement,reload-builtin,long-suffix,old-raise-syntax,unicode-builtin,nonzero-method,old-octal-literal,cmp-method,useless-suppression,dict-view-method,parameter-unpacking,unpacking-in-except,coerce-method,unichr-builtin,raising-string,cmp-builtin,reduce-builtin,standarderror-builtin
disable=execfile-builtin,zip-builtin-not-iterating,range-builtin-not-iterating,hex-method,old-division,file-builtin,long-builtin,input-builtin,no-absolute-import,invalid-name,delslice-method,suppressed-message,coerce-builtin,buffer-builtin,import-star-module-level,round-builtin,old-ne-operator,apply-builtin,missing-final-newline,basestring-builtin,xrange-builtin,getslice-method,filter-builtin-not-iterating,map-builtin-not-iterating,raw_input-builtin,indexing-exception,dict-iter-method,metaclass-assignment,setslice-method,next-method-called,intern-builtin,using-cmp-argument,missing-docstring,oct-method,backtick,print-statement,reload-builtin,long-suffix,old-raise-syntax,unicode-builtin,nonzero-method,old-octal-literal,cmp-method,useless-suppression,dict-view-method,parameter-unpacking,unpacking-in-except,coerce-method,unichr-builtin,raising-string,cmp-builtin,reduce-builtin,standarderror-builtin,no-else-return,too-many-locals,too-many-statements,too-few-public-methods,too-many-public-methods,too-many-instance-attributes
[REPORTS]
@@ -337,7 +337,7 @@ exclude-protected=_asdict,_fields,_replace,_source,_make
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
max-args=7
# Argument names that match this expression will be ignored. Default to name
# with leading underscore

View File

@@ -24,7 +24,6 @@ _____/ /_/ \___/____/|__/ \___//_/
(RTV)
"""
from __future__ import unicode_literals
from .__version__ import __version__

View File

@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
from __future__ import unicode_literals
from __future__ import print_function
@@ -44,9 +46,9 @@ from .subreddit_page import SubredditPage
from .exceptions import ConfigError, SubredditError
from .__version__ import __version__
_logger = logging.getLogger(__name__)
# Pycharm debugging note:
# You can use pycharm to debug a curses application by launching rtv in a
# console window (python -m rtv) and using pycharm to attach to the remote

View File

@@ -13,7 +13,6 @@ from six.moves import configparser
from . import docs, __version__
from .objects import KeyMap
PACKAGE = os.path.dirname(__file__)
HOME = os.path.expanduser('~')
TEMPLATES = os.path.join(PACKAGE, 'templates')
@@ -30,7 +29,6 @@ THEMES = os.path.join(XDG_CONFIG_HOME, 'rtv', 'themes')
def build_parser():
parser = argparse.ArgumentParser(
prog='rtv', description=docs.SUMMARY,
epilog=docs.CONTROLS,

View File

@@ -747,7 +747,7 @@ class SubscriptionContent(Content):
name = 'Popular Subreddits'
items = reddit.get_popular_subreddits(limit=None)
else:
raise exceptions.SubscriptionError('Invalid type %s', content_type)
raise exceptions.SubscriptionError('Invalid type %s' % content_type)
return cls(name, items, loader)

View File

@@ -56,11 +56,14 @@ class OpenGraphMIMEParser(BaseMIMEParser):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
for og_type in ['video', 'image']:
tag = soup.find('meta',
attrs={'property':'og:' + og_type + ':secure_url'}) or \
soup.find('meta', attrs={'property': 'og:' + og_type})
prop = 'og:' + og_type + ':secure_url'
tag = soup.find('meta', attrs={'property': prop})
if not tag:
prop = 'og:' + og_type
tag = soup.find('meta', attrs={'property': prop})
if tag:
return BaseMIMEParser.get_mimetype(tag.get('content'))
return url, None
@@ -148,7 +151,7 @@ class RedditVideoMIMEParser(BaseMIMEParser):
rep = sorted(reps, reverse=True,
key=lambda t: int(t.get('bandwidth')))[0]
return url + '/' + rep.find('baseurl').text, 'video/mp4'
else:
return request_url, 'video/x-youtube'
@@ -291,7 +294,7 @@ class ImgurScrapeAlbumMIMEParser(BaseMIMEParser):
if urls:
return " ".join(urls), 'image/x-imgur-album'
else:
return url, None
@@ -324,7 +327,7 @@ class TwitchMIMEParser(BaseMIMEParser):
suffix = '-preview.jpg'
if thumbnail.endswith(suffix):
return thumbnail.replace(suffix, '.mp4'), 'video/mp4'
else:
return url, None
@@ -348,7 +351,7 @@ class VidmeMIMEParser(BaseMIMEParser):
resp = requests.get('https://api.vid.me/videoByUrl?url=' + url)
if resp.status_code == 200 and resp.json()['status']:
return resp.json()['video']['complete_url'], 'video/mp4'
else:
return url, None
@@ -371,24 +374,28 @@ class LiveleakMIMEParser(BaseMIMEParser):
urls = []
videos = soup.find_all('video')
for vid in videos:
source = vid.find('source', attr={'res': 'HD'}) \
or vid.find('source')
source = vid.find('source', attr={'res': 'HD'})
source = source or vid.find('source')
if source:
urls.append((source.get('src'), source.get('type')))
# TODO: Handle pages with multiple videos
if urls:
return urls[0]
else:
iframe = soup.find_all(lambda t: t.name == 'iframe' and
'youtube.com' in t['src'])
def filter_iframe(t):
return t.name == 'iframe' and 'youtube.com' in t['src']
iframe = soup.find_all(filter_iframe)
if iframe:
return YoutubeMIMEParser.get_mimetype(iframe[0]['src'].strip('/'))
else:
return url, None
class ClippitUserMIMEParser(BaseMIMEParser):
"""
Clippit uses a video player container
"""
pattern = re.compile(r'https?://(www\.)?clippituser\.tv/c/.+$')
@@ -447,8 +454,8 @@ class FlickrMIMEParser(OpenGraphMIMEParser):
"""
Flickr uses the Open Graph protocol
"""
pattern = re.compile(r'https?://(www\.)?flickr\.com/photos/[^/]+/[^/]+/?$')
# TODO: handle albums/photosets (https://www.flickr.com/services/api)
pattern = re.compile(r'https?://(www\.)?flickr\.com/photos/[^/]+/[^/]+/?$')
class WorldStarHipHopMIMEParser(BaseMIMEParser):
@@ -466,18 +473,21 @@ class WorldStarHipHopMIMEParser(BaseMIMEParser):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
source = soup.find_all(lambda t: t.name == 'source' and
t['src'] and t['type'] == 'video/mp4')
def filter_source(t):
return t.name == 'source' and t['src'] and t['type'] == 'video/mp4'
source = soup.find_all(filter_source)
if source:
return source[0]['src'], 'video/mp4'
else:
iframe = soup.find_all(lambda t: t.name == 'iframe' and
'youtube.com' in t['src'])
def filter_iframe(t):
return t.name == 'iframe' and 'youtube.com' in t['src']
iframe = soup.find_all(filter_iframe)
if iframe:
return YoutubeMIMEParser.get_mimetype(iframe[0]['src'])
else:
return url, None
return url, None
# Parsers should be listed in the order they will be checked

View File

@@ -18,7 +18,6 @@ from .config import TEMPLATES
from .exceptions import InvalidRefreshToken
from .packages.praw.errors import HTTPException, OAuthException
_logger = logging.getLogger(__name__)
INDEX = os.path.join(TEMPLATES, 'index.html')
@@ -82,11 +81,11 @@ class OAuthHandler(BaseHTTPRequestHandler):
thread.daemon = True
thread.start()
def log_message(self, format, *args):
def log_message(self, fmt, *args):
"""
Redirect logging to our own handler instead of stdout
"""
_logger.debug(format, *args)
_logger.debug(fmt, *args)
def build_body(self, template_file=INDEX):
"""

View File

@@ -7,8 +7,8 @@ Reference:
https://github.com/kennethreitz/requests/blob/master/requests/packages/__init__.py
"""
from __future__ import absolute_import
import sys
import sys
__praw_hash__ = '1e82eb0f8690a2acbdc15d030130dc50507eb4ba'
__praw_bundled__ = True
@@ -18,6 +18,7 @@ try:
from . import praw
except ImportError:
import praw
if not praw.__version__.startswith('3.'):
raise RuntimeError('Invalid PRAW version ({0}) detected, '
'rtv requires PRAW version 3'.format(praw.__version__))

View File

@@ -23,11 +23,12 @@ def logged_in(f):
"""
Decorator for Page methods that require the user to be authenticated.
"""
@wraps(f)
def wrapped_method(self, *args, **kwargs):
if not self.reddit.is_oauth_session():
self.term.show_notification('Not logged in')
return
return None
return f(self, *args, **kwargs)
return wrapped_method
@@ -58,7 +59,7 @@ class Page(object):
def refresh_content(self, order=None, name=None):
raise NotImplementedError
def _draw_item(self, window, data, inverted):
def _draw_item(self, win, data, inverted):
raise NotImplementedError
def get_selected_item(self):
@@ -501,7 +502,8 @@ class Page(object):
# if the content will fill up the page, given that it is dependent
# on the size of the terminal.
self.nav.flip((len(self._subwindows) - 1))
return self._draw_content()
self._draw_content()
return
if self.nav.cursor_index >= len(self._subwindows):
# Don't allow the cursor to go over the number of subwindows

View File

@@ -377,13 +377,15 @@ class Terminal(object):
"""
if not self.config['enable_media']:
return self.open_browser(url)
self.open_browser(url)
return
try:
with self.loader('Checking link', catch_exception=False):
command, entry = self.get_mailcap_entry(url)
except exceptions.MailcapEntryNotFound:
return self.open_browser(url)
self.open_browser(url)
return
_logger.info('Executing command: %s', command)
needs_terminal = 'needsterminal' in entry
@@ -811,7 +813,7 @@ class Terminal(object):
# Prune empty lines at the bottom of the textbox.
for item in stack[::-1]:
if len(item) == 0:
if item:
stack.pop()
else:
break

View File

@@ -1,3 +1,5 @@
# pylint: disable=bad-whitespace
import os
import codecs
import curses
@@ -167,20 +169,20 @@ class Theme(object):
# Create the "Selected" versions of elements, which are prefixed with
# the @ symbol. For example, "@CommentText" represents how comment
# text is formatted when it is highlighted by the cursor.
for name in self.DEFAULT_THEME['normal']:
dest = '@{0}'.format(name)
self._set_fallback(elements, name, 'Selected', dest)
for name in self.DEFAULT_THEME['cursor']:
dest = '@{0}'.format(name)
self._set_fallback(elements, name, 'SelectedCursor', dest)
for key in self.DEFAULT_THEME['normal']:
dest = '@{0}'.format(key)
self._set_fallback(elements, key, 'Selected', dest)
for key in self.DEFAULT_THEME['cursor']:
dest = '@{0}'.format(key)
self._set_fallback(elements, key, 'SelectedCursor', dest)
# Fill in the ``None`` values for all of the elements with normal text
for name in self.DEFAULT_THEME['normal']:
self._set_fallback(elements, name, 'Normal')
for name in self.DEFAULT_THEME['cursor']:
self._set_fallback(elements, name, 'Normal')
for name in self.DEFAULT_THEME['page']:
self._set_fallback(elements, name, 'Normal')
for key in self.DEFAULT_THEME['normal']:
self._set_fallback(elements, key, 'Normal')
for key in self.DEFAULT_THEME['cursor']:
self._set_fallback(elements, key, 'Normal')
for key in self.DEFAULT_THEME['page']:
self._set_fallback(elements, key, 'Normal')
self.elements = elements
@@ -554,5 +556,3 @@ class ThemeList(object):
def previous(self, theme):
return self._step(theme, -1)

View File

@@ -25,6 +25,7 @@ Thanks to the following people for their contributions to this project.
"""
def main():
logging.captureWarnings(True)
@@ -53,5 +54,6 @@ def main():
with open(FILENAME, 'wb') as fp:
fp.write(text)
if __name__ == '__main__':
main()

View File

@@ -79,5 +79,6 @@ def main():
with open(os.path.join(ROOT, 'rtv.1'), 'w') as fp:
fp.write(out)
if __name__ == '__main__':
main()

View File

@@ -278,5 +278,6 @@ def main():
theme = theme_list.next(theme)
theme = theme_list.previous(theme)
sys.exit(main())

View File

@@ -190,6 +190,7 @@ def test_content_flatten_comments_3(reddit):
self.id = comment_id
self.parent_id = parent_id
self.replies = []
def __repr__(self):
return '%s (%s)' % (self.id, self.parent_id)