Merge branch 'master' into themes

Michael Lazar
2017-09-07 22:31:43 -04:00
65 changed files with 20824 additions and 2168 deletions

View File

@@ -23,6 +23,14 @@ except ImportError:
sys.exit('Fatal Error: Your python distribution appears to be missing '
'_curses.so.\nWas it compiled without support for curses?')
# If we want to override the $BROWSER variable that the python webbrowser
# references, it needs to be done before the webbrowser module is imported
# for the first time.
webbrowser_import_warning = ('webbrowser' in sys.modules)
RTV_BROWSER, BROWSER = os.environ.get('RTV_BROWSER'), os.environ.get('BROWSER')
if RTV_BROWSER:
os.environ['BROWSER'] = RTV_BROWSER
from . import docs
from . import packages
from .packages import praw
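As the comment above notes, the override only takes effect if it happens before the webbrowser module is first imported. A minimal sketch of the same idea (illustrative, not part of the diff):

import os
# $BROWSER must be set before the first `import webbrowser`, because the
# module builds its list of candidate browsers at import time.
os.environ['BROWSER'] = 'firefox'
import webbrowser
webbrowser.open_new_tab('https://www.reddit.com')  # now prefers firefox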
@@ -30,7 +38,7 @@ from .config import Config, copy_default_config, copy_default_mailcap
from .theme import Theme
from .oauth import OAuthHelper
from .terminal import Terminal
from .objects import curses_session
from .objects import curses_session, patch_webbrowser
from .subreddit_page import SubredditPage
from .exceptions import ConfigError
from .__version__ import __version__
@@ -108,13 +116,18 @@ def main():
filename=config['log'],
format='%(asctime)s:%(levelname)s:%(filename)s:%(lineno)d:%(message)s')
_logger.info('Starting new session, RTV v%s', __version__)
_logger.info('%s, %s', sys.executable, sys.version)
env = [
('$DISPLAY', os.getenv('DISPLAY')),
('$TERM', os.getenv('TERM')),
('$XDG_CONFIG_HOME', os.getenv('XDG_CONFIG_HOME')),
('$BROWSER', os.getenv('BROWSER')),
('$PAGER', os.getenv('PAGER')),
('$RTV_EDITOR', os.getenv('RTV_EDITOR')),
('$RTV_URLVIEWER', os.getenv('RTV_URLVIEWER'))]
('$RTV_URLVIEWER', os.getenv('RTV_URLVIEWER')),
('$RTV_BROWSER', RTV_BROWSER),
('$BROWSER', BROWSER),
('$PAGER', os.getenv('PAGER')),
('$VISUAL', os.getenv('VISUAL')),
('$EDITOR', os.getenv('EDITOR'))]
_logger.info('Environment: %s', env)
else:
# Add an empty handler so the logger doesn't complain
@@ -139,6 +152,8 @@ def main():
warnings.warn(text)
config['ascii'] = True
_logger.info('RTV module path: %s', os.path.abspath(__file__))
# Check the praw version
if packages.__praw_bundled__:
_logger.info('Using packaged PRAW distribution, '
@@ -147,6 +162,12 @@ def main():
_logger.info('Packaged PRAW not found, falling back to system '
'installed version %s', praw.__version__)
# Update the webbrowser module's default behavior
patch_webbrowser()
if webbrowser_import_warning:
_logger.warning('webbrowser module was unexpectedly imported before '
'$BROWSER could be overwritten')
# Construct the reddit user agent
user_agent = docs.AGENT.format(version=__version__)

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '1.16.0'
__version__ = '1.18.0'

View File

@@ -35,13 +35,14 @@ def build_parser():
epilog=docs.CONTROLS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'-V', '--version', action='version', version='rtv '+__version__)
'link', metavar='URL', nargs='?',
help='[optional] Full URL of a submission to open')
parser.add_argument(
'-s', dest='subreddit',
help='Name of the subreddit that will be opened on start')
help='Name of the subreddit that will be loaded on start')
parser.add_argument(
'-l', dest='link',
help='Full URL of a submission that will be opened on start')
'-l', dest='link_deprecated',
help=argparse.SUPPRESS) # Deprecated, use the positional arg instead
parser.add_argument(
'--log', metavar='FILE', action='store',
help='Log HTTP requests to the given file')
@@ -61,8 +62,7 @@ def build_parser():
'--list-themes', metavar='FILE', action='store_const', const=True,
help='List all of the available color themes')
parser.add_argument(
'--non-persistent', dest='persistent', action='store_const',
const=False,
'--non-persistent', dest='persistent', action='store_const', const=False,
help='Forget the authenticated user when the program exits')
parser.add_argument(
'--clear-auth', dest='clear_auth', action='store_const', const=True,
@@ -76,6 +76,8 @@ def build_parser():
parser.add_argument(
'--enable-media', dest='enable_media', action='store_const', const=True,
help='Open external links using programs defined in the mailcap config')
parser.add_argument(
'-V', '--version', action='version', version='rtv '+__version__)
return parser
@@ -218,6 +220,12 @@ class Config(object):
parser = build_parser()
args = vars(parser.parse_args())
# Overwrite the deprecated "-l" option into the link variable
if args['link_deprecated'] and args['link'] is None:
args['link'] = args['link_deprecated']
args.pop('link_deprecated', None)
# Filter out argument values that weren't supplied
return {key: val for key, val in args.items() if val is not None}
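A quick sketch of how the two invocation forms now converge (hypothetical URLs, not part of the diff):

parser = build_parser()
# New positional form
args = vars(parser.parse_args(['https://redd.it/571dw3']))
# args['link'] == 'https://redd.it/571dw3'
# Deprecated form: -l is hidden from --help but still accepted; the block
# above copies it into args['link'] and then drops the 'link_deprecated' key
args = vars(parser.parse_args(['-l', 'https://redd.it/571dw3']))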

View File

@@ -326,6 +326,7 @@ class SubmissionContent(Content):
self.max_indent_level = max_indent_level
self.name = submission_data['permalink']
self.order = order
self.query = None
self._loader = loader
self._submission = submission
self._submission_data = submission_data
@@ -435,11 +436,14 @@ class SubredditContent(Content):
list for repeat access.
"""
def __init__(self, name, submissions, loader, order=None, max_title_rows=4):
def __init__(self, name, submissions, loader, order=None,
max_title_rows=4, query=None, filter_nsfw=False):
self.name = name
self.order = order
self.query = query
self.max_title_rows = max_title_rows
self.filter_nsfw = filter_nsfw
self._loader = loader
self._submissions = submissions
self._submission_data = []
@@ -474,22 +478,33 @@ class SubredditContent(Content):
query (text): Content to search for on the given subreddit or
user's page.
"""
# TODO: refactor this into smaller methods
# TODO: This desperately needs to be refactored
# Strip leading, trailing, and redundant backslashes
parts = [seg for seg in name.strip(' /').split('/') if seg]
# Check for the resource type, assume /r/ as the default
if len(parts) >= 3 and parts[2] == 'm':
# E.g. /u/multi-mod/m/android
# E.g. /u/civilization_phaze_3/m/multireddit ->
# resource_root = "u/civilization_phaze_3/m"
# parts = ["multireddit"]
resource_root, parts = '/'.join(parts[:3]), parts[3:]
elif len(parts) > 1 and parts[0] in ['r', 'u', 'user', 'domain']:
# E.g. /u/civilization_phaze_3 ->
# resource_root = "u"
# parts = ["civilization_phaze_3"]
#
# E.g. /r/python/top-week ->
# resource_root = "r"
# parts = ["python", "top-week"]
resource_root = parts.pop(0)
else:
resource_root = 'r'
if resource_root == 'user':
resource_root = 'u'
elif resource_root.startswith('user/'):
resource_root = 'u' + resource_root[4:]
# There should be at most two parts left, the resource and the order
if len(parts) == 1:
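For reference, a rough trace of the splitting logic above with an illustrative name:

name = '/r/python/top-week'
parts = [seg for seg in name.strip(' /').split('/') if seg]  # ['r', 'python', 'top-week']
resource_root = parts.pop(0)                                 # 'r'
# parts == ['python', 'top-week'] -> the resource and the order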
@@ -517,13 +532,21 @@ class SubredditContent(Content):
else:
period = None
if order not in ['hot', 'top', 'rising', 'new', 'controversial', None]:
if query:
# The allowed orders for sorting search results are different
orders = ['relevance', 'top', 'comments', 'new', None]
period_allowed = ['top', 'comments']
else:
orders = ['hot', 'top', 'rising', 'new', 'controversial', None]
period_allowed = ['top', 'controversial']
if order not in orders:
raise InvalidSubreddit('Invalid order `%s`' % order)
if period not in ['all', 'day', 'hour', 'month', 'week', 'year', None]:
raise InvalidSubreddit('Invalid period `%s`' % period)
if period and order not in ['top', 'controversial']:
raise InvalidSubreddit('`%s` order does not allow sorting by'
' period' % order)
if period and order not in period_allowed:
raise InvalidSubreddit(
'`%s` order does not allow sorting by period' % order)
# On some objects, praw doesn't allow you to pass arguments for the
# order and period. Instead you need to call special helper functions
@@ -558,6 +581,15 @@ class SubredditContent(Content):
elif resource_root.endswith('/m'):
redditor = resource_root.split('/')[1]
if redditor == 'me':
if not reddit.is_oauth_session():
raise exceptions.AccountError('Not logged in')
else:
redditor = reddit.user.name
display_name = display_name.replace(
'/me/', '/{0}/'.format(redditor))
multireddit = reddit.get_multireddit(redditor, resource)
submissions = getattr(multireddit, method_alias)(limit=None)
@@ -602,8 +634,11 @@ class SubredditContent(Content):
# display name with the one returned by the request.
display_name = '/r/{0}'.format(subreddit.display_name)
filter_nsfw = (reddit.user and reddit.user.over_18 is False)
# We made it!
return cls(display_name, submissions, loader, order=display_order)
return cls(display_name, submissions, loader, order=display_order,
query=query, filter_nsfw=filter_nsfw)
@property
def range(self):
@@ -621,6 +656,7 @@ class SubredditContent(Content):
if index < 0:
raise IndexError
nsfw_count = 0
while index >= len(self._submission_data):
try:
with self._loader('Loading more submissions'):
@@ -630,10 +666,25 @@ class SubredditContent(Content):
except StopIteration:
raise IndexError
else:
# Skip NSFW posts based on the reddit user's profile settings.
# If we see 20+ NSFW posts at the beginning, assume the subreddit
# only has NSFW content and abort. This allows us to avoid making
# an additional API call to check if a subreddit is over18 (which
# doesn't work for things like multireddits anyway)
if self.filter_nsfw and submission.over_18:
nsfw_count += 1
if not self._submission_data and nsfw_count >= 20:
raise exceptions.SubredditError(
'You must be over 18+ to view this subreddit')
continue
else:
nsfw_count = 0
if hasattr(submission, 'title'):
data = self.strip_praw_submission(submission)
else:
# when submission is a saved commment
# when submission is a saved comment
data = self.strip_praw_comment(submission)
data['index'] = len(self._submission_data) + 1
@@ -659,6 +710,7 @@ class SubscriptionContent(Content):
self.name = name
self.order = None
self.query = None
self._loader = loader
self._subscriptions = subscriptions
self._subscription_data = []
@@ -669,7 +721,7 @@ class SubscriptionContent(Content):
raise exceptions.SubscriptionError('No content')
# Load 1024 subscriptions up front (one http request's worth)
# For most people this should be all of their subscriptions. Doing thi
# For most people this should be all of their subscriptions. This
# allows the user to jump to the end of the page with `G`.
if name != 'Popular Subreddits':
try:

View File

@@ -7,12 +7,11 @@ desktop:https://github.com/michael-lazar/rtv:{version}\
"""
SUMMARY = """
Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a
terminal window.
Reddit Terminal Viewer is a lightweight browser for https://www.reddit.com
"""
CONTROLS = """
Move the cursor using either the arrow keys or *Vim* style movement.
Move the cursor using the arrow keys or vim style movement.
Press `?` to open the help screen.
"""
@@ -30,6 +29,8 @@ https://github.com/michael-lazar/rtv
m : Move up one page
gg : Jump to the first post
G : Jump to the last post
J : Jump to the next sibling comment
K : Jump to the parent comment
1 : Sort by hot
2 : Sort by top
3 : Sort by rising
@@ -82,6 +83,11 @@ BANNER = """
[1]hot [2]top [3]rising [4]new [5]controversial
"""
BANNER_SEARCH = """
[1]relevance [2]top [3]comments [4]new
"""
FOOTER_SUBREDDIT = """
[?]Help [q]Quit [l]Comments [/]Prompt [u]Login [o]Open [c]Post [a/z]Vote
"""

View File

@@ -22,6 +22,10 @@ class SubmissionError(RTVError):
"Submission could not be loaded"
class SubredditError(RTVError):
"Subreddit could not be loaded"
class NoSubmissionsError(RTVError):
"No submissions for the given page"

View File

@@ -32,6 +32,7 @@ class BaseMIMEParser(object):
browser.
"""
filename = url.split('?')[0]
filename = filename.split('#')[0]
content_type, _ = mimetypes.guess_type(filename)
return url, content_type
@@ -54,12 +55,11 @@ class OpenGraphMIMEParser(BaseMIMEParser):
def get_mimetype(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
tag = soup.find('meta', attrs={'property': 'og:video:secure_url'})
tag = tag or soup.find('meta', attrs={'property': 'og:image'})
if tag:
return BaseMIMEParser.get_mimetype(tag.get('content'))
else:
return url, None
for og_type in ['og:video:secure_url', 'og:video', 'og:image']:
tag = soup.find('meta', attrs={'property': og_type})
if tag:
return BaseMIMEParser.get_mimetype(tag.get('content'))
return url, None
class GfycatMIMEParser(BaseMIMEParser):
@@ -77,7 +77,7 @@ class GfycatMIMEParser(BaseMIMEParser):
@staticmethod
def get_mimetype(url):
parts = url.split('/')
parts = url.replace('gifs/detail/', '').split('/')
api_url = '/'.join(parts[:-1] + ['cajax', 'get'] + parts[-1:])
resp = requests.get(api_url)
image_url = resp.json()['gfyItem']['webmUrl']
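Roughly, the rewritten line above normalizes the newer /gifs/detail/ URLs before building the API request (illustrative values):

url = 'https://gfycat.com/gifs/detail/SomeClip'
parts = url.replace('gifs/detail/', '').split('/')
# ['https:', '', 'gfycat.com', 'SomeClip']
api_url = '/'.join(parts[:-1] + ['cajax', 'get'] + parts[-1:])
# 'https://gfycat.com/cajax/get/SomeClip'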
@@ -128,7 +128,107 @@ class RedditUploadsMIMEParser(BaseMIMEParser):
return url, content_type
class ImgurMIMEParser(BaseMIMEParser):
class RedditVideoMIMEParser(BaseMIMEParser):
"""
Reddit hosted videos/gifs.
Media uses MPEG-DASH format (.mpd)
"""
pattern = re.compile(r'https://v\.redd\.it/.+$')
@staticmethod
def get_mimetype(url):
request_url = url + '/DASHPlaylist.mpd'
page = requests.get(request_url)
soup = BeautifulSoup(page.content, 'html.parser')
if not soup.find('representation', attrs={'mimetype': 'audio/mp4'}):
reps = soup.find_all('representation',
attrs={'mimetype': 'video/mp4'})
rep = sorted(reps, reverse=True,
key=lambda t: int(t.get('bandwidth')))[0]
return url + '/' + rep.find('baseurl').text, 'video/mp4'
else:
return request_url, 'video/x-youtube'
class ImgurApiMIMEParser(BaseMIMEParser):
"""
Imgur now provides a json API exposing its entire infrastructure. Each Imgur
page has an associated hash and can either contain an album, a gallery,
or a single image.
The default client token for RTV is shared among users and allows a global
maximum of 12,500 requests per day. If we find that this limit is not
sufficient for all of rtv's traffic, this method will be revisited.
Reference:
https://apidocs.imgur.com
"""
CLIENT_ID = None
pattern = re.compile(
r'https?://(w+\.)?(m\.)?imgur\.com/'
r'((?P<domain>a|album|gallery)/)?(?P<hash>[a-zA-Z0-9]+)$')
@classmethod
def get_mimetype(cls, url):
endpoint = 'https://api.imgur.com/3/{domain}/{page_hash}'
headers = {'authorization': 'Client-ID {0}'.format(cls.CLIENT_ID)}
m = cls.pattern.match(url)
page_hash = m.group('hash')
if m.group('domain') in ('a', 'album'):
domain = 'album'
else:
# This could be a gallery or a single image, but there doesn't
# seem to be a way to reliably distinguish between the two.
# Assume a gallery, which appears to be more common, and fallback
# to an image request upon failure.
domain = 'gallery'
if not cls.CLIENT_ID:
return cls.fallback(url, domain)
api_url = endpoint.format(domain=domain, page_hash=page_hash)
r = requests.get(api_url, headers=headers)
if domain == 'gallery' and r.status_code != 200:
# Not a gallery, try to download using the image endpoint
api_url = endpoint.format(domain='image', page_hash=page_hash)
r = requests.get(api_url, headers=headers)
if r.status_code != 200:
_logger.warning('Imgur API failure, status %s', r.status_code)
return cls.fallback(url, domain)
data = r.json().get('data')
if not data:
_logger.warning('Imgur API failure, resp %s', r.json())
return cls.fallback(url, domain)
if 'images' in data:
# TODO: handle imgur albums with mixed content, i.e. jpeg and gifv
link = ' '.join([d['link'] for d in data['images'] if not d['animated']])
mime = 'image/x-imgur-album'
else:
link = data['mp4'] if data['animated'] else data['link']
mime = 'video/mp4' if data['animated'] else data['type']
link = link.replace('http://', 'https://')
return link, mime
@classmethod
def fallback(cls, url, domain):
"""
Attempt to use one of the scrapers if the API doesn't work
"""
if domain == 'album':
return ImgurScrapeAlbumMIMEParser.get_mimetype(url)
else:
return ImgurScrapeMIMEParser.get_mimetype(url)
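A sketch of the request ImgurApiMIMEParser makes, assuming a valid client ID (the endpoint and header format are taken from the code above; the page hash is a placeholder):

import requests
headers = {'authorization': 'Client-ID <imgur_client_id>'}
r = requests.get('https://api.imgur.com/3/gallery/abc123', headers=headers)
data = r.json().get('data')
# Albums/galleries expose data['images'], a list of dicts with 'link' and
# 'animated'; single images expose 'link', 'type', 'animated', and 'mp4'.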
class ImgurScrapeMIMEParser(BaseMIMEParser):
"""
The majority of imgur links don't point directly to the image, so we need
to open the provided url and scrape the page for the link.
@@ -156,7 +256,7 @@ class ImgurMIMEParser(BaseMIMEParser):
return BaseMIMEParser.get_mimetype(url)
class ImgurAlbumMIMEParser(BaseMIMEParser):
class ImgurScrapeAlbumMIMEParser(BaseMIMEParser):
"""
Imgur albums can contain several images, which need to be scraped from the
landing page. Assumes the following html structure:
@@ -204,6 +304,32 @@ class StreamableMIMEParser(OpenGraphMIMEParser):
pattern = re.compile(r'https?://(www\.)?streamable\.com/[^.]+$')
class TwitchMIMEParser(BaseMIMEParser):
"""
Non-streaming videos hosted by twitch.tv
"""
pattern = re.compile(r'https?://clips\.?twitch\.tv/[^.]+$')
@staticmethod
def get_mimetype(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
tag = soup.find('meta', attrs={'name': 'twitter:image'})
thumbnail = tag.get('content')
suffix = '-preview.jpg'
if thumbnail.endswith(suffix):
return thumbnail.replace(suffix, '.mp4'), 'video/mp4'
else:
return url, None
class OddshotMIMEParser(OpenGraphMIMEParser):
"""
Oddshot uses the Open Graph protocol
"""
pattern = re.compile(r'https?://oddshot\.tv/s(hot)?/[^.]+$')
class VidmeMIMEParser(BaseMIMEParser):
"""
Vidme provides a json api.
@@ -221,15 +347,63 @@ class VidmeMIMEParser(BaseMIMEParser):
return url, None
class LiveleakMIMEParser(BaseMIMEParser):
"""
https://www.liveleak.com/view?i=12c_3456789
<video>
<source src="https://cdn.liveleak.com/..mp4" res="HD" type="video/mp4">
<source src="https://cdn.liveleak.com/..mp4" res="SD" type="video/mp4">
</video>
Sometimes only one video source is available
"""
pattern = re.compile(r'https?://((www|m)\.)?liveleak\.com/view\?i=\w+$')
@staticmethod
def get_mimetype(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
urls = []
videos = soup.find_all('video')
for vid in videos:
source = vid.find('source', attr={'res': 'HD'}) \
or vid.find('source')
if source:
urls.append((source.get('src'), source.get('type')))
# TODO: Handle pages with multiple videos
# TODO: Handle pages with youtube embeds
if urls:
return urls[0]
else:
return url, None
class ClippitUserMIMEParser(BaseMIMEParser):
"""
"""
pattern = re.compile(r'https?://(www\.)?clippituser\.tv/c/.+$')
@staticmethod
def get_mimetype(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
tag = soup.find(id='jwplayer-container')
quality = ['data-{}-file'.format(_) for _ in ['hd', 'sd']]
return tag.get(quality[0]), 'video/mp4'
# Parsers should be listed in the order they will be checked
parsers = [
ClippitUserMIMEParser,
OddshotMIMEParser,
StreamableMIMEParser,
VidmeMIMEParser,
InstagramMIMEParser,
GfycatMIMEParser,
ImgurAlbumMIMEParser,
ImgurMIMEParser,
ImgurApiMIMEParser,
RedditUploadsMIMEParser,
RedditVideoMIMEParser,
YoutubeMIMEParser,
LiveleakMIMEParser,
TwitchMIMEParser,
GifvMIMEParser,
BaseMIMEParser]

View File

@@ -3,12 +3,14 @@ from __future__ import unicode_literals
import re
import os
import sys
import time
import signal
import inspect
import weakref
import logging
import threading
import webbrowser
import curses
import curses.ascii
from contextlib import contextmanager
@@ -23,6 +25,31 @@ from .packages import praw
_logger = logging.getLogger(__name__)
def patch_webbrowser():
"""
Patch webbrowser on macOS to support setting BROWSER=firefox,
BROWSER=chrome, etc..
https://bugs.python.org/issue31348
"""
if sys.platform != 'darwin' or 'BROWSER' not in os.environ:
return
# This is a copy of what's at the end of webbrowser.py, except that
# it adds MacOSXOSAScript entries instead of GenericBrowser entries.
_userchoices = os.environ["BROWSER"].split(os.pathsep)
for cmdline in reversed(_userchoices):
if cmdline in ('safari', 'firefox', 'chrome', 'default'):
browser = webbrowser.MacOSXOSAScript(cmdline)
try:
webbrowser.register(cmdline, None, browser, update_tryorder=-1)
except TypeError:
# 3.7 nightly build changed the method signature
# pylint: disable=unexpected-keyword-arg
webbrowser.register(cmdline, None, browser, preferred=True)
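A hypothetical macOS session with the patch above in effect (BROWSER must be set before the call, per the guard at the top of the function):

import os, webbrowser
os.environ['BROWSER'] = 'chrome'
patch_webbrowser()  # registers MacOSXOSAScript('chrome') ahead of the defaults
webbrowser.open_new_tab('https://www.reddit.com')  # opened via osascript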
@contextmanager
def curses_session():
"""
@@ -605,14 +632,16 @@ class KeyMap(object):
self.set_bindings(bindings)
def set_bindings(self, bindings):
# Clear the keymap before applying the bindings to avoid confusion.
# If a user defines custom bindings in their config file, they must
# explicitly define ALL of the bindings.
self._keymap = {}
new_keymap = {}
for command, keys in bindings.items():
if not isinstance(command, Command):
command = Command(command)
self._keymap[command] = keys
new_keymap[command] = keys
if not self._keymap:
self._keymap = new_keymap
else:
self._keymap.update(new_keymap)
def get(self, command):
if not isinstance(command, Command):
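With the merge behavior above, a config file only needs to list the bindings being changed (sketch; default_bindings stands in for the built-in map):

keymap = KeyMap(default_bindings)
keymap.set_bindings({'SUBREDDIT_SEARCH': ['f'],
                     'SUBREDDIT_OPEN_IN_BROWSER': ['o', '<KEY_ENTER>']})
# All other default bindings remain in place instead of being wiped out.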
@@ -656,4 +685,4 @@ class KeyMap(object):
except (AttributeError, ValueError, TypeError):
raise exceptions.ConfigError('Invalid configuration! "%s" is not a '
'valid key' % key)
'valid key' % key)

View File

@@ -116,32 +116,29 @@ class Page(object):
@PageController.register(Command('SORT_HOT'))
def sort_content_hot(self):
self.refresh_content(order='hot')
if self.content.query:
self.refresh_content(order='relevance')
else:
self.refresh_content(order='hot')
@PageController.register(Command('SORT_TOP'))
def sort_content_top(self):
choices = {
'\n': 'top',
'1': 'top-hour',
'2': 'top-day',
'3': 'top-week',
'4': 'top-month',
'5': 'top-year',
'6': 'top-all'}
message = docs.TIME_ORDER_MENU.strip().splitlines()
ch = self.term.show_notification(message)
ch = six.unichr(ch)
if ch not in choices:
order = self._prompt_period('top')
if order is None:
self.term.show_notification('Invalid option')
return
self.refresh_content(order=choices[ch])
else:
self.refresh_content(order=order)
@PageController.register(Command('SORT_RISING'))
def sort_content_rising(self):
self.refresh_content(order='rising')
if self.content.query:
order = self._prompt_period('comments')
if order is None:
self.term.show_notification('Invalid option')
else:
self.refresh_content(order=order)
else:
self.refresh_content(order='rising')
@PageController.register(Command('SORT_NEW'))
def sort_content_new(self):
@@ -149,24 +146,14 @@ class Page(object):
@PageController.register(Command('SORT_CONTROVERSIAL'))
def sort_content_controversial(self):
choices = {
'\n': 'controversial',
'1': 'controversial-hour',
'2': 'controversial-day',
'3': 'controversial-week',
'4': 'controversial-month',
'5': 'controversial-year',
'6': 'controversial-all'}
message = docs.TIME_ORDER_MENU.strip().splitlines()
ch = self.term.show_notification(message)
ch = six.unichr(ch)
if ch not in choices:
self.term.show_notification('Invalid option')
return
self.refresh_content(order=choices[ch])
if self.content.query:
self.term.flash()
else:
order = self._prompt_period('controversial')
if order is None:
self.term.show_notification('Invalid option')
else:
self.refresh_content(order=order)
@PageController.register(Command('MOVE_UP'))
def move_cursor_up(self):
@@ -421,6 +408,10 @@ class Page(object):
sub_name = sub_name.replace('/r/front', 'Front Page')
sub_name = sub_name.replace('/u/me', 'My Submissions')
sub_name = sub_name.replace('/u/saved', 'My Saved Submissions')
query = self.content.query
if query:
sub_name = 'Searching {0}: {1}'.format(sub_name, query)
self.term.add_line(window, sub_name, 0, 0)
# Set the terminal title
@@ -466,7 +457,9 @@ class Page(object):
window.erase()
window.bkgd(str(' '), self.term.attr('order_bar'))
items = docs.BANNER.strip().split(' ')
banner = docs.BANNER_SEARCH if self.content.query else docs.BANNER
items = banner.strip().split(' ')
distance = (n_cols - sum(len(t) for t in items) - 1) / (len(items) - 1)
spacing = max(1, int(distance)) * ' '
text = spacing.join(items)
@@ -574,3 +567,20 @@ class Page(object):
valid, redraw = self.nav.move_page(direction, len(self._subwindows)-1)
if not valid:
self.term.flash()
def _prompt_period(self, order):
choices = {
'\n': order,
'1': '{0}-hour'.format(order),
'2': '{0}-day'.format(order),
'3': '{0}-week'.format(order),
'4': '{0}-month'.format(order),
'5': '{0}-year'.format(order),
'6': '{0}-all'.format(order)}
message = docs.TIME_ORDER_MENU.strip().splitlines()
ch = self.term.show_notification(message)
ch = six.unichr(ch)
return choices.get(ch)

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import time
import curses
@@ -75,6 +76,11 @@ class SubmissionPage(Page):
order = order or self.content.order
url = name or self.content.name
# Hack to allow an order specified in the name by prompt_subreddit() to
# override the current default
if order == 'ignore':
order = None
with self.term.loader('Refreshing page'):
self.content = SubmissionContent.from_url(
self.reddit, url, self.term.loader, order=order,
@@ -90,12 +96,25 @@ class SubmissionPage(Page):
name = self.term.prompt_input('Enter page: /')
if name is not None:
with self.term.loader('Loading page'):
content = SubredditContent.from_name(
self.reddit, name, self.term.loader)
if not self.term.loader.exception:
self.selected_subreddit = content
self.active = False
# Check if opening a submission url or a subreddit url
# Example patterns for submissions:
# comments/571dw3
# /comments/571dw3
# /r/pics/comments/571dw3/
# https://www.reddit.com/r/pics/comments/571dw3/at_disneyland
submission_pattern = re.compile(r'(^|/)comments/(?P<id>.+?)($|/)')
match = submission_pattern.search(name)
if match:
url = 'https://www.reddit.com/comments/{0}'
self.refresh_content('ignore', url.format(match.group('id')))
else:
with self.term.loader('Loading page'):
content = SubredditContent.from_name(
self.reddit, name, self.term.loader)
if not self.term.loader.exception:
self.selected_subreddit = content
self.active = False
@SubmissionController.register(Command('SUBMISSION_OPEN_IN_BROWSER'))
def open_link(self):
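For reference, the pattern above pulls the same id out of each documented form (illustrative check):

import re
submission_pattern = re.compile(r'(^|/)comments/(?P<id>.+?)($|/)')
for name in ('comments/571dw3',
             '/r/pics/comments/571dw3/',
             'https://www.reddit.com/r/pics/comments/571dw3/at_disneyland'):
    assert submission_pattern.search(name).group('id') == '571dw3'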
@@ -199,6 +218,52 @@ class SubmissionPage(Page):
else:
self.term.flash()
@SubmissionController.register(Command('SUBMISSION_GOTO_PARENT'))
def move_parent_up(self):
"""
Move the cursor up to the comment's parent. If the comment is
top-level, jump to the previous top-level comment.
"""
cursor = self.nav.absolute_index
if cursor > 0:
level = max(self.content.get(cursor)['level'], 1)
while self.content.get(cursor - 1)['level'] >= level:
self._move_cursor(-1)
cursor -= 1
self._move_cursor(-1)
else:
self.term.flash()
self.clear_input_queue()
@SubmissionController.register(Command('SUBMISSION_GOTO_SIBLING'))
def move_sibling_next(self):
"""
Jump to the next comment that's at the same level as the selected
comment and shares the same parent.
"""
cursor = self.nav.absolute_index
if cursor >= 0:
level = self.content.get(cursor)['level']
try:
move = 1
while self.content.get(cursor + move)['level'] > level:
move += 1
except IndexError:
self.term.flash()
else:
if self.content.get(cursor + move)['level'] == level:
for _ in range(move):
self._move_cursor(1)
else:
self.term.flash()
else:
self.term.flash()
self.clear_input_queue()
def _draw_item(self, win, data, inverted, highlight):
if data['type'] in ('MoreComments', 'HiddenComment'):
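A worked example of the two jumps above, using a hypothetical comment tree whose indentation levels by absolute index are [submission, 0, 1, 2, 2, 1]: SUBMISSION_GOTO_PARENT from index 4 walks up past index 3 (level 2) and then takes one more step, landing on the level-1 parent at index 2; SUBMISSION_GOTO_SIBLING from index 3 finds index 4 at the same level and moves the cursor there, while from index 4 the next comment (index 5) is at level 1, so the terminal flashes instead.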
@@ -231,9 +296,11 @@ class SubmissionPage(Page):
if row in valid_rows:
if data['is_author']:
attr = self.term.attr('comment_author_self', highlight)
text = '{author} [S]'.format(**data)
else:
attr = self.term.attr('comment_author', highlight)
self.term.add_line(win, '{author}'.format(**data), row, 1, attr)
text = '{author}'.format(**data)
self.term.add_line(win, text, row, 1, attr)
if data['flair']:
attr = self.term.attr('user_flair', highlight)

View File

@@ -3,7 +3,6 @@ from __future__ import unicode_literals
import re
import time
import curses
from . import docs
from .content import SubredditContent
@@ -37,9 +36,18 @@ class SubredditPage(Page):
@SubredditController.register(Command('REFRESH'))
def refresh_content(self, order=None, name=None):
"Re-download all submissions and reset the page index"
"""
Re-download all submissions and reset the page index
"""
order = order or self.content.order
# Preserve the query if staying on the current page
if name is None:
query = self.content.query
else:
query = None
name = name or self.content.name
# Hack to allow an order specified in the name by prompt_subreddit() to
@@ -49,13 +57,15 @@ class SubredditPage(Page):
with self.term.loader('Refreshing page'):
self.content = SubredditContent.from_name(
self.reddit, name, self.term.loader, order=order)
self.reddit, name, self.term.loader, order=order, query=query)
if not self.term.loader.exception:
self.nav = Navigator(self.content.get)
@SubredditController.register(Command('SUBREDDIT_SEARCH'))
def search_subreddit(self, name=None):
"Open a prompt to search the given subreddit"
"""
Open a prompt to search the given subreddit
"""
name = name or self.content.name
@@ -71,7 +81,9 @@ class SubredditPage(Page):
@SubredditController.register(Command('PROMPT'))
def prompt_subreddit(self):
"Open a prompt to navigate to a different subreddit"
"""
Open a prompt to navigate to a different subreddit
"""
name = self.term.prompt_input('Enter page: /')
if name is not None:
@@ -135,7 +147,9 @@ class SubredditPage(Page):
@SubredditController.register(Command('SUBREDDIT_OPEN_IN_BROWSER'))
def open_link(self):
"Open a link with the webbrowser"
"""
Open a link with the webbrowser
"""
data = self.get_selected_item()
if data['url_type'] == 'selfpost':
@@ -152,7 +166,9 @@ class SubredditPage(Page):
@SubredditController.register(Command('SUBREDDIT_POST'))
@logged_in
def post_submission(self):
"Post a new submission to the given subreddit"
"""
Post a new submission to the given subreddit
"""
# Check that the subreddit can be submitted to
name = self.content.name
@@ -198,7 +214,9 @@ class SubredditPage(Page):
@SubredditController.register(Command('SUBREDDIT_OPEN_SUBSCRIPTIONS'))
@logged_in
def open_subscriptions(self):
"Open user subscriptions page"
"""
Open user subscriptions page
"""
with self.term.loader('Loading subscriptions'):
page = SubscriptionPage(self.reddit, self.term, self.config,
@@ -217,7 +235,9 @@ class SubredditPage(Page):
@SubredditController.register(Command('SUBREDDIT_OPEN_MULTIREDDITS'))
@logged_in
def open_multireddit_subscriptions(self):
"Open user multireddit subscriptions page"
"""
Open user multireddit subscriptions page
"""
with self.term.loader('Loading multireddits'):
page = SubscriptionPage(self.reddit, self.term, self.config,

View File

@@ -42,6 +42,10 @@ video/*; mpv '%s' --autofit 640x480 --loop=inf; test=test -n "$DISPLAY"
# terminal when X is not available.
###############################################################################
# View images directly in your terminal with iTerm2
# curl -L https://iterm2.com/misc/install_shell_integration_and_utilities.sh | bash
# image/*; curl -s %s | ~/.iterm2/imgcat && read -n 1; needsterminal
# View true images in the terminal, supported by rxvt-unicode, xterm and st
# Requires the w3m-img package
# image/*; w3m -o 'ext_image_viewer=off' '%s'; needsterminal
@@ -57,6 +61,10 @@ image/*; curl -s '%s' | convert -resize 80x80 - jpg:/tmp/rtv.jpg && img2xterm /t
# Display images in classic ascii using img2txt and lib-caca
image/*; curl -s '%s' | convert - jpg:/tmp/rtv.jpg && img2txt -f utf8 /tmp/rtv.jpg; needsterminal; copiousoutput
# Full motion videos - requires a framebuffer to view
video/x-youtube; mpv -vo drm -quiet '%s'; needsterminal
video/*; mpv -vo drm -quiet '%s'; needsterminal
# Ascii videos
video/x-youtube; youtube-dl -q -o - '%s' | mplayer -cache 8192 -vo caca -quiet -; needsterminal
video/*; wget '%s' -O - | mplayer -cache 8192 -vo caca -quiet -; needsterminal
# video/x-youtube; youtube-dl -q -o - '%s' | mplayer -cache 8192 -vo caca -quiet -; needsterminal
# video/*; wget '%s' -O - | mplayer -cache 8192 -vo caca -quiet -; needsterminal

View File

@@ -63,6 +63,11 @@ oauth_redirect_port = 65000
; Access permissions that will be requested.
oauth_scope = edit,history,identity,mysubreddits,privatemessages,read,report,save,submit,subscribe,vote
; This is a separate token for the imgur api. It's used to extract images
; from imgur links and albums so they can be opened with mailcap.
; See https://imgur.com/account/settings/apps to generate your own key.
imgur_client_id = 93396265f59dec9
[bindings]
##############
# Key Bindings
@@ -129,12 +134,14 @@ SUBMISSION_POST = c
SUBMISSION_EXIT = h, <KEY_LEFT>
SUBMISSION_OPEN_IN_PAGER = l, <KEY_RIGHT>
SUBMISSION_OPEN_IN_URLVIEWER = b
SUBMISSION_GOTO_PARENT = K
SUBMISSION_GOTO_SIBLING = J
; Subreddit page
SUBREDDIT_SEARCH = f
SUBREDDIT_POST = c
SUBREDDIT_OPEN = l, <KEY_RIGHT>
SUBREDDIT_OPEN_IN_BROWSER = o, <LF>, <KEY_ENTER>, <KEY_ENTER>
SUBREDDIT_OPEN_IN_BROWSER = o, <LF>, <KEY_ENTER>
SUBREDDIT_OPEN_SUBSCRIPTIONS = s
SUBREDDIT_OPEN_MULTIREDDITS = S
SUBREDDIT_FRONTPAGE = p

View File

@@ -13,12 +13,11 @@ import webbrowser
import subprocess
import curses.ascii
from curses import textpad
from multiprocessing import Process
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import six
#pylint: disable=import-error
from six.moves.urllib.parse import quote
from kitchen.text.display import textual_width_chop
from . import exceptions, mime_parsers
@@ -63,7 +62,11 @@ class Terminal(object):
self._display = None
self._mailcap_dict = mailcap.getcaps()
self._term = os.environ['TERM']
self._term = os.environ.get('TERM')
# This is a hack, the MIME parsers should be stateless
# but we need to load the imgur credentials from the config
mime_parsers.ImgurApiMIMEParser.CLIENT_ID = config['imgur_client_id']
@property
def up_arrow(self):
@@ -474,11 +477,12 @@ class Terminal(object):
python webbrowser will try to determine the default to use based on
your system.
For browsers requiring an X display, we call
webbrowser.open_new_tab(url) and redirect stdout/stderr to devnull.
This is a workaround to stop firefox from spewing warning messages to
the console. See http://bugs.python.org/issue22277 for a better
description of the problem.
For browsers requiring an X display, we open a new subprocess and
redirect stdout/stderr to devnull. This is a workaround to stop
BackgroundBrowsers (e.g. xdg-open, any BROWSER command ending in "&"),
from spewing warning messages to the console. See
http://bugs.python.org/issue22277 for a better description of the
problem.
For console browsers (e.g. w3m), RTV will suspend and display the
browser window within the same terminal. This mode is triggered either
@@ -489,40 +493,38 @@ class Terminal(object):
headless
There may be other cases where console browsers are opened (xdg-open?)
but are not detected here.
but are not detected here. These cases are still unhandled and will
probably be broken if we incorrectly assume that self.display=True.
"""
if self.display:
# Note that we need to sanitize the url before inserting it into
# the python code to prevent injection attacks.
command = (
"import webbrowser\n"
"from six.moves.urllib.parse import unquote\n"
"webbrowser.open_new_tab(unquote('%s'))" % quote(url))
args = [sys.executable, '-c', command]
with self.loader('Opening page in a new window'), \
open(os.devnull, 'ab+', 0) as null:
p = subprocess.Popen(args, stdout=null, stderr=null)
# Give the browser 5 seconds to open a new tab. Because the
with self.loader('Opening page in a new window'):
def open_url_silent(url):
# This used to be done using subprocess.Popen().
# It was switched to multiprocessing.Process so that we
# can re-use the webbrowser instance that has been patched
# by RTV. It's also safer because it doesn't inject
# python code through the command line.
null = open(os.devnull, 'ab+', 0)
sys.stdout, sys.stderr = null, null
webbrowser.open_new_tab(url)
p = Process(target=open_url_silent, args=(url,))
p.start()
# Give the browser 7 seconds to open a new tab. Because the
# display is set, calling webbrowser should be non-blocking.
# If it blocks or returns an error, something went wrong.
try:
start = time.time()
while time.time() - start < 10:
code = p.poll()
if code == 0:
break # Success
elif code is not None:
raise exceptions.BrowserError(
'Program exited with status=%s' % code)
time.sleep(0.01)
else:
p.join(7)
if p.is_alive():
raise exceptions.BrowserError(
'Timeout opening browser')
'Timeout waiting for browser to open')
finally:
# Can't check the loader exception because the oauth module
# supersedes this loader and we need to always kill the
# process if escape is pressed
# This will be hit on the browser timeout, but also if the
# user presses the ESC key. We always want to kill the
# webbrowser process if it hasn't opened the tab and
# terminated by now.
try:
p.terminate()
except OSError:
@@ -579,7 +581,10 @@ class Terminal(object):
fp.write(data)
_logger.info('File created: %s', filepath)
editor = os.getenv('RTV_EDITOR') or os.getenv('EDITOR') or 'nano'
editor = (os.getenv('RTV_EDITOR') or
os.getenv('VISUAL') or
os.getenv('EDITOR') or
'nano')
command = shlex.split(editor) + [filepath]
try:
with self.suspend():
@@ -788,7 +793,7 @@ class Terminal(object):
out = '\n'.join(stack)
return out
def clear_screen(self):
"""
In the beginning this always called touchwin(). However, a bug
@@ -798,14 +803,14 @@ class Terminal(object):
this in their tmux.conf or .bashrc file which can cause issues.
Using clearok() instead seems to fix the problem, with the trade off
of slightly more expensive screen refreshes.
Update: It was discovered that using clearok() introduced a
separate bug for urxvt users in which their screen flashed when
scrolling. Heuristics were added to make it work with as many
configurations as possible. It's still not perfect
(e.g. urxvt + xterm-256color will still flash), but it should
work in all cases if the user sets their TERM correctly.
Reference:
https://github.com/michael-lazar/rtv/issues/343
https://github.com/michael-lazar/rtv/issues/323