diff --git a/ebook_converter/utils/default_tweaks.py b/ebook_converter/data/default_tweaks.py
similarity index 100%
rename from ebook_converter/utils/default_tweaks.py
rename to ebook_converter/data/default_tweaks.py
diff --git a/ebook_converter/data/jacket/stylesheet.css b/ebook_converter/data/jacket/stylesheet.css
new file mode 100644
index 0000000..0d7a71d
--- /dev/null
+++ b/ebook_converter/data/jacket/stylesheet.css
@@ -0,0 +1,149 @@
+/*
+** Book Jacket generation
+**
+** The template for Book Jackets is template.xhtml
+** This CSS is inserted into the generated HTML at conversion time
+**
+** Users can control parts of the presentation of a generated book jacket by
+** editing this file and template.xhtml
+**
+** The general form of a generated Book Jacket:
+**
+** Title
+** Series: series [series_index]
+** Published: year_of_publication
+** Rating: #_of_stars
+** Tags: tag1, tag2, tag3 ...
+**
+** Comments
+**
+** If a book does not have Series information, a date of publication, a rating,
+** or tags, the corresponding row is automatically removed from the generated
+** book jacket.
+*/
+
+/*
+** Banner
+** Only affects EPUB; Kindle ignores this type of formatting
+*/
+.cbj_banner {
+ background: #eee;
+ color: black;
+ border: thin solid black;
+ margin: 1em;
+ padding: 1em;
+ border-radius:8px;
+ }
+
+/*
+** Title
+*/
+table.cbj_header td.cbj_title {
+ font-size: 1.5em;
+ font-style: italic;
+ text-align: center;
+}
+
+/*
+** Series
+*/
+table.cbj_header td.cbj_series {
+ text-align: center;
+}
+
+/*
+** Author
+*/
+table.cbj_header td.cbj_author {
+ text-align: center;
+}
+
+/*
+** Publisher/published
+*/
+table.cbj_header td.cbj_pubdata {
+ text-align: center;
+}
+
+/*
+** Table containing Rating and Tags
+*/
+table.cbj_header {
+ width: 100%;
+ }
+
+/*
+** General formatting for banner labels
+*/
+table.cbj_header td.cbj_label {
+ font-family: sans-serif;
+ text-align: right;
+ width: 33%;
+ }
+
+/*
+** General formatting for banner content
+*/
+table.cbj_header td.cbj_content {
+ font-family: sans-serif;
+ text-align: left;
+ width:67%;
+ }
+
+/*
+** Metadata divider
+*/
+hr.metadata_divider {
+ width:90%;
+ margin-left:5%;
+ border-top: solid white 0px;
+ border-right: solid white 0px;
+ border-bottom: solid black 1px;
+ border-left: solid white 0px;
+ }
+
+
+
+/*
+** To skip a banner item (Series|Published|Rating|Tags),
+** edit the appropriate CSS rule below.
+*/
+table.cbj_header tr.cbj_series {
+ /* Uncomment the next line to remove 'Series' from banner section */
+ /* display:none; */
+ }
+
+table.cbj_header tr.cbj_pubdata {
+ /* Uncomment the next line to remove 'Published (year of publication)' from banner section */
+ /* display:none; */
+ }
+
+table.cbj_header tr.cbj_rating {
+ /* Uncomment the next line to remove 'Rating' from banner section */
+ /* display:none; */
+ }
+
+table.cbj_header tr.cbj_tags {
+ /* Uncomment the next line to remove 'Tags' from banner section */
+ /* display:none; */
+ }
+
+hr {
+ /* This rule controls formatting for any hr elements contained in the jacket */
+ border-top: 0px solid white;
+ border-right: 0px solid white;
+ border-bottom: 2px solid black;
+ border-left: 0px solid white;
+ margin-left: 10%;
+ width: 80%;
+ }
+
+.cbj_footer {
+ font-family: sans-serif;
+ font-size: 0.8em;
+ margin-top: 8px;
+ text-align: center;
+ }
+
+.cbj_comments {
+ font-family: sans-serif;
+ }
diff --git a/ebook_converter/data/jacket/template.xhtml b/ebook_converter/data/jacket/template.xhtml
new file mode 100644
index 0000000..d71499a
--- /dev/null
+++ b/ebook_converter/data/jacket/template.xhtml
@@ -0,0 +1,58 @@
+
+
+ {title_str}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ebook_converter/data/lrf.xsl b/ebook_converter/data/lrf.xsl
new file mode 100644
index 0000000..d197ab6
--- /dev/null
+++ b/ebook_converter/data/lrf.xsl
@@ -0,0 +1,210 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ aut
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ bkp
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ application/xhtml+xml
+ .xhtml
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ no match for element: "
+
+ "
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Table of Contents
+
+
+
+
+
+
+
+
+ .xhtml#
+
+
+
+
+
+
+
+
+
diff --git a/ebook_converter/data/new_nav.html b/ebook_converter/data/new_nav.html
new file mode 100644
index 0000000..b4af9de
--- /dev/null
+++ b/ebook_converter/data/new_nav.html
@@ -0,0 +1,9 @@
+
+
+
+ Navigation
+
+
+
+
+
diff --git a/ebook_converter/ebooks/conversion/plugins/fb2_input.py b/ebook_converter/ebooks/conversion/plugins/fb2_input.py
index bddb692..e6ee0ab 100644
--- a/ebook_converter/ebooks/conversion/plugins/fb2_input.py
+++ b/ebook_converter/ebooks/conversion/plugins/fb2_input.py
@@ -6,6 +6,7 @@ __copyright__ = '2008, Anatoly Shipitsin '
Convert .fb2 files to .lrf
"""
import os, re
+import pkg_resources
from ebook_converter.customize.conversion import InputFormatPlugin, OptionRecommendation
from ebook_converter import guess_type
@@ -86,8 +87,9 @@ class FB2Input(InputFormatPlugin):
css = re.sub(r'name\s*=\s*', 'class=', css)
self.extract_embedded_content(doc)
log.debug('Converting XML to HTML...')
- with open(P('templates/fb2.xsl'), 'rb') as f:
- ss = f.read().decode('utf-8')
+ with open(pkg_resources.resource_filename('ebook_converter',
+ 'data/fb2.xsl'), 'rb') as f:
+ ss = f.read().decode()
ss = ss.replace("__FB_NS__", fb_ns)
if options.no_inline_fb2_toc:
log('Disabling generation of inline FB2 TOC')
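
The hunk above replaces calibre's P() data-file helper with pkg_resources, which resolves files shipped as package data. A minimal standalone sketch of the lookup, assuming the data/ directory is installed with the ebook_converter package:

    import pkg_resources

    name = 'data/fb2.xsl'
    if pkg_resources.resource_exists('ebook_converter', name):
        path = pkg_resources.resource_filename('ebook_converter', name)
        with open(path, 'rb') as f:
            xsl = f.read().decode('utf-8')
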
diff --git a/ebook_converter/ebooks/conversion/plugins/html_output.py b/ebook_converter/ebooks/conversion/plugins/html_output.py
index af839d5..d4eb1b8 100644
--- a/ebook_converter/ebooks/conversion/plugins/html_output.py
+++ b/ebook_converter/ebooks/conversion/plugins/html_output.py
@@ -6,6 +6,7 @@ __docformat__ = 'restructuredtext en'
import os, re, shutil
from os.path import dirname, abspath, relpath as _relpath, exists, basename
+import pkg_resources
from ebook_converter.customize.conversion import OutputFormatPlugin, OptionRecommendation
from ebook_converter import CurrentDir
@@ -95,19 +96,31 @@ class HTMLOutput(OutputFormatPlugin):
with open(opts.template_html_index, 'rb') as f:
template_html_index_data = f.read()
else:
- template_html_index_data = P('templates/html_export_default_index.tmpl', data=True)
+ with open(pkg_resources.
+ resource_filename('ebook_converter',
+ 'data/html_export_default_index.tmpl'),
+ 'rb') as fobj:
+ template_html_index_data = fobj.read()
if opts.template_html is not None:
with open(opts.template_html, 'rb') as f:
template_html_data = f.read()
else:
- template_html_data = P('templates/html_export_default.tmpl', data=True)
+ with open(pkg_resources.
+ resource_filename('ebook_converter',
+ 'data/html_export_default.tmpl'),
+ 'rb') as fobj:
+ template_html_data = fobj.read()
if opts.template_css is not None:
with open(opts.template_css, 'rb') as f:
template_css_data = f.read()
else:
- template_css_data = P('templates/html_export_default.css', data=True)
+ with open(pkg_resources.
+ resource_filename('ebook_converter',
+ 'data/html_export_default.css'),
+ 'rb') as fobj:
+ template_css_data = fobj.read()
template_html_index_data = template_html_index_data.decode('utf-8')
template_html_data = template_html_data.decode('utf-8')
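
The same open/read pattern appears three times in this hunk; a hypothetical helper (the name load_data is my own, not part of the patch) shows how the lookups could be consolidated while keeping the same bytes-in, decode-later behaviour:

    import pkg_resources

    def load_data(name):
        # Return the raw bytes of a file shipped under ebook_converter/data/.
        path = pkg_resources.resource_filename('ebook_converter', 'data/' + name)
        with open(path, 'rb') as fobj:
            return fobj.read()

    # e.g. template_css_data = load_data('html_export_default.css')
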
diff --git a/ebook_converter/ebooks/conversion/plugins/lrf_input.py b/ebook_converter/ebooks/conversion/plugins/lrf_input.py
index 78808f6..7f93995 100644
--- a/ebook_converter/ebooks/conversion/plugins/lrf_input.py
+++ b/ebook_converter/ebooks/conversion/plugins/lrf_input.py
@@ -7,6 +7,8 @@ __copyright__ = '2009, Kovid Goyal '
__docformat__ = 'restructuredtext en'
import os, sys
+import pkg_resources
+
from ebook_converter.customize.conversion import InputFormatPlugin
@@ -54,7 +56,12 @@ class LRFInput(InputFormatPlugin):
plot_map[ro] = imgstr[0].get('file')
self.log('Converting XML to HTML...')
- styledoc = safe_xml_fromstring(P('templates/lrf.xsl', data=True))
+
+ with open(pkg_resources.
+ resource_filename('ebook_converter',
+ 'data/lrf.xsl'), 'rb') as fobj:
+ # TODO(gryf): use etree.parse() here instead.
+ styledoc = safe_xml_fromstring(fobj.read())
media_type = MediaType()
styles = Styles()
text_block = TextBlock(styles, char_button_map, plot_map, log)
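
The TODO above points at parsing the stylesheet straight from disk; a sketch of that variant, assuming the parsed tree is later handed to lxml's XSLT (which accepts an ElementTree directly):

    import pkg_resources
    from lxml import etree

    xsl_path = pkg_resources.resource_filename('ebook_converter', 'data/lrf.xsl')
    styledoc = etree.parse(xsl_path)   # ElementTree, no manual read()/decode
    transform = etree.XSLT(styledoc)   # hypothetical downstream use
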
diff --git a/ebook_converter/ebooks/lrf/fonts.py b/ebook_converter/ebooks/lrf/fonts.py
index 0cbff70..3fe2cd4 100644
--- a/ebook_converter/ebooks/lrf/fonts.py
+++ b/ebook_converter/ebooks/lrf/fonts.py
@@ -5,29 +5,40 @@ __copyright__ = '2008, Kovid Goyal '
from PIL import ImageFont
+from ebook_converter.utils.fonts.scanner import font_scanner
+
'''
Default fonts used in the PRS500
'''
-LIBERATION_FONT_MAP = {
- 'Swis721 BT Roman' : 'LiberationSans-Regular',
- 'Dutch801 Rm BT Roman' : 'LiberationSerif-Regular',
- 'Courier10 BT Roman' : 'LiberationMono-Regular',
- }
-
+LIBERATION_FONT_MAP = {'Swis721 BT Roman': 'Liberation Sans Regular',
+ 'Dutch801 Rm BT Roman': 'Liberation Serif Regular',
+ 'Courier10 BT Roman': 'Liberation Mono Regular'}
+_LIB_CACHE = {}
FONT_FILE_MAP = {}
def get_font(name, size, encoding='unic'):
- '''
+ """
Get an ImageFont object by name.
@param size: Font height in pixels. To convert from pts:
sz in pixels = (dpi/72) * size in pts
- @param encoding: Font encoding to use. E.g. 'unic', 'symbol', 'ADOB', 'ADBE', 'aprm'
- @param manager: A dict that will store the PersistentTemporary
- '''
+ @param encoding: Font encoding to use. E.g. 'unic', 'symbol', 'ADOB',
+ 'ADBE', 'aprm'
+ """
if name in LIBERATION_FONT_MAP:
- return ImageFont.truetype(P('fonts/liberation/%s.ttf' % LIBERATION_FONT_MAP[name]), size, encoding=encoding)
+ if not _LIB_CACHE:
+ for key in font_scanner.cache['fonts']:
+ record = font_scanner.cache['fonts'][key]
+ _LIB_CACHE[record['family_name'] + ' ' +
+ record['subfamily_name']] = record['path']
+
+ fpath = _LIB_CACHE.get(LIBERATION_FONT_MAP[name])
+ if not fpath:
+ raise ValueError('No Liberation fonts were found on the '
+ 'system. Please install them before using '
+ 'the converter.')
+ return ImageFont.truetype(fpath, size, encoding=encoding)
elif name in FONT_FILE_MAP:
return ImageFont.truetype(FONT_FILE_MAP[name], size, encoding=encoding)
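
A short usage sketch of the rewritten lookup, assuming the Liberation fonts are installed on the system and indexed by font_scanner:

    from ebook_converter.ebooks.lrf.fonts import get_font

    # 'Swis721 BT Roman' maps to 'Liberation Sans Regular' via LIBERATION_FONT_MAP,
    # and _LIB_CACHE resolves that name to a font file found by font_scanner.
    font = get_font('Swis721 BT Roman', 16)
    print(font.getname())   # e.g. ('Liberation Sans', 'Regular')
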
diff --git a/ebook_converter/ebooks/oeb/polish/toc.py b/ebook_converter/ebooks/oeb/polish/toc.py
index 0c46ce3..c5b8cda 100644
--- a/ebook_converter/ebooks/oeb/polish/toc.py
+++ b/ebook_converter/ebooks/oeb/polish/toc.py
@@ -10,6 +10,7 @@ import re
from collections import Counter, OrderedDict
from functools import partial
from operator import itemgetter
+import pkg_resources
from lxml import etree
from lxml.builder import ElementMaker
@@ -690,7 +691,10 @@ def commit_nav_toc(container, toc, lang=None, landmarks=None, previous_nav=None)
if previous_nav is not None:
root = previous_nav[1]
else:
- root = container.parse_xhtml(P('templates/new_nav.html', data=True).decode('utf-8'))
+ with open(pkg_resources.
+ resource_filename('ebook_converter',
+ 'data/new_nav.html')) as fobj:
+ root = container.parse_xhtml(fobj.read())
container.replace(tocname, root)
else:
root = container.parsed(tocname)
diff --git a/ebook_converter/ebooks/oeb/transforms/jacket.py b/ebook_converter/ebooks/oeb/transforms/jacket.py
index dd3002c..f9666a9 100644
--- a/ebook_converter/ebooks/oeb/transforms/jacket.py
+++ b/ebook_converter/ebooks/oeb/transforms/jacket.py
@@ -9,13 +9,13 @@ __docformat__ = 'restructuredtext en'
import sys, os, re
from xml.sax.saxutils import escape
from string import Formatter
+import pkg_resources
from ebook_converter import guess_type, strftime
from ebook_converter.constants import iswindows
from ebook_converter.ebooks.oeb.base import XPath, XHTML_NS, XHTML, xml2text, urldefrag, urlnormalize
from ebook_converter.library.comments import comments_to_html, markdown
from ebook_converter.utils.date import is_date_undefined, as_local_time
-from ebook_converter.utils.icu import sort_key
from ebook_converter.ebooks.chardet import strip_encoding_declarations
from ebook_converter.ebooks.metadata import fmt_sidx, rating_to_stars
from ebook_converter.polyglot.builtins import unicode_type, map
@@ -196,7 +196,7 @@ class Tags(unicode_type):
def __new__(self, tags, output_profile):
tags = [escape(x) for x in tags or ()]
t = unicode_type.__new__(self, ', '.join(tags))
- t.alphabetical = ', '.join(sorted(tags, key=sort_key))
+ t.alphabetical = ', '.join(sorted(tags))
t.tags_list = tags
return t
@@ -232,8 +232,14 @@ def postprocess_jacket(root, output_profile, has_data):
def render_jacket(mi, output_profile,
alt_title=_('Unknown'), alt_tags=[], alt_comments='',
alt_publisher='', rescale_fonts=False, alt_authors=None):
- css = P('jacket/stylesheet.css', data=True).decode('utf-8')
- template = P('jacket/template.xhtml', data=True).decode('utf-8')
+ with open(pkg_resources.resource_filename('ebook_converter',
+ 'data/jacket/stylesheet.css'),
+ 'rb') as fobj:
+ css = fobj.read().decode()
+ with open(pkg_resources.resource_filename('ebook_converter',
+ 'data/jacket/template.xhtml'),
+ 'rb') as fobj:
+ template = fobj.read().decode()
+ template = re.sub(r'<!--.*?-->', '', template, flags=re.DOTALL)
css = re.sub(r'/\*.*?\*/', '', css, flags=re.DOTALL)
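
For reference, the two substitutions above strip XML comments from the jacket template and CSS comments from the stylesheet before the metadata is filled in; a tiny illustration with made-up input:

    import re

    template = '<div><!-- banner --><p>{title_str}</p></div>'
    css = '/* Banner */ .cbj_banner { color: black; }'
    print(re.sub(r'<!--.*?-->', '', template, flags=re.DOTALL))   # '<div><p>{title_str}</p></div>'
    print(re.sub(r'/\*.*?\*/', '', css, flags=re.DOTALL))         # ' .cbj_banner { color: black; }'
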
diff --git a/ebook_converter/spell/__init__.py b/ebook_converter/spell/__init__.py
index ed92137..8b5198f 100644
--- a/ebook_converter/spell/__init__.py
+++ b/ebook_converter/spell/__init__.py
@@ -6,6 +6,8 @@ __license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal '
from collections import namedtuple
+import json
+import pkg_resources
from ebook_converter.utils.localization import canonicalize_lang
@@ -17,9 +19,31 @@ ccodes, ccodemap, country_names = None, None, None
def get_codes():
global ccodes, ccodemap, country_names
if ccodes is None:
- from ebook_converter.utils.serialize import msgpack_loads
- data = msgpack_loads(P('localization/iso3166.calibre_msgpack', allow_user_override=False, data=True))
- ccodes, ccodemap, country_names = data['codes'], data['three_map'], data['names']
+ src = pkg_resources.resource_filename('ebook_converter',
+ 'data/iso_3166-1.json')
+ with open(src, 'rb') as f:
+ db = json.load(f)
+ codes = set()
+ three_map = {}
+ name_map = {}
+ unicode_type = type(u'')
+ for x in db['3166-1']:
+ two = x.get('alpha_2')
+ if two:
+ two = unicode_type(two)
+ codes.add(two)
+ name_map[two] = x.get('name')
+ if name_map[two]:
+ name_map[two] = unicode_type(name_map[two])
+ three = x.get('alpha_3')
+ if three:
+ three_map[unicode_type(three)] = two
+ data = {'names': name_map,
+ 'codes': frozenset(codes),
+ 'three_map': three_map}
+
+ ccodes, ccodemap, country_names = (data['codes'], data['three_map'],
+ data['names'])
return ccodes, ccodemap
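
For context, data/iso_3166-1.json follows the Debian iso-codes layout; a sketch of the shape get_codes() expects (the Polish entry is a real example, the rest of the file is elided):

    db = {
        '3166-1': [
            {'alpha_2': 'PL', 'alpha_3': 'POL', 'name': 'Poland', 'numeric': '616'},
            # ... one entry per country ...
        ]
    }
    # After get_codes() has run over such data:
    #   'PL' in ccodes        -> True
    #   ccodemap['POL']       -> 'PL'
    #   country_names['PL']   -> 'Poland'
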
diff --git a/ebook_converter/utils/config_base.py b/ebook_converter/utils/config_base.py
index 5cc91c9..76b84f3 100644
--- a/ebook_converter/utils/config_base.py
+++ b/ebook_converter/utils/config_base.py
@@ -6,6 +6,7 @@ import os, re, traceback, numbers
from functools import partial
from collections import defaultdict
from copy import deepcopy
+import pkg_resources
from ebook_converter.utils.lock import ExclusiveFile
from ebook_converter.constants import config_dir, CONFIG_DIR_MODE, ispy3, preferred_encoding, filesystem_encoding, iswindows
@@ -634,7 +635,9 @@ def read_custom_tweaks():
def default_tweaks_raw():
- return P('default_tweaks.py', data=True, allow_user_override=False)
+ with open(pkg_resources.resource_filename('ebook_converter',
+ 'data/default_tweaks.py')) as f:
+ return f.read().encode()
def read_tweaks():
diff --git a/ebook_converter/utils/fonts/scanner.py b/ebook_converter/utils/fonts/scanner.py
index eed0728..efea5f1 100644
--- a/ebook_converter/utils/fonts/scanner.py
+++ b/ebook_converter/utils/fonts/scanner.py
@@ -195,8 +195,7 @@ class FontScanner(Thread):
def __init__(self, folders=[], allowed_extensions={'ttf', 'otf'}):
Thread.__init__(self)
- self.folders = folders + font_dirs() + [os.path.join(config_dir, 'fonts'),
- P('fonts/liberation')]
+ self.folders = folders + font_dirs()
self.folders = [os.path.normcase(os.path.abspath(font)) for font in
self.folders]
self.font_families = ()
diff --git a/ebook_converter/utils/fonts/utils.py b/ebook_converter/utils/fonts/utils.py
index a1ef2e4..4a51f8c 100644
--- a/ebook_converter/utils/fonts/utils.py
+++ b/ebook_converter/utils/fonts/utils.py
@@ -451,6 +451,7 @@ def get_font_for_text(text, candidate_font_data=None):
def test_glyph_ids():
from ebook_converter.utils.fonts.free_type import FreeType
+ # TODO(gryf): move this test into the test suite
data = P('fonts/liberation/LiberationSerif-Regular.ttf', data=True)
ft = FreeType()
font = ft.load_font(data)
@@ -462,6 +463,7 @@ def test_glyph_ids():
def test_supports_text():
+ # TODO(gryf): move this test into the test suite
data = P('fonts/calibreSymbols.otf', data=True)
if not supports_text(data, '.★½'):
raise RuntimeError('Incorrectly returning that text is not supported')
@@ -470,6 +472,7 @@ def test_supports_text():
def test_find_font():
+ # TODO(gryf): move this test into the test suite
from ebook_converter.utils.fonts.scanner import font_scanner
abcd = '诶比西迪'
family = font_scanner.find_font_for_text(abcd)[0]
diff --git a/ebook_converter/utils/localization.py b/ebook_converter/utils/localization.py
index b1e86dc..ff93cfb 100644
--- a/ebook_converter/utils/localization.py
+++ b/ebook_converter/utils/localization.py
@@ -2,7 +2,7 @@ __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal '
__docformat__ = 'restructuredtext en'
-import os, locale, re, io, sys
+import re, io, sys
import json
from gettext import GNUTranslations, NullTranslations
import pkg_resources
@@ -12,64 +12,6 @@ from ebook_converter.polyglot.builtins import is_py3, iteritems, unicode_type
_available_translations = None
-def available_translations():
- global _available_translations
- if _available_translations is None:
- stats = P('localization/stats.calibre_msgpack', allow_user_override=False)
- if os.path.exists(stats):
- from ebook_converter.utils.serialize import msgpack_loads
- with open(stats, 'rb') as f:
- stats = msgpack_loads(f.read())
- else:
- stats = {}
- _available_translations = [x for x in stats if stats[x] > 0.1]
- return _available_translations
-
-
-def get_system_locale():
- from ebook_converter.constants import iswindows, isosx, plugins
- lang = None
- if iswindows:
- try:
- from ebook_converter.constants import get_windows_user_locale_name
- lang = get_windows_user_locale_name()
- lang = lang.strip()
- if not lang:
- lang = None
- except:
- pass # Windows XP does not have the GetUserDefaultLocaleName fn
- elif isosx:
- try:
- lang = plugins['usbobserver'][0].user_locale() or None
- except:
- # Fallback to environment vars if something bad happened
- import traceback
- traceback.print_exc()
- if lang is None:
- try:
- envvars = ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES', 'LANG']
- lang = locale.getdefaultlocale(envvars)[0]
-
- # lang is None in two cases: either the environment variable is not
- # set or it's "C". Stop looking for a language in the latter case.
- if lang is None:
- for var in envvars:
- if os.environ.get(var) == 'C':
- lang = 'en_US'
- break
- except:
- pass # This happens on Ubuntu apparently
- if lang is None and 'LANG' in os.environ: # Needed for OS X
- try:
- lang = os.environ['LANG']
- except:
- pass
- if lang:
- lang = lang.replace('-', '_')
- lang = '_'.join(lang.split('_')[:2])
- return lang
-
-
def sanitize_lang(lang):
if lang:
match = re.match('[a-z]{2,3}(_[A-Z]{2}){0,1}', lang)
@@ -83,101 +25,16 @@ def sanitize_lang(lang):
def get_lang():
- 'Try to figure out what language to display the interface in'
- from ebook_converter.utils.config_base import prefs
- lang = prefs['language']
- lang = os.environ.get('CALIBRE_OVERRIDE_LANG', lang)
- if lang:
- return lang
- try:
- lang = get_system_locale()
- except:
- import traceback
- traceback.print_exc()
- lang = None
- return sanitize_lang(lang)
+ return 'en_US'
def is_rtl():
return get_lang()[:2].lower() in {'he', 'ar'}
-def get_lc_messages_path(lang):
- hlang = None
- if zf_exists():
- if lang in available_translations():
- hlang = lang
- else:
- xlang = lang.split('_')[0].lower()
- if xlang in available_translations():
- hlang = xlang
- return hlang
-
-
-def zf_exists():
- return os.path.exists(P('localization/locales.zip',
- allow_user_override=False))
-
-
_lang_trans = None
-def get_all_translators():
- from zipfile import ZipFile
- with ZipFile(P('localization/locales.zip', allow_user_override=False), 'r') as zf:
- for lang in available_translations():
- mpath = get_lc_messages_path(lang)
- if mpath is not None:
- buf = io.BytesIO(zf.read(mpath + '/messages.mo'))
- yield lang, GNUTranslations(buf)
-
-
-def get_single_translator(mpath, which='messages'):
- from zipfile import ZipFile
- with ZipFile(P('localization/locales.zip', allow_user_override=False), 'r') as zf:
- path = '{}/{}.mo'.format(mpath, which)
- data = zf.read(path)
- buf = io.BytesIO(data)
- try:
- return GNUTranslations(buf)
- except Exception as e:
- import traceback
- traceback.print_exc()
- import hashlib
- sig = hashlib.sha1(data).hexdigest()
- raise ValueError('Failed to load translations for: {} (size: {} and signature: {}) with error: {}'.format(
- path, len(data), sig, e))
-
-
-def get_iso639_translator(lang):
- lang = sanitize_lang(lang)
- mpath = get_lc_messages_path(lang) if lang else None
- return get_single_translator(mpath, 'iso639') if mpath else None
-
-
-def get_translator(bcp_47_code):
- parts = bcp_47_code.replace('-', '_').split('_')[:2]
- parts[0] = lang_as_iso639_1(parts[0].lower()) or 'en'
- if len(parts) > 1:
- parts[1] = parts[1].upper()
- lang = '_'.join(parts)
- lang = {'pt':'pt_BR', 'zh':'zh_CN'}.get(lang, lang)
- available = available_translations()
- found = True
- if lang == 'en' or lang.startswith('en_'):
- return found, lang, NullTranslations()
- if lang not in available:
- lang = {'pt':'pt_BR', 'zh':'zh_CN'}.get(parts[0], parts[0])
- if lang not in available:
- lang = get_lang()
- if lang not in available:
- lang = 'en'
- found = False
- if lang == 'en':
- return True, lang, NullTranslations()
- return found, lang, get_single_translator(lang)
-
-
lcdata = {
'abday': ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'),
'abmon': ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
@@ -208,62 +65,9 @@ def load_po(path):
def set_translators():
- global _lang_trans, lcdata
- # To test different translations invoke as
- # CALIBRE_OVERRIDE_LANG=de_DE.utf8 program
- lang = get_lang()
- t = buf = iso639 = None
-
- if 'CALIBRE_TEST_TRANSLATION' in os.environ:
- buf = load_po(os.path.expanduser(os.environ['CALIBRE_TEST_TRANSLATION']))
-
- if lang:
- mpath = get_lc_messages_path(lang)
- if buf is None and mpath and os.access(mpath + '.po', os.R_OK):
- buf = load_po(mpath + '.po')
-
- if mpath is not None:
- from zipfile import ZipFile
- with ZipFile(P('localization/locales.zip',
- allow_user_override=False), 'r') as zf:
- if buf is None:
- buf = io.BytesIO(zf.read(mpath + '/messages.mo'))
- if mpath == 'nds':
- mpath = 'de'
- isof = mpath + '/iso639.mo'
- try:
- iso639 = io.BytesIO(zf.read(isof))
- except:
- pass # No iso639 translations for this lang
- if buf is not None:
- from ebook_converter.utils.serialize import msgpack_loads
- try:
- lcdata = msgpack_loads(zf.read(mpath + '/lcdata.calibre_msgpack'))
- except:
- pass # No lcdata
-
- if buf is not None:
- t = GNUTranslations(buf)
- if iso639 is not None:
- iso639 = _lang_trans = GNUTranslations(iso639)
- t.add_fallback(iso639)
-
- if t is None:
- t = NullTranslations()
-
- try:
- set_translators.lang = t.info().get('language')
- except Exception:
- pass
- if is_py3:
- t.install(names=('ngettext',))
- else:
- t.install(unicode=True, names=('ngettext',))
- # Now that we have installed a translator, we have to retranslate the help
- # for the global prefs object as it was instantiated in get_lang(), before
- # the translator was installed.
- from ebook_converter.utils.config_base import prefs
- prefs.retranslate_help()
+ t = NullTranslations()
+ set_translators.lang = t.info().get('language')
+ t.install(names=('ngettext',))
set_translators.lang = None
@@ -535,53 +339,5 @@ def get_udc():
return _udc
-def user_manual_stats():
- stats = getattr(user_manual_stats, 'stats', None)
- if stats is None:
- import json
- try:
- stats = json.loads(P('user-manual-translation-stats.json', allow_user_override=False, data=True))
- except EnvironmentError:
- stats = {}
- user_manual_stats.stats = stats
- return stats
-
-
def localize_user_manual_link(url):
- #lc = lang_as_iso639_1(get_lang())
- # if lc == 'en':
return url
- # stats = user_manual_stats()
- # if stats.get(lc, 0) < 0.3:
- # return url
- # from polyglot.urllib import urlparse, urlunparse
- # parts = urlparse(url)
- # path = re.sub(r'/generated/[a-z]+/', '/generated/%s/' % lc, parts.path or '')
- # path = '/%s%s' % (lc, path)
- # parts = list(parts)
- # parts[2] = path
- # return urlunparse(parts)
-
-
-def website_languages():
- stats = getattr(website_languages, 'stats', None)
- if stats is None:
- try:
- stats = frozenset(P('localization/website-languages.txt', allow_user_override=False, data=True).split())
- except EnvironmentError:
- stats = frozenset()
- website_languages.stats = stats
- return stats
-
-
-def localize_website_link(url):
- lc = lang_as_iso639_1(get_lang())
- langs = website_languages()
- if lc == 'en' or lc not in langs:
- return url
- from ebook_converter.polyglot.urllib import urlparse, urlunparse
- parts = urlparse(url)
- path = '/{}{}'.format(lc, parts.path)
- parts = list(parts)
- parts[2] = path
- return urlunparse(parts)
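
With the locale machinery removed, set_translators() amounts to installing gettext's NullTranslations, which exposes identity _() and ngettext() builtins; a minimal sketch of the resulting behaviour:

    from gettext import NullTranslations

    t = NullTranslations()
    t.install(names=('ngettext',))         # puts _() and ngettext() into builtins

    print(_('Unknown'))                    # -> 'Unknown' (strings pass through untranslated)
    print(ngettext('book', 'books', 2))    # -> 'books'
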
diff --git a/setup.cfg b/setup.cfg
index 56933a2..c62710a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -34,6 +34,7 @@ install_requires =
msgpack
html5-parser
odfpy
+ setuptools
[options.entry_points]
console_scripts =