Mirror of https://github.com/gryf/ebook-converter.git (synced 2026-01-19 11:04:12 +01:00)
Clean up a couple of not used localize functions
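In short, the calibre-specific name is dropped: call sites import and use langcode_to_name() from ebook_converter.utils.localization instead of calibre_langcode_to_name(), and the unused helpers (lang_map(), lang_map_for_ui(), langnames_to_langcodes()) together with the _extra_lang_codes table are deleted. A minimal before/after sketch of an affected call site (the variable name is just a placeholder, not code from the repository):

    from ebook_converter.utils.localization import (canonicalize_lang, get_lang,
                                                    langcode_to_name)

    # before: calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False)
    # after:
    language_name = langcode_to_name(canonicalize_lang(get_lang()), localize=False)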
@@ -7,13 +7,9 @@ from ebook_converter.customize.conversion import OptionRecommendation, DummyRepo
 from ebook_converter.library import current_library_name
 from ebook_converter.library.catalogs import AuthorSortMismatchException, EmptyCatalogException
 from ebook_converter.ptempfile import PersistentTemporaryFile
-from ebook_converter.utils.localization import calibre_langcode_to_name, canonicalize_lang, get_lang
+from ebook_converter.utils.localization import langcode_to_name, canonicalize_lang, get_lang
 
 
-__license__ = 'GPL v3'
-__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
-__docformat__ = 'restructuredtext en'
-
 Option = namedtuple('Option', 'option, default, dest, action, help')
 
 
@@ -293,7 +289,7 @@ class EPUB_MOBI(CatalogPlugin):
                 self.fmt,
                 'for %s ' % opts.output_profile if opts.output_profile else '',
                 'CLI' if opts.cli_environment else 'GUI',
-                calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False))
+                langcode_to_name(canonicalize_lang(get_lang()), localize=False))
             )
 
         # If exclude_genre is blank, assume user wants all tags as genres
@@ -13,12 +13,7 @@ from ebook_converter.utils.config import tweaks
 from ebook_converter.utils.titlecase import titlecase
 from ebook_converter.utils.icu import capitalize, strcmp, sort_key
 from ebook_converter.utils.date import parse_date, format_date, now, UNDEFINED_DATE
-from ebook_converter.utils.localization import calibre_langcode_to_name, canonicalize_lang
-
-
-__license__ = 'GPL v3'
-__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
-__docformat__ = 'restructuredtext en'
+from ebook_converter.utils.localization import langcode_to_name, canonicalize_lang
 
 
 class FormatterFunctions(object):
@@ -1426,7 +1421,7 @@ class BuiltinLanguageStrings(BuiltinFormatterFunction):
         retval = []
         for c in [c.strip() for c in lang_codes.split(',') if c.strip()]:
             try:
-                n = calibre_langcode_to_name(c)
+                n = langcode_to_name(c)
                 if n:
                     retval.append(n)
             except:
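For illustration, running the same loop outside the template engine would look like the sketch below; the resulting names assume the ISO 639 data shipped with ebook-converter resolves 'eng' and 'pol' in the usual way:

    from ebook_converter.utils.localization import langcode_to_name

    retval = []
    for c in [c.strip() for c in 'eng, pol, xyz'.split(',') if c.strip()]:
        try:
            n = langcode_to_name(c)
            if n:
                retval.append(n)
        except Exception:
            pass
    # Expected: 'English, Polish, xyz' -- langcode_to_name() falls back to
    # returning the code itself when it is not in the ISO 639 table.
    print(', '.join(retval))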
@@ -32,68 +32,10 @@ lcdata = {'abday': ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'),
 
 
 _iso639 = None
-_extra_lang_codes = {'pt_BR': 'Brazilian Portuguese',
-                     'en_GB': 'English (UK)',
-                     'zh_CN': 'Simplified Chinese',
-                     'zh_TW': 'Traditional Chinese',
-                     'en': 'English',
-                     'en_US': 'English (United States)',
-                     'en_AR': 'English (Argentina)',
-                     'en_AU': 'English (Australia)',
-                     'en_JP': 'English (Japan)',
-                     'en_DE': 'English (Germany)',
-                     'en_BG': 'English (Bulgaria)',
-                     'en_EG': 'English (Egypt)',
-                     'en_NZ': 'English (New Zealand)',
-                     'en_CA': 'English (Canada)',
-                     'en_GR': 'English (Greece)',
-                     'en_IN': 'English (India)',
-                     'en_NP': 'English (Nepal)',
-                     'en_TH': 'English (Thailand)',
-                     'en_TR': 'English (Turkey)',
-                     'en_CY': 'English (Cyprus)',
-                     'en_CZ': 'English (Czech Republic)',
-                     'en_PH': 'English (Philippines)',
-                     'en_PK': 'English (Pakistan)',
-                     'en_PL': 'English (Poland)',
-                     'en_HR': 'English (Croatia)',
-                     'en_HU': 'English (Hungary)',
-                     'en_ID': 'English (Indonesia)',
-                     'en_IL': 'English (Israel)',
-                     'en_RU': 'English (Russia)',
-                     'en_SG': 'English (Singapore)',
-                     'en_YE': 'English (Yemen)',
-                     'en_IE': 'English (Ireland)',
-                     'en_CN': 'English (China)',
-                     'en_TW': 'English (Taiwan)',
-                     'en_ZA': 'English (South Africa)',
-                     'es_PY': 'Spanish (Paraguay)',
-                     'es_UY': 'Spanish (Uruguay)',
-                     'es_AR': 'Spanish (Argentina)',
-                     'es_CR': 'Spanish (Costa Rica)',
-                     'es_MX': 'Spanish (Mexico)',
-                     'es_CU': 'Spanish (Cuba)',
-                     'es_CL': 'Spanish (Chile)',
-                     'es_EC': 'Spanish (Ecuador)',
-                     'es_HN': 'Spanish (Honduras)',
-                     'es_VE': 'Spanish (Venezuela)',
-                     'es_BO': 'Spanish (Bolivia)',
-                     'es_NI': 'Spanish (Nicaragua)',
-                     'es_CO': 'Spanish (Colombia)',
-                     'de_AT': 'German (AT)',
-                     'fr_BE': 'French (BE)',
-                     'nl': 'Dutch (NL)',
-                     'nl_BE': 'Dutch (BE)',
-                     'und': 'Unknown'}
-
-_lcase_map = {}
-for k in _extra_lang_codes:
-    _lcase_map[k.lower()] = k
-
 
 def _load_iso639():
     global _iso639
 
     # NOTE(gryf): msgpacked data was originally added for speed purposes. In
     # my tests, I cannot see any speed gain either on python2 or python3. It
     # is even slower (around 4-8 times), than just using code below (which is
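The NOTE refers to building the ISO 639 tables directly in Python instead of unpacking a msgpack blob; the actual loader is cut off in this hunk. A minimal sketch of that style of loader, assuming a hypothetical iso639.json file of three-letter-code/name pairs, would only need to fill the two keys the rest of the module relies on ('by_3' for langcode_to_name() and 'name_map' for canonicalize_lang()):

    import json
    import os

    def _load_iso639_sketch():
        # Hypothetical loader, not the repository's code. Assumed data file:
        # {"eng": "English", "pol": "Polish", ...}
        path = os.path.join(os.path.dirname(__file__), 'iso639.json')
        with open(path, encoding='utf-8') as fobj:
            by_3 = json.load(fobj)
        # canonicalize_lang() looks raw names up in 'name_map', so map
        # lower-cased names back to their three-letter codes.
        name_map = {name.lower(): code for code, name in by_3.items()}
        return {'by_3': by_3, 'name_map': name_map}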
@@ -155,12 +97,11 @@ def get_iso_language(lang_trans, lang):
     return lang_trans(ans)
 
 
-def calibre_langcode_to_name(lc, localize=True):
+def langcode_to_name(lc, localize=True):
     iso639 = _load_iso639()
-    translate = lambda x: x
     try:
-        return translate(iso639['by_3'][lc])
-    except:
+        return iso639['by_3'][lc]
+    except Exception:
         pass
     return lc
 
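Apart from dropping the no-op translate indirection, the renamed function behaves the same: a known three-letter code resolves to its English name, anything else falls back to the code that was passed in. A quick, hypothetical interactive check (the exact return values depend on the bundled ISO 639 data):

    >>> from ebook_converter.utils.localization import canonicalize_lang, langcode_to_name
    >>> langcode_to_name(canonicalize_lang('en'))
    'English'
    >>> langcode_to_name('xyz')  # unknown code is returned unchanged
    'xyz'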
@@ -190,52 +131,6 @@ def canonicalize_lang(raw):
     return iso639['name_map'].get(raw, None)
 
 
-_lang_map = None
-
-
-def lang_map():
-    ' Return mapping of ISO 639 3 letter codes to localized language names '
-    iso639 = _load_iso639()
-    translate = _
-    global _lang_map
-    if _lang_map is None:
-        _lang_map = {k:translate(v) for k, v in iso639['by_3'].items()}
-    return _lang_map
-
-
-def lang_map_for_ui():
-    ans = getattr(lang_map_for_ui, 'ans', None)
-    if ans is None:
-        ans = lang_map().copy()
-        for x in ('zxx', 'mis', 'mul'):
-            ans.pop(x, None)
-        lang_map_for_ui.ans = ans
-    return ans
-
-
-def langnames_to_langcodes(names):
-    '''
-    Given a list of localized language names return a mapping of the names to 3
-    letter ISO 639 language codes. If a name is not recognized, it is mapped to
-    None.
-    '''
-    iso639 = _load_iso639()
-    translate = _
-    ans = {}
-    names = set(names)
-    for k, v in iso639['by_3'].items():
-        tv = translate(v)
-        if tv in names:
-            names.remove(tv)
-            ans[tv] = k
-        if not names:
-            break
-    for x in names:
-        ans[x] = None
-
-    return ans
-
-
 def lang_as_iso639_1(name_or_code):
     code = canonicalize_lang(name_or_code)
     if code is not None:
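Nothing in the converter calls the deleted helpers, which is why they go. Should an equivalent of the removed lang_map() ever be needed again, the data it exposed is still reachable through the module's own loader; a sketch built only on what remains after this commit (leaving aside the translate step the old helper applied):

    from ebook_converter.utils.localization import _load_iso639

    # Plain dict of three-letter code -> language name, the same mapping the
    # removed lang_map() cached.
    code_to_name = dict(_load_iso639()['by_3'])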