Mirror of https://github.com/gryf/ebook-converter.git (synced 2026-04-23 14:41:30 +02:00)
Removed iteritems and itervalues, which are redundant
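The change is mechanical: under Python 3 the polyglot compatibility helpers iteritems() and itervalues() are thin wrappers around the native dict view methods, so every call site can use .items() and .values() directly and the import can be dropped. A minimal sketch of the pattern (illustrative only, the dict below is not code from the repository):

# Illustrative sketch of the pattern this commit applies throughout;
# the dict and keys are made up, not taken from the repository.
d = {'spam': 1, 'eggs': 2}

# Before: compatibility helpers imported from
# ebook_converter.polyglot.builtins; on Python 3 they were roughly
#     def iteritems(d): return iter(d.items())
#     def itervalues(d): return iter(d.values())
# so loops looked like:
#     for k, v in iteritems(d): ...
#     for v in itervalues(d): ...

# After: the plain dict view methods, which behave the same on Python 3.
for k, v in d.items():
    print(k, v)
for v in d.values():
    print(v)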
@@ -17,7 +17,6 @@ from ebook_converter import (isbytestring, as_unicode, get_types_map)
from ebook_converter.ebooks.oeb.parse_utils import barename, XHTML_NS, namespace, XHTML, parse_html, NotHTML
from ebook_converter.utils.cleantext import clean_xml_chars
from ebook_converter.utils.short_uuid import uuid4
-from ebook_converter.polyglot.builtins import iteritems, itervalues
from ebook_converter.polyglot.urllib import unquote as urlunquote

@@ -700,7 +699,7 @@ class Metadata(object):
term = CALIBRE(local)
self.term = term
self.value = value
-for attr, value in tuple(iteritems(attrib)):
+for attr, value in tuple(attrib.items()):
if isprefixname(value):
attrib[attr] = qname(value, nsmap)
nsattr = Metadata.OPF_ATTRS.get(attr, attr)

@@ -857,7 +856,7 @@ class Metadata(object):
def to_opf1(self, parent=None):
nsmap = self._opf1_nsmap
-nsrmap = {value: key for key, value in iteritems(nsmap)}
+nsrmap = {value: key for key, value in nsmap.items()}
elem = element(parent, 'metadata', nsmap=nsmap)
dcmeta = element(elem, 'dc-metadata', nsmap=OPF1_NSMAP)
xmeta = element(elem, 'x-metadata')

@@ -871,7 +870,7 @@ class Metadata(object):
def to_opf2(self, parent=None):
nsmap = self._opf2_nsmap
-nsrmap = {value: key for key, value in iteritems(nsmap)}
+nsrmap = {value: key for key, value in nsmap.items()}
elem = element(parent, OPF('metadata'), nsmap=nsmap)
for term in self.items:
for item in self.items[term]:

@@ -1424,7 +1423,7 @@ class Guide(object):
@property
def item(self):
"""The manifest item associated with this reference."""
path = urllib.parse.urldefrag(self.href)[0]
hrefs = self.oeb.manifest.hrefs
return hrefs.get(path, None)

@@ -1444,7 +1443,7 @@ class Guide(object):
return self.refs.pop(type, None)

def remove_by_href(self, href):
-remove = [r for r, i in iteritems(self.refs) if i.href == href]
+remove = [r for r, i in self.refs.items() if i.href == href]
for r in remove:
self.remove(r)

@@ -1454,7 +1453,7 @@ class Guide(object):
__iter__ = iterkeys

def values(self):
-return sorted(itervalues(self.refs), key=lambda ref: ref.ORDER.get(ref.type, 10000))
+return sorted(self.refs.values(), key=lambda ref: ref.ORDER.get(ref.type, 10000))

def items(self):
for type, ref in self.refs.items():
@@ -5,7 +5,6 @@ from css_parser.css import PropertyValue
from css_parser import profile as cssprofiles, CSSParser
from ebook_converter.tinycss.fonts3 import parse_font, serialize_font_family
from ebook_converter.ebooks.oeb.base import css_text
-from ebook_converter.polyglot.builtins import iteritems

__license__ = 'GPL v3'

@@ -137,7 +136,7 @@ def normalize_border(name, cssvalue):
style = normalizers['border-' + EDGES[0]]('border-' + EDGES[0], cssvalue)
vals = style.copy()
for edge in EDGES[1:]:
-style.update({k.replace(EDGES[0], edge):v for k, v in iteritems(vals)})
+style.update({k.replace(EDGES[0], edge):v for k, v in vals.items()})
return style

@@ -251,7 +250,7 @@ def condense_rule(style):
if prop.name and prop.name.startswith(x):
expanded[x].append(prop)
break
-for prefix, vals in iteritems(expanded):
+for prefix, vals in expanded.items():
if len(vals) > 1 and {x.priority for x in vals} == {''}:
condensers[prefix[:-1]](style, vals)

@@ -277,18 +276,35 @@ def test_normalization(return_tests=False): # {{{
ans.update(expected)
return ans

-for raw, expected in iteritems({
-'some_font': {'font-family':'some_font'}, 'inherit':{k:'inherit' for k in font_composition},
-'1.2pt/1.4 A_Font': {'font-family':'A_Font', 'font-size':'1.2pt', 'line-height':'1.4'},
-'bad font': {'font-family':'"bad font"'}, '10% serif': {'font-family':'serif', 'font-size':'10%'},
-'12px "My Font", serif': {'font-family':'"My Font", serif', 'font-size': '12px'},
-'normal 0.6em/135% arial,sans-serif': {'font-family': 'arial, sans-serif', 'font-size': '0.6em', 'line-height':'135%', 'font-style':'normal'},
-'bold italic large serif': {'font-family':'serif', 'font-weight':'bold', 'font-style':'italic', 'font-size':'large'},
-'bold italic small-caps larger/normal serif':
-{'font-family':'serif', 'font-weight':'bold', 'font-style':'italic', 'font-size':'larger',
-'line-height':'normal', 'font-variant':'small-caps'},
-'2em A B': {'font-family': '"A B"', 'font-size': '2em'},
-}):
+for raw, expected in {'some_font': {'font-family':'some_font'},
+'inherit':{k:'inherit' for k in font_composition},
+'1.2pt/1.4 A_Font': {'font-family':'A_Font',
+'font-size':'1.2pt',
+'line-height':'1.4'},
+'bad font': {'font-family':'"bad font"'},
+'10% serif': {'font-family':'serif',
+'font-size':'10%'},
+'12px "My Font", serif':
+{'font-family':'"My Font", serif',
+'font-size': '12px'},
+'normal 0.6em/135% arial,sans-serif':
+{'font-family': 'arial, sans-serif',
+'font-size': '0.6em',
+'line-height':'135%',
+'font-style':'normal'},
+'bold italic large serif': {'font-family':'serif',
+'font-weight':'bold',
+'font-style':'italic',
+'font-size':'large'},
+'bold italic small-caps larger/normal serif':
+{'font-family':'serif',
+'font-weight':'bold',
+'font-style':'italic',
+'font-size':'larger',
+'line-height':'normal',
+'font-variant':'small-caps'},
+'2em A B': {'font-family': '"A B"',
+'font-size': '2em'}}.items():
val = tuple(parseStyle('font: %s' % raw, validate=False))[0].cssValue
style = normalizers['font']('font', val)
self.assertDictEqual(font_dict(expected), style, raw)

@@ -296,7 +312,7 @@ def test_normalization(return_tests=False): # {{{
def test_border_normalization(self):
def border_edge_dict(expected, edge='right'):
ans = {'border-%s-%s' % (edge, x): DEFAULTS['border-%s-%s' % (edge, x)] for x in ('style', 'width', 'color')}
-for x, v in iteritems(expected):
+for x, v in expected.items():
ans['border-%s-%s' % (edge, x)] = v
return ans

@@ -312,39 +328,41 @@ def test_normalization(return_tests=False): # {{{
ans['border-%s-%s' % (edge, val)] = expected
return ans

-for raw, expected in iteritems({
-'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
-'1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
-'2em groove': {'width':'2em', 'style':'groove'},
-}):
+for raw, expected in {'solid 1px red': {'color':'red',
+'width':'1px',
+'style':'solid'},
+'1px': {'width': '1px'},
+'#aaa': {'color': '#aaa'},
+'2em groove': {'width':'2em',
+'style':'groove'}}.items():
for edge in EDGES:
br = 'border-%s' % edge
val = tuple(parseStyle('%s: %s' % (br, raw), validate=False))[0].cssValue
self.assertDictEqual(border_edge_dict(expected, edge), normalizers[br](br, val))

-for raw, expected in iteritems({
-'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
-'1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
-'thin groove': {'width':'thin', 'style':'groove'},
-}):
+for raw, expected in {'solid 1px red': {'color':'red',
+'width':'1px',
+'style':'solid'},
+'1px': {'width': '1px'},
+'#aaa': {'color': '#aaa'},
+'thin groove': {'width':'thin',
+'style':'groove'}}.items():
val = tuple(parseStyle('%s: %s' % ('border', raw), validate=False))[0].cssValue
self.assertDictEqual(border_dict(expected), normalizers['border']('border', val))

-for name, val in iteritems({
-'width': '10%', 'color': 'rgb(0, 1, 1)', 'style': 'double',
-}):
+for name, val in {'width': '10%',
+'color': 'rgb(0, 1, 1)',
+'style': 'double'}.items():
cval = tuple(parseStyle('border-%s: %s' % (name, val), validate=False))[0].cssValue
self.assertDictEqual(border_val_dict(val, name), normalizers['border-'+name]('border-'+name, cval))

def test_edge_normalization(self):
def edge_dict(prefix, expected):
return {'%s-%s' % (prefix, edge) : x for edge, x in zip(EDGES, expected)}
-for raw, expected in iteritems({
-'2px': ('2px', '2px', '2px', '2px'),
-'1em 2em': ('1em', '2em', '1em', '2em'),
-'1em 2em 3em': ('1em', '2em', '3em', '2em'),
-'1 2 3 4': ('1', '2', '3', '4'),
-}):
+for raw, expected in {'2px': ('2px', '2px', '2px', '2px'),
+'1em 2em': ('1em', '2em', '1em', '2em'),
+'1em 2em 3em': ('1em', '2em', '3em', '2em'),
+'1 2 3 4': ('1', '2', '3', '4')}.items():
for prefix in ('margin', 'padding'):
cval = tuple(parseStyle('%s: %s' % (prefix, raw), validate=False))[0].cssValue
self.assertDictEqual(edge_dict(prefix, expected), normalizers[prefix](prefix, cval))

@@ -352,14 +370,13 @@ def test_normalization(return_tests=False): # {{{
def test_list_style_normalization(self):
def ls_dict(expected):
ans = {'list-style-%s' % x : DEFAULTS['list-style-%s' % x] for x in ('type', 'image', 'position')}
-for k, v in iteritems(expected):
+for k, v in expected.items():
ans['list-style-%s' % k] = v
return ans
-for raw, expected in iteritems({
-'url(http://www.example.com/images/list.png)': {'image': 'url(http://www.example.com/images/list.png)'},
-'inside square': {'position':'inside', 'type':'square'},
-'upper-roman url(img) outside': {'position':'outside', 'type':'upper-roman', 'image':'url(img)'},
-}):
+for raw, expected in {'url(http://www.example.com/images/list.png)': {'image': 'url(http://www.example.com/images/list.png)'},
+'inside square': {'position':'inside', 'type':'square'},
+'upper-roman url(img) outside': {'position':'outside', 'type':'upper-roman', 'image':'url(img)'},
+}.items():
cval = tuple(parseStyle('list-style: %s' % raw, validate=False))[0].cssValue
self.assertDictEqual(ls_dict(expected), normalizers['list-style']('list-style', cval))

@@ -379,20 +396,18 @@ def test_normalization(return_tests=False): # {{{
ae({'list-style', 'list-style-image', 'list-style-type', 'list-style-position'}, normalize_filter_css({'list-style'}))

def test_edge_condensation(self):
-for s, v in iteritems({
-(1, 1, 3) : None,
-(1, 2, 3, 4) : '2pt 3pt 4pt 1pt',
-(1, 2, 3, 2) : '2pt 3pt 2pt 1pt',
-(1, 2, 1, 3) : '2pt 1pt 3pt',
-(1, 2, 1, 2) : '2pt 1pt',
-(1, 1, 1, 1) : '1pt',
-('2%', '2%', '2%', '2%') : '2%',
-tuple('0 0 0 0'.split()) : '0',
-}):
+for s, v in {(1, 1, 3) : None,
+(1, 2, 3, 4) : '2pt 3pt 4pt 1pt',
+(1, 2, 3, 2) : '2pt 3pt 2pt 1pt',
+(1, 2, 1, 3) : '2pt 1pt 3pt',
+(1, 2, 1, 2) : '2pt 1pt',
+(1, 1, 1, 1) : '1pt',
+('2%', '2%', '2%', '2%') : '2%',
+tuple('0 0 0 0'.split()) : '0'}.items():
for prefix in ('margin', 'padding'):
css = {'%s-%s' % (prefix, x) : str(y)+'pt' if isinstance(y, numbers.Number) else y
for x, y in zip(('left', 'top', 'right', 'bottom'), s)}
-css = '; '.join(('%s:%s' % (k, v) for k, v in iteritems(css)))
+css = '; '.join(('%s:%s' % (k, v) for k, v in css.items()))
style = parseStyle(css)
condense_rule(style)
val = getattr(style.getProperty(prefix), 'value', None)
@@ -6,7 +6,6 @@ from ebook_converter import xml_replace_entities, force_unicode
from ebook_converter.utils.xml_parse import safe_xml_fromstring
from ebook_converter.constants import filesystem_encoding
from ebook_converter.ebooks.chardet import xml_to_unicode, strip_encoding_declarations
-from ebook_converter.polyglot.builtins import iteritems, itervalues

__license__ = 'GPL v3'

@@ -136,8 +135,8 @@ def clean_word_doc(data, log):

def ensure_namespace_prefixes(node, nsmap):
-namespace_uris = frozenset(itervalues(nsmap))
-fnsmap = {k:v for k, v in iteritems(node.nsmap) if v not in namespace_uris}
+namespace_uris = frozenset(nsmap.values())
+fnsmap = {k:v for k, v in node.nsmap.items() if v not in namespace_uris}
fnsmap.update(nsmap)
if fnsmap != dict(node.nsmap):
node = clone_element(node, nsmap=fnsmap, in_context=False)

@@ -230,7 +229,7 @@ def parse_html(data, log=None, decoder=None, preprocessor=None,
for x in data.iterdescendants():
try:
x.tag = x.tag.lower()
-for key, val in list(iteritems(x.attrib)):
+for key, val in tuple(x.attrib.items()):
del x.attrib[key]
key = key.lower()
x.attrib[key] = val
@@ -49,7 +49,6 @@ from ebook_converter.utils.ipc.simple_worker import WorkerError, fork_job
from ebook_converter.utils.logging import default_log
from ebook_converter.utils.xml_parse import safe_xml_fromstring
from ebook_converter.utils.zipfile import ZipFile
-from ebook_converter.polyglot.builtins import iteritems

exists, join, relpath = os.path.exists, os.path.join, os.path.relpath

@@ -306,7 +305,7 @@ class Container(ContainerBase): # {{{
'tweak_mode': self.tweak_mode,
'name_path_map': {
name:os.path.join(dest_dir, os.path.relpath(path, self.root))
-for name, path in iteritems(self.name_path_map)}
+for name, path in self.name_path_map.items()}
}

def clone_data(self, dest_dir):

@@ -667,7 +666,7 @@ class Container(ContainerBase): # {{{
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
ans[item.get('media-type').lower()].append(self.href_to_name(
item.get('href'), self.opf_name))
-return {mt:tuple(v) for mt, v in iteritems(ans)}
+return {mt:tuple(v) for mt, v in ans.items()}

def manifest_items_with_property(self, property_name):
' All manifest items that have the specified property '

@@ -685,7 +684,7 @@ class Container(ContainerBase): # {{{
predicate = predicate.__eq__
elif hasattr(predicate, '__contains__'):
predicate = predicate.__contains__
-for mt, names in iteritems(self.manifest_type_map):
+for mt, names in self.manifest_type_map.items():
if predicate(mt):
for name in names:
yield name

@@ -807,7 +806,7 @@ class Container(ContainerBase): # {{{
the form (name, linear). Will raise an error if one of the names is not
present in the manifest. '''
imap = self.manifest_id_map
-imap = {name:item_id for item_id, name in iteritems(imap)}
+imap = {name:item_id for item_id, name in imap.items()}
items = [item for item, name, linear in self.spine_iter]
tail, last_tail = (items[0].tail, items[-1].tail) if items else ('\n ', '\n ')
tuple(map(self.remove_from_xml, items))

@@ -1071,7 +1070,7 @@ class Container(ContainerBase): # {{{
if set(self.name_path_map) != set(other.name_path_map):
return 'Set of files is not the same'
mismatches = []
-for name, path in iteritems(self.name_path_map):
+for name, path in self.name_path_map.items():
opath = other.name_path_map[name]
with open(path, 'rb') as f1, open(opath, 'rb') as f2:
if f1.read() != f2.read():

@@ -1275,7 +1274,7 @@ class EpubContainer(Container):
return

package_id = raw_unique_identifier = idpf_key = None
-for attrib, val in iteritems(self.opf.attrib):
+for attrib, val in self.opf.attrib.items():
if attrib.endswith('unique-identifier'):
package_id = val
break

@@ -1304,7 +1303,7 @@ class EpubContainer(Container):
self.log.exception('Failed to parse obfuscation key')
key = None

-for font, alg in iteritems(fonts):
+for font, alg in fonts.items():
tkey = key if alg == ADOBE_OBFUSCATION else idpf_key
if not tkey:
raise ObfuscationKeyMissing('Failed to find obfuscation key')

@@ -1374,7 +1373,7 @@ class EpubContainer(Container):
et = et.encode('ascii')
f.write(et)
zip_rebuilder(self.root, outpath)
-for name, data in iteritems(restore_fonts):
+for name, data in restore_fonts.items():
with self.open(name, 'wb') as f:
f.write(data)
@@ -10,7 +10,6 @@ from ebook_converter.ebooks.oeb.normalize_css import normalize_filter_css, norma
from ebook_converter.ebooks.oeb.polish.pretty import pretty_script_or_style, pretty_xml_tree, serialize
from ebook_converter.utils.icu import numeric_sort_key
from ebook_converter.css_selectors import Select, SelectorError
-from ebook_converter.polyglot.builtins import iteritems, itervalues

__license__ = 'GPL v3'

@@ -60,7 +59,7 @@ def merge_identical_selectors(sheet):
for rule in sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE):
selector_map[rule.selectorText].append(rule)
remove = []
-for rule_group in itervalues(selector_map):
+for rule_group in selector_map.values():
if len(rule_group) > 1:
for i in range(1, len(rule_group)):
merge_declarations(rule_group[0].style, rule_group[i].style)

@@ -85,23 +84,23 @@ def remove_unused_css(container, report=None, remove_unused_classes=False, merge
return container.parsed(name)
except TypeError:
pass
-sheets = {name:safe_parse(name) for name, mt in iteritems(container.mime_map) if mt in OEB_STYLES}
-sheets = {k:v for k, v in iteritems(sheets) if v is not None}
+sheets = {name:safe_parse(name) for name, mt in container.mime_map.items() if mt in OEB_STYLES}
+sheets = {k:v for k, v in sheets.items() if v is not None}
num_merged = 0
if merge_rules:
-for name, sheet in iteritems(sheets):
+for name, sheet in sheets.items():
num = merge_identical_selectors(sheet)
if num:
container.dirty(name)
num_merged += num
import_map = {name:get_imported_sheets(name, container, sheets) for name in sheets}
if remove_unused_classes:
-class_map = {name:{icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)} for name, sheet in iteritems(sheets)}
-style_rules = {name:tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE)) for name, sheet in iteritems(sheets)}
+class_map = {name:{icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)} for name, sheet in sheets.items()}
+style_rules = {name:tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE)) for name, sheet in sheets.items()}

num_of_removed_rules = num_of_removed_classes = 0

-for name, mt in iteritems(container.mime_map):
+for name, mt in container.mime_map.items():
if mt not in OEB_DOCS:
continue
root = container.parsed(name)

@@ -158,7 +157,7 @@ def remove_unused_css(container, report=None, remove_unused_classes=False, merge
num_of_removed_classes += len(original_classes) - len(classes)
container.dirty(name)

-for name, sheet in iteritems(sheets):
+for name, sheet in sheets.items():
unused_rules = style_rules[name]
if unused_rules:
num_of_removed_rules += len(unused_rules)

@@ -245,7 +244,7 @@ def transform_css(container, transform_sheet=None, transform_style=None, names=(
if not names:
types = OEB_STYLES | OEB_DOCS
names = []
-for name, mt in iteritems(container.mime_map):
+for name, mt in container.mime_map.items():
if mt in types:
names.append(name)
@@ -1,5 +1,4 @@
import textwrap
-from ebook_converter.polyglot.builtins import iteritems

# from lxml.etree import Element

@@ -219,7 +218,7 @@ def pretty_xml(container, name, raw):

def fix_all_html(container):
' Fix any parsing errors in all HTML files in the container. Fixing is done using the HTML5 parsing algorithm. '
-for name, mt in iteritems(container.mime_map):
+for name, mt in container.mime_map.items():
if mt in OEB_DOCS:
container.parsed(name)
container.dirty(name)

@@ -228,7 +227,7 @@ def fix_all_html(container):
def pretty_all(container):
' Pretty print all HTML/CSS/XML files in the container '
xml_types = {guess_type('a.ncx'), guess_type('a.xml'), guess_type('a.svg')}
-for name, mt in iteritems(container.mime_map):
+for name, mt in container.mime_map.items():
prettied = False
if mt in OEB_DOCS:
pretty_html_tree(container, container.parsed(name))
@@ -1,5 +1,4 @@
import codecs, shutil, os, posixpath
-from ebook_converter.polyglot.builtins import iteritems, itervalues
from functools import partial
from collections import Counter, defaultdict
import urllib.parse

@@ -115,7 +114,7 @@ def replace_links(container, link_map, frag_map=lambda name, frag:frag, replace_
:param replace_in_opf: If False, links are not replaced in the OPF file.

'''
-for name, media_type in iteritems(container.mime_map):
+for name, media_type in container.mime_map.items():
if name == container.opf_name and not replace_in_opf:
continue
repl = LinkReplacer(name, container, link_map, frag_map)

@@ -131,7 +130,7 @@ def replace_ids(container, id_map):

'''
changed = False
-for name, media_type in iteritems(container.mime_map):
+for name, media_type in container.mime_map.items():
repl = IdReplacer(name, container, id_map)
container.replace_links(name, repl)
if name == container.opf_name:

@@ -183,19 +182,19 @@ def rename_files(container, file_map):
:param file_map: A mapping of old canonical name to new canonical name, for
example: :code:`{'text/chapter1.html': 'chapter1.html'}`.
'''
-overlap = set(file_map).intersection(set(itervalues(file_map)))
+overlap = set(file_map).intersection(set(file_map.values()))
if overlap:
raise ValueError('Circular rename detected. The files %s are both rename targets and destinations' % ', '.join(overlap))
-for name, dest in iteritems(file_map):
+for name, dest in file_map.items():
if container.exists(dest):
if name != dest and name.lower() == dest.lower():
# A case change on an OS with a case insensitive file-system.
continue
raise ValueError('Cannot rename {0} to {1} as {1} already exists'.format(name, dest))
-if len(tuple(itervalues(file_map))) != len(set(itervalues(file_map))):
+if len(file_map.values()) != len(set(file_map.values())):
raise ValueError('Cannot rename, the set of destination files contains duplicates')
link_map = {}
-for current_name, new_name in iteritems(file_map):
+for current_name, new_name in file_map.items():
container.rename(current_name, new_name)
if new_name != container.opf_name: # OPF is handled by the container
link_map[current_name] = new_name

@@ -217,7 +216,7 @@ def replace_file(container, name, path, basename, force_mt=None):
rename_files(container, {name:nname})
mt = force_mt or container.guess_type(nname)
container.mime_map[nname] = mt
-for itemid, q in iteritems(container.manifest_id_map):
+for itemid, q in container.manifest_id_map.items():
if q == nname:
for item in container.opf_xpath('//opf:manifest/opf:item[@href and @id="%s"]' % itemid):
item.set('media-type', mt)

@@ -252,7 +251,7 @@ def get_recommended_folders(container, names):
recommended folder is assumed to be the folder containing the OPF file. '''
from ebook_converter.ebooks.oeb.polish.utils import guess_type
counts = defaultdict(Counter)
-for name, mt in iteritems(container.mime_map):
+for name, mt in container.mime_map.items():
folder = name.rpartition('/')[0] if '/' in name else ''
counts[mt_to_category(container, mt)][folder] += 1

@@ -261,7 +260,7 @@ def get_recommended_folders(container, names):
except KeyError:
opf_folder = ''

-recommendations = {category:counter.most_common(1)[0][0] for category, counter in iteritems(counts)}
+recommendations = {category:counter.most_common(1)[0][0] for category, counter in counts.items()}
return {n:recommendations.get(mt_to_category(container, guess_type(os.path.basename(n))), opf_folder) for n in names}

@@ -348,7 +347,7 @@ def remove_links_to(container, predicate):
stylepath = XPath('//h:style')
styleattrpath = XPath('//*[@style]')
changed = set()
-for name, mt in iteritems(container.mime_map):
+for name, mt in container.mime_map.items():
removed = False
if mt in OEB_DOCS:
root = container.parsed(name)
@@ -5,7 +5,6 @@ from ebook_converter.ebooks.oeb.base import barename, XPNSMAP, XPath, OPF, XHTML
from ebook_converter.ebooks.oeb.polish.errors import MalformedMarkup
from ebook_converter.ebooks.oeb.polish.toc import node_from_loc
from ebook_converter.ebooks.oeb.polish.replace import LinkRebaser
-from ebook_converter.polyglot.builtins import iteritems

__license__ = 'GPL v3'

@@ -237,7 +236,7 @@ def split(container, name, loc_or_xpath, before=True, totals=None):
a.set('href', '#' + purl.fragment)

# Fix all links in the container that point to anchors in the bottom tree
-for fname, media_type in iteritems(container.mime_map):
+for fname, media_type in container.mime_map.items():
if fname not in {name, bottom_name}:
repl = SplitLinkReplacer(fname, anchors_in_bottom, name, bottom_name, container)
container.replace_links(fname, repl)

@@ -438,7 +437,7 @@ def merge_html(container, names, master, insert_page_breaks=False):
container.remove_item(name, remove_from_guide=False)

# Fix all links in the container that point to merged files
-for fname, media_type in iteritems(container.mime_map):
+for fname, media_type in container.mime_map.items():
repl = MergeLinkReplacer(fname, anchor_map, master, container)
container.replace_links(fname, repl)

@@ -471,7 +470,7 @@ def merge_css(container, names, master):

# Remove links to merged stylesheets in the html files, replacing with a
# link to the master sheet
-for name, mt in iteritems(container.mime_map):
+for name, mt in container.mime_map.items():
if mt in OEB_DOCS:
removed = False
root = p(name)
@@ -16,7 +16,6 @@ from ebook_converter.ebooks.oeb.polish.utils import guess_type, extract
from ebook_converter.ebooks.oeb.polish.opf import set_guide_item, get_book_language
from ebook_converter.ebooks.oeb.polish.pretty import pretty_html_tree
from ebook_converter.utils.localization import get_lang, canonicalize_lang, lang_as_iso639_1
-from ebook_converter.polyglot.builtins import iteritems

__license__ = 'GPL v3'

@@ -168,7 +167,7 @@ def parse_ncx(container, ncx_name):
if navmaps:
process_ncx_node(container, navmaps[0], toc_root, ncx_name)
toc_root.lang = toc_root.uid = None
-for attr, val in iteritems(root.attrib):
+for attr, val in root.attrib.items():
if attr.endswith('lang'):
toc_root.lang = str(val)
break

@@ -415,14 +414,14 @@ def from_xpaths(container, xpaths):
name = container.abspath_to_name(spinepath)
root = container.parsed(name)
level_item_map = maps[name] = {i+1:frozenset(xp(root)) for i, xp in enumerate(xpaths)}
-for lvl, elems in iteritems(level_item_map):
+for lvl, elems in level_item_map.items():
if elems:
empty_levels.discard(lvl)
# Remove empty levels from all level_maps
if empty_levels:
-for name, lmap in tuple(iteritems(maps)):
-lmap = {lvl:items for lvl, items in iteritems(lmap) if lvl not in empty_levels}
-lmap = sorted(iteritems(lmap), key=itemgetter(0))
+for name, lmap in tuple(maps.items()):
+lmap = {lvl:items for lvl, items in lmap.items() if lvl not in empty_levels}
+lmap = sorted(lmap.items(), key=itemgetter(0))
lmap = {i+1:items for i, (l, items) in enumerate(lmap)}
maps[name] = lmap

@@ -440,9 +439,9 @@ def from_xpaths(container, xpaths):

return process_node(tocroot)

-for name, level_item_map in iteritems(maps):
+for name, level_item_map in maps.items():
root = container.parsed(name)
-item_level_map = {e:i for i, elems in iteritems(level_item_map) for e in elems}
+item_level_map = {e:i for i, elems in level_item_map.items() for e in elems}
item_dirtied = False
all_ids = set(root.xpath('//*/@id'))
@@ -15,7 +15,6 @@ from ebook_converter.ebooks import unit_convert
from ebook_converter.ebooks.oeb.base import XHTML, XHTML_NS, CSS_MIME, OEB_STYLES, xpath, urlnormalize
from ebook_converter.ebooks.oeb.normalize_css import DEFAULTS, normalizers
from ebook_converter.css_selectors import Select, SelectorError, INAPPROPRIATE_PSEUDO_CLASSES
-from ebook_converter.polyglot.builtins import iteritems
from ebook_converter.tinycss.media3 import CSSMedia3Parser

@@ -785,7 +784,7 @@ class Style(object):
self._get('padding-right'), base=self.parent_width)

def __str__(self):
-items = sorted(iteritems(self._style))
+items = sorted(self._style.items())
return '; '.join("%s: %s" % (key, val) for key, val in items)

def cssdict(self):

@@ -794,12 +793,12 @@ class Style(object):
def pseudo_classes(self, filter_css):
if filter_css:
css = copy.deepcopy(self._pseudo_classes)
-for psel, cssdict in iteritems(css):
+for psel, cssdict in css.items():
for k in filter_css:
cssdict.pop(k, None)
else:
css = self._pseudo_classes
-return {k:v for k, v in iteritems(css) if v}
+return {k:v for k, v in css.items() if v}

@property
def is_hidden(self):
@@ -16,7 +16,6 @@ from ebook_converter.ebooks.oeb.base import (XHTML, XHTML_NS, CSS_MIME, OEB_STYL
from ebook_converter.ebooks.oeb.stylizer import Stylizer
from ebook_converter.utils.filenames import ascii_filename, ascii_text
from ebook_converter.utils.icu import numeric_sort_key
-from ebook_converter.polyglot.builtins import iteritems

__license__ = 'GPL v3'

@@ -208,7 +207,7 @@ class CSSFlattener(object):

def store_page_margins(self):
self.opts._stored_page_margins = {}
-for item, stylizer in iteritems(self.stylizers):
+for item, stylizer in self.stylizers.items():
margins = self.opts._stored_page_margins[item.href] = {}
for prop, val in stylizer.page_rule.items():
p, w = prop.partition('-')[::2]

@@ -262,7 +261,7 @@ class CSSFlattener(object):
if font[k] != 'normal':
cfont[k] = font[k]
rule = '@font-face { %s }'%('; '.join('%s:%s'%(k, v) for k, v in
-iteritems(cfont)))
+cfont.items()))
rule = css_parser.parseString(rule)
efi.append(rule)

@@ -527,7 +526,7 @@ class CSSFlattener(object):
keep_classes = set()

if cssdict:
-items = sorted(iteritems(cssdict))
+items = sorted(cssdict.items())
css = ';\n'.join(u'%s: %s' % (key, val) for key, val in items)
classes = node.get('class', '').strip() or 'calibre'
classes_list = classes.split()

@@ -544,8 +543,8 @@ class CSSFlattener(object):
node.attrib['class'] = match
keep_classes.add(match)

-for psel, cssdict in iteritems(pseudo_classes):
-items = sorted(iteritems(cssdict))
+for psel, cssdict in pseudo_classes.items():
+items = sorted(cssdict.items())
css = ';\n'.join('%s: %s' % (key, val) for key, val in items)
pstyles = pseudo_styles[psel]
if css in pstyles:

@@ -647,7 +646,7 @@ class CSSFlattener(object):
gc_map[css] = href

ans = {}
-for css, items in iteritems(global_css):
+for css, items in global_css.items():
for item in items:
ans[item] = gc_map[css]
return ans

@@ -663,7 +662,7 @@ class CSSFlattener(object):
fsize = self.context.dest.fbase
self.flatten_node(html, stylizer, names, styles, pseudo_styles, fsize, item.id, recurse=False)
self.flatten_node(html.find(XHTML('body')), stylizer, names, styles, pseudo_styles, fsize, item.id)
-items = sorted(((key, val) for (val, key) in iteritems(styles)))
+items = sorted(((key, val) for (val, key) in styles.items()))
# :hover must come after link and :active must come after :hover
psels = sorted(pseudo_styles, key=lambda x :
{'hover':1, 'active':2}.get(x, 0))

@@ -671,7 +670,7 @@ class CSSFlattener(object):
styles = pseudo_styles[psel]
if not styles:
continue
-x = sorted(((k+':'+psel, v) for v, k in iteritems(styles)))
+x = sorted(((k+':'+psel, v) for v, k in styles.items()))
items.extend(x)

css = ''.join(".%s {\n%s;\n}\n\n" % (key, val) for key, val in items)
@@ -1,7 +1,6 @@
import os, re
from ebook_converter.utils.date import isoformat, now
from ebook_converter import guess_type
-from ebook_converter.polyglot.builtins import iteritems

__license__ = 'GPL v3'

@@ -48,7 +47,7 @@ def meta_info_to_oeb_metadata(mi, m, log, override_input_metadata=False):
m.clear('series')
identifiers = mi.get_identifiers()
set_isbn = False
-for typ, val in iteritems(identifiers):
+for typ, val in identifiers.items():
has = False
if typ.lower() == 'isbn':
set_isbn = True
@@ -2,7 +2,6 @@ import numbers
from collections import Counter

from ebook_converter.ebooks.oeb.base import barename, XPath
-from ebook_converter.polyglot.builtins import iteritems

__license__ = 'GPL v3'

@@ -149,7 +148,7 @@ class RemoveFakeMargins(object):
self.levels[level].append(p)

remove = set()
-for k, v in iteritems(self.levels):
+for k, v in self.levels.items():
num = len(v)
self.log.debug('Found %d items of level:'%num, k)
level = int(k.split('_')[-1])
@@ -15,7 +15,6 @@ from ebook_converter.ebooks.epub import rules
from ebook_converter.ebooks.oeb.base import (OEB_STYLES, XPNSMAP as NAMESPACES,
rewrite_links, XHTML, urlnormalize)
from ebook_converter.ebooks.oeb.polish.split import do_split
-from ebook_converter.polyglot.builtins import iteritems
from ebook_converter.polyglot.urllib import unquote
from ebook_converter.css_selectors import Select, SelectorError

@@ -243,7 +242,7 @@ class FlowSplitter(object):

self.trees = [orig_tree]
while ordered_ids:
-pb_id, (pattern, before) = next(iteritems(ordered_ids))
+pb_id, (pattern, before) = next(iter(ordered_ids.items()))
del ordered_ids[pb_id]
for i in range(len(self.trees)-1, -1, -1):
tree = self.trees[i]
@@ -7,7 +7,6 @@ from collections import OrderedDict, Counter

from ebook_converter.ebooks.oeb.base import XPNSMAP, TOC, XHTML, xml2text, barename
from ebook_converter.ebooks import ConversionError
-from ebook_converter.polyglot.builtins import itervalues

__license__ = 'GPL v3'

@@ -269,8 +268,8 @@ class DetectStructure(object):
return []

for document in self.oeb.spine:
-previous_level1 = list(itervalues(added))[-1] if added else None
-previous_level2 = list(itervalues(added2))[-1] if added2 else None
+previous_level1 = list(added.values())[-1] if added else None
+previous_level2 = list(added2.values())[-1] if added2 else None

level1_toc, level1_title = self.get_toc_parts_for_xpath(self.opts.level1_toc)
for elem in find_matches(level1_toc, document.data):
@@ -2,7 +2,6 @@ from collections import defaultdict

from ebook_converter.ebooks.oeb.base import urlnormalize, css_text
from ebook_converter.utils.fonts.sfnt.subset import subset, NoGlyphs, UnsupportedFont
-from ebook_converter.polyglot.builtins import iteritems, itervalues
from ebook_converter.tinycss.fonts3 import parse_font_family

@@ -148,7 +147,7 @@ class SubsetFonts(object):
else:
fonts[item.href] = font

-for font in itervalues(fonts):
+for font in fonts.values():
if not font['chars']:
self.log('The font %s is unused. Removing it.'%font['src'])
remove(font)

@@ -167,8 +166,8 @@ class SubsetFonts(object):
totals[1] += sz
else:
font['item'].data = raw
-nlen = sum(itervalues(new_stats))
-olen = sum(itervalues(old_stats))
+nlen = sum(new_stats.values())
+olen = sum(old_stats.values())
self.log('Decreased the font %s to %.1f%% of its original size'%
(font['src'], nlen/olen *100))
totals[0] += nlen

@@ -204,7 +203,7 @@ class SubsetFonts(object):
if rule.type != rule.STYLE_RULE:
continue
props = {k:v for k,v in
-iteritems(get_font_properties(rule)) if v}
+get_font_properties(rule).items() if v}
if not props:
continue
for sel in rule.selectorList:
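One detail visible in several hunks (for example tuple(attrib.items()) in the Metadata and parse_html changes): .items() returns a live view in Python 3, so loops that mutate the dict first materialize the view. A small sketch of why that matters; the attribute dict below is hypothetical, not taken from the repository:

# Hypothetical attributes dict, not from the repository.
attrib = {'ID': 'calibre_1', 'CLASS': 'chapter'}

# dict.items() is a live view in Python 3: deleting keys while iterating
# the view raises "RuntimeError: dictionary changed size during iteration".
# Taking a snapshot first, as the diff does with tuple(attrib.items()),
# keeps the mutation safe.
for key, val in tuple(attrib.items()):
    del attrib[key]
    attrib[key.lower()] = val

print(attrib)  # {'id': 'calibre_1', 'class': 'chapter'}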