1
0
mirror of https://github.com/gryf/ebook-converter.git synced 2026-04-20 21:21:35 +02:00

Removed iteritems and itervalues, which are redundant under Python 3 (dict.items() and dict.values() already return views)

This commit is contained in:
2020-04-21 18:20:08 +02:00
parent 9e076e0af4
commit 3234e4de27
82 changed files with 382 additions and 457 deletions
@@ -49,7 +49,6 @@ from ebook_converter.utils.ipc.simple_worker import WorkerError, fork_job
from ebook_converter.utils.logging import default_log
from ebook_converter.utils.xml_parse import safe_xml_fromstring
from ebook_converter.utils.zipfile import ZipFile
from ebook_converter.polyglot.builtins import iteritems
exists, join, relpath = os.path.exists, os.path.join, os.path.relpath
@@ -306,7 +305,7 @@ class Container(ContainerBase): # {{{
'tweak_mode': self.tweak_mode,
'name_path_map': {
name:os.path.join(dest_dir, os.path.relpath(path, self.root))
for name, path in iteritems(self.name_path_map)}
for name, path in self.name_path_map.items()}
}
def clone_data(self, dest_dir):
@@ -667,7 +666,7 @@ class Container(ContainerBase): # {{{
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
ans[item.get('media-type').lower()].append(self.href_to_name(
item.get('href'), self.opf_name))
return {mt:tuple(v) for mt, v in iteritems(ans)}
return {mt:tuple(v) for mt, v in ans.items()}
def manifest_items_with_property(self, property_name):
' All manifest items that have the specified property '
@@ -685,7 +684,7 @@ class Container(ContainerBase): # {{{
predicate = predicate.__eq__
elif hasattr(predicate, '__contains__'):
predicate = predicate.__contains__
for mt, names in iteritems(self.manifest_type_map):
for mt, names in self.manifest_type_map.items():
if predicate(mt):
for name in names:
yield name
@@ -807,7 +806,7 @@ class Container(ContainerBase): # {{{
the form (name, linear). Will raise an error if one of the names is not
present in the manifest. '''
imap = self.manifest_id_map
imap = {name:item_id for item_id, name in iteritems(imap)}
imap = {name:item_id for item_id, name in imap.items()}
items = [item for item, name, linear in self.spine_iter]
tail, last_tail = (items[0].tail, items[-1].tail) if items else ('\n ', '\n ')
tuple(map(self.remove_from_xml, items))
@@ -1071,7 +1070,7 @@ class Container(ContainerBase): # {{{
if set(self.name_path_map) != set(other.name_path_map):
return 'Set of files is not the same'
mismatches = []
for name, path in iteritems(self.name_path_map):
for name, path in self.name_path_map.items():
opath = other.name_path_map[name]
with open(path, 'rb') as f1, open(opath, 'rb') as f2:
if f1.read() != f2.read():
@@ -1275,7 +1274,7 @@ class EpubContainer(Container):
return
package_id = raw_unique_identifier = idpf_key = None
for attrib, val in iteritems(self.opf.attrib):
for attrib, val in self.opf.attrib.items():
if attrib.endswith('unique-identifier'):
package_id = val
break
@@ -1304,7 +1303,7 @@ class EpubContainer(Container):
self.log.exception('Failed to parse obfuscation key')
key = None
for font, alg in iteritems(fonts):
for font, alg in fonts.items():
tkey = key if alg == ADOBE_OBFUSCATION else idpf_key
if not tkey:
raise ObfuscationKeyMissing('Failed to find obfuscation key')
@@ -1374,7 +1373,7 @@ class EpubContainer(Container):
et = et.encode('ascii')
f.write(et)
zip_rebuilder(self.root, outpath)
for name, data in iteritems(restore_fonts):
for name, data in restore_fonts.items():
with self.open(name, 'wb') as f:
f.write(data)
+9 -10
View File
@@ -10,7 +10,6 @@ from ebook_converter.ebooks.oeb.normalize_css import normalize_filter_css, norma
from ebook_converter.ebooks.oeb.polish.pretty import pretty_script_or_style, pretty_xml_tree, serialize
from ebook_converter.utils.icu import numeric_sort_key
from ebook_converter.css_selectors import Select, SelectorError
from ebook_converter.polyglot.builtins import iteritems, itervalues
__license__ = 'GPL v3'
@@ -60,7 +59,7 @@ def merge_identical_selectors(sheet):
for rule in sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE):
selector_map[rule.selectorText].append(rule)
remove = []
for rule_group in itervalues(selector_map):
for rule_group in selector_map.values():
if len(rule_group) > 1:
for i in range(1, len(rule_group)):
merge_declarations(rule_group[0].style, rule_group[i].style)
@@ -85,23 +84,23 @@ def remove_unused_css(container, report=None, remove_unused_classes=False, merge
return container.parsed(name)
except TypeError:
pass
sheets = {name:safe_parse(name) for name, mt in iteritems(container.mime_map) if mt in OEB_STYLES}
sheets = {k:v for k, v in iteritems(sheets) if v is not None}
sheets = {name:safe_parse(name) for name, mt in container.mime_map.items() if mt in OEB_STYLES}
sheets = {k:v for k, v in sheets.items() if v is not None}
num_merged = 0
if merge_rules:
for name, sheet in iteritems(sheets):
for name, sheet in sheets.items():
num = merge_identical_selectors(sheet)
if num:
container.dirty(name)
num_merged += num
import_map = {name:get_imported_sheets(name, container, sheets) for name in sheets}
if remove_unused_classes:
class_map = {name:{icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)} for name, sheet in iteritems(sheets)}
style_rules = {name:tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE)) for name, sheet in iteritems(sheets)}
class_map = {name:{icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)} for name, sheet in sheets.items()}
style_rules = {name:tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE)) for name, sheet in sheets.items()}
num_of_removed_rules = num_of_removed_classes = 0
for name, mt in iteritems(container.mime_map):
for name, mt in container.mime_map.items():
if mt not in OEB_DOCS:
continue
root = container.parsed(name)
@@ -158,7 +157,7 @@ def remove_unused_css(container, report=None, remove_unused_classes=False, merge
num_of_removed_classes += len(original_classes) - len(classes)
container.dirty(name)
for name, sheet in iteritems(sheets):
for name, sheet in sheets.items():
unused_rules = style_rules[name]
if unused_rules:
num_of_removed_rules += len(unused_rules)
@@ -245,7 +244,7 @@ def transform_css(container, transform_sheet=None, transform_style=None, names=(
if not names:
types = OEB_STYLES | OEB_DOCS
names = []
for name, mt in iteritems(container.mime_map):
for name, mt in container.mime_map.items():
if mt in types:
names.append(name)
+2 -3
View File
@@ -1,5 +1,4 @@
import textwrap
from ebook_converter.polyglot.builtins import iteritems
# from lxml.etree import Element
@@ -219,7 +218,7 @@ def pretty_xml(container, name, raw):
def fix_all_html(container):
' Fix any parsing errors in all HTML files in the container. Fixing is done using the HTML5 parsing algorithm. '
for name, mt in iteritems(container.mime_map):
for name, mt in container.mime_map.items():
if mt in OEB_DOCS:
container.parsed(name)
container.dirty(name)
@@ -228,7 +227,7 @@ def fix_all_html(container):
def pretty_all(container):
' Pretty print all HTML/CSS/XML files in the container '
xml_types = {guess_type('a.ncx'), guess_type('a.xml'), guess_type('a.svg')}
for name, mt in iteritems(container.mime_map):
for name, mt in container.mime_map.items():
prettied = False
if mt in OEB_DOCS:
pretty_html_tree(container, container.parsed(name))
+10 -11
View File
@@ -1,5 +1,4 @@
import codecs, shutil, os, posixpath
from ebook_converter.polyglot.builtins import iteritems, itervalues
from functools import partial
from collections import Counter, defaultdict
import urllib.parse
@@ -115,7 +114,7 @@ def replace_links(container, link_map, frag_map=lambda name, frag:frag, replace_
:param replace_in_opf: If False, links are not replaced in the OPF file.
'''
for name, media_type in iteritems(container.mime_map):
for name, media_type in container.mime_map.items():
if name == container.opf_name and not replace_in_opf:
continue
repl = LinkReplacer(name, container, link_map, frag_map)
@@ -131,7 +130,7 @@ def replace_ids(container, id_map):
'''
changed = False
for name, media_type in iteritems(container.mime_map):
for name, media_type in container.mime_map.items():
repl = IdReplacer(name, container, id_map)
container.replace_links(name, repl)
if name == container.opf_name:
@@ -183,19 +182,19 @@ def rename_files(container, file_map):
:param file_map: A mapping of old canonical name to new canonical name, for
example: :code:`{'text/chapter1.html': 'chapter1.html'}`.
'''
overlap = set(file_map).intersection(set(itervalues(file_map)))
overlap = set(file_map).intersection(set(file_map.values()))
if overlap:
raise ValueError('Circular rename detected. The files %s are both rename targets and destinations' % ', '.join(overlap))
for name, dest in iteritems(file_map):
for name, dest in file_map.items():
if container.exists(dest):
if name != dest and name.lower() == dest.lower():
# A case change on an OS with a case insensitive file-system.
continue
raise ValueError('Cannot rename {0} to {1} as {1} already exists'.format(name, dest))
if len(tuple(itervalues(file_map))) != len(set(itervalues(file_map))):
if len(file_map.values()) != len(set(file_map.values())):
raise ValueError('Cannot rename, the set of destination files contains duplicates')
link_map = {}
for current_name, new_name in iteritems(file_map):
for current_name, new_name in file_map.items():
container.rename(current_name, new_name)
if new_name != container.opf_name: # OPF is handled by the container
link_map[current_name] = new_name
@@ -217,7 +216,7 @@ def replace_file(container, name, path, basename, force_mt=None):
rename_files(container, {name:nname})
mt = force_mt or container.guess_type(nname)
container.mime_map[nname] = mt
for itemid, q in iteritems(container.manifest_id_map):
for itemid, q in container.manifest_id_map.items():
if q == nname:
for item in container.opf_xpath('//opf:manifest/opf:item[@href and @id="%s"]' % itemid):
item.set('media-type', mt)
@@ -252,7 +251,7 @@ def get_recommended_folders(container, names):
recommended folder is assumed to be the folder containing the OPF file. '''
from ebook_converter.ebooks.oeb.polish.utils import guess_type
counts = defaultdict(Counter)
for name, mt in iteritems(container.mime_map):
for name, mt in container.mime_map.items():
folder = name.rpartition('/')[0] if '/' in name else ''
counts[mt_to_category(container, mt)][folder] += 1
@@ -261,7 +260,7 @@ def get_recommended_folders(container, names):
except KeyError:
opf_folder = ''
recommendations = {category:counter.most_common(1)[0][0] for category, counter in iteritems(counts)}
recommendations = {category:counter.most_common(1)[0][0] for category, counter in counts.items()}
return {n:recommendations.get(mt_to_category(container, guess_type(os.path.basename(n))), opf_folder) for n in names}
@@ -348,7 +347,7 @@ def remove_links_to(container, predicate):
stylepath = XPath('//h:style')
styleattrpath = XPath('//*[@style]')
changed = set()
for name, mt in iteritems(container.mime_map):
for name, mt in container.mime_map.items():
removed = False
if mt in OEB_DOCS:
root = container.parsed(name)
+3 -4
View File
@@ -5,7 +5,6 @@ from ebook_converter.ebooks.oeb.base import barename, XPNSMAP, XPath, OPF, XHTML
from ebook_converter.ebooks.oeb.polish.errors import MalformedMarkup
from ebook_converter.ebooks.oeb.polish.toc import node_from_loc
from ebook_converter.ebooks.oeb.polish.replace import LinkRebaser
from ebook_converter.polyglot.builtins import iteritems
__license__ = 'GPL v3'
@@ -237,7 +236,7 @@ def split(container, name, loc_or_xpath, before=True, totals=None):
a.set('href', '#' + purl.fragment)
# Fix all links in the container that point to anchors in the bottom tree
for fname, media_type in iteritems(container.mime_map):
for fname, media_type in container.mime_map.items():
if fname not in {name, bottom_name}:
repl = SplitLinkReplacer(fname, anchors_in_bottom, name, bottom_name, container)
container.replace_links(fname, repl)
@@ -438,7 +437,7 @@ def merge_html(container, names, master, insert_page_breaks=False):
container.remove_item(name, remove_from_guide=False)
# Fix all links in the container that point to merged files
for fname, media_type in iteritems(container.mime_map):
for fname, media_type in container.mime_map.items():
repl = MergeLinkReplacer(fname, anchor_map, master, container)
container.replace_links(fname, repl)
@@ -471,7 +470,7 @@ def merge_css(container, names, master):
# Remove links to merged stylesheets in the html files, replacing with a
# link to the master sheet
for name, mt in iteritems(container.mime_map):
for name, mt in container.mime_map.items():
if mt in OEB_DOCS:
removed = False
root = p(name)
+7 -8
View File
@@ -16,7 +16,6 @@ from ebook_converter.ebooks.oeb.polish.utils import guess_type, extract
from ebook_converter.ebooks.oeb.polish.opf import set_guide_item, get_book_language
from ebook_converter.ebooks.oeb.polish.pretty import pretty_html_tree
from ebook_converter.utils.localization import get_lang, canonicalize_lang, lang_as_iso639_1
from ebook_converter.polyglot.builtins import iteritems
__license__ = 'GPL v3'
@@ -168,7 +167,7 @@ def parse_ncx(container, ncx_name):
if navmaps:
process_ncx_node(container, navmaps[0], toc_root, ncx_name)
toc_root.lang = toc_root.uid = None
for attr, val in iteritems(root.attrib):
for attr, val in root.attrib.items():
if attr.endswith('lang'):
toc_root.lang = str(val)
break
@@ -415,14 +414,14 @@ def from_xpaths(container, xpaths):
name = container.abspath_to_name(spinepath)
root = container.parsed(name)
level_item_map = maps[name] = {i+1:frozenset(xp(root)) for i, xp in enumerate(xpaths)}
for lvl, elems in iteritems(level_item_map):
for lvl, elems in level_item_map.items():
if elems:
empty_levels.discard(lvl)
# Remove empty levels from all level_maps
if empty_levels:
for name, lmap in tuple(iteritems(maps)):
lmap = {lvl:items for lvl, items in iteritems(lmap) if lvl not in empty_levels}
lmap = sorted(iteritems(lmap), key=itemgetter(0))
for name, lmap in tuple(maps.items()):
lmap = {lvl:items for lvl, items in lmap.items() if lvl not in empty_levels}
lmap = sorted(lmap.items(), key=itemgetter(0))
lmap = {i+1:items for i, (l, items) in enumerate(lmap)}
maps[name] = lmap
@@ -440,9 +439,9 @@ def from_xpaths(container, xpaths):
return process_node(tocroot)
for name, level_item_map in iteritems(maps):
for name, level_item_map in maps.items():
root = container.parsed(name)
item_level_map = {e:i for i, elems in iteritems(level_item_map) for e in elems}
item_level_map = {e:i for i, elems in level_item_map.items() for e in elems}
item_dirtied = False
all_ids = set(root.xpath('//*/@id'))