Mirror of https://github.com/gryf/ebook-converter.git, synced 2025-12-18 21:20:17 +01:00
Finalizing logging transition.
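The recurring change in this commit is a single idiom: drop the deprecated logger.warn() alias in favour of logger.warning(), and stop pre-formatting messages with the % operator, passing the arguments to the logging call instead so interpolation is deferred until a handler actually emits the record. A minimal sketch of that idiom using the standard logging module (the logger name and the family value are invented for illustration, not taken from the code below):

    import logging

    logger = logging.getLogger('ebook_converter.example')  # hypothetical logger name
    family = 'Liberation Serif'                            # made-up value for the demo

    # Old style removed by this commit: the message is formatted eagerly with %,
    # even when the WARNING record ends up filtered out, and .warn() is a
    # deprecated alias for .warning().
    logger.warn('Unable to find serif family %s' % family)

    # New style used throughout: .warning() with lazy arguments; the logging
    # module interpolates %s only when the record is actually emitted.
    logger.warning('Unable to find serif family %s', family)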
@@ -10,11 +10,6 @@ from ebook_converter.ebooks.lrf.fonts import FONT_FILE_MAP
 from ebook_converter.ebooks import ConversionError
 
 
-__license__ = 'GPL v3'
-__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
-__docformat__ = "epytext"
-
-
 class LRFParseError(Exception):
     pass
 
@@ -46,17 +41,17 @@ def find_custom_fonts(options, logger):
         f = family(options.serif_family)
         fonts['serif'] = font_scanner.legacy_fonts_for_family(f)
         if not fonts['serif']:
-            logger.warn('Unable to find serif family %s'%f)
+            logger.warning('Unable to find serif family %s', f)
     if options.sans_family:
         f = family(options.sans_family)
         fonts['sans'] = font_scanner.legacy_fonts_for_family(f)
         if not fonts['sans']:
-            logger.warn('Unable to find sans family %s'%f)
+            logger.warning('Unable to find sans family %s', f)
     if options.mono_family:
         f = family(options.mono_family)
         fonts['mono'] = font_scanner.legacy_fonts_for_family(f)
         if not fonts['mono']:
-            logger.warn('Unable to find mono family %s'%f)
+            logger.warning('Unable to find mono family %s', f)
     return fonts
 
 
@@ -15,9 +15,6 @@ from ebook_converter.ebooks.mobi.utils import convert_color_for_font_tag
 from ebook_converter.utils.imghdr import identify
 
 
-__license__ = 'GPL v3'
-__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.cam>'
-
 MBP_NS = 'http://mobipocket.com/ns/mbp'
 
 
@@ -455,13 +452,12 @@ class MobiMLizer(object):
             try:
                 item = self.oeb.manifest.hrefs[base.urlnormalize(href)]
             except:
-                self.oeb.logger.warn('Failed to find image:',
-                                     href)
+                self.oeb.logger.warning('Failed to find image:', href)
             else:
                 try:
                     width, height = identify(item.data)[1:]
                 except Exception:
-                    self.oeb.logger.warn('Invalid image:', href)
+                    self.oeb.logger.warning('Invalid image:', href)
                 else:
                     if 'width' not in istate.attrib and 'height' not in \
                             istate.attrib:
@@ -215,8 +215,8 @@ class BookHeader(object):
                 }[self.codepage]
             except (IndexError, KeyError):
                 self.codec = 'cp1252' if not user_encoding else user_encoding
-                log.warn('Unknown codepage %d. Assuming %s' % (self.codepage,
-                                                               self.codec))
+                log.warning('Unknown codepage %d. Assuming %s', self.codepage,
+                            self.codec)
         # Some KF8 files have header length == 264 (generated by kindlegen
         # 2.9?). See https://bugs.launchpad.net/bugs/1179144
         max_header_length = 500  # We choose 500 for future versions of kindlegen
@@ -36,7 +36,8 @@ def update_internal_links(mobi8_reader, log):
             filename, idtag = mr.get_id_tag_by_pos_fid(
                 int(posfid, 32), int(offset, 32))
         except ValueError:
-            log.warn('Invalid link, points to nowhere, ignoring')
+            log.warning('Invalid link, points to nowhere, '
+                        'ignoring')
             replacement = b'#'
         else:
             suffix = (b'#' + idtag) if idtag else b''
@@ -49,7 +50,8 @@ def update_internal_links(mobi8_reader, log):
         try:
             parts.append(raw.decode(mr.header.codec))
         except UnicodeDecodeError:
-            log.warn('Failed to decode text in KF8 part, replacing bad bytes')
+            log.warning('Failed to decode text in KF8 part, replacing bad '
+                        'bytes')
             parts.append(raw.decode(mr.header.codec, 'replace'))
 
     # All parts are now unicode and have no internal links
@@ -130,7 +132,8 @@ def update_flow_links(mobi8_reader, resource_map, log):
         try:
             flow = flow.decode(mr.header.codec)
         except UnicodeDecodeError:
-            log.error('Flow part has invalid %s encoded bytes'%mr.header.codec)
+            log.error('Flow part has invalid %s encoded bytes',
+                      mr.header.codec)
             flow = flow.decode(mr.header.codec, 'replace')
 
         # links to raster image files from image tags
@@ -146,8 +149,8 @@ def update_flow_links(mobi8_reader, resource_map, log):
                         replacement = '"%s"'%('../'+ href)
                         tag = img_index_pattern.sub(replacement, tag, 1)
                     else:
-                        log.warn('Referenced image %s was not recognized '
-                                 'as a valid image in %s' % (num, tag))
+                        log.warning('Referenced image %s was not recognized '
+                                    'as a valid image in %s', num, tag)
                 srcpieces[j] = tag
             flow = "".join(srcpieces)
 
@@ -164,16 +167,16 @@ def update_flow_links(mobi8_reader, resource_map, log):
                     replacement = '"%s"'%('../'+ href)
                     tag = url_img_index_pattern.sub(replacement, tag, 1)
                 else:
-                    log.warn('Referenced image %s was not recognized as a '
-                             'valid image in %s' % (num, tag))
+                    log.warning('Referenced image %s was not recognized as a '
+                                'valid image in %s', num, tag)
 
             # process links to fonts
             for m in font_index_pattern.finditer(tag):
                 num = int(m.group(1), 32)
                 href = resource_map[num-1]
                 if href is None:
-                    log.warn('Referenced font %s was not recognized as a '
-                             'valid font in %s' % (num, tag))
+                    log.warning('Referenced font %s was not recognized as a '
+                                'valid font in %s', num, tag)
                 else:
                     replacement = '"%s"'%('../'+ href)
                     if href.endswith('.failed'):
@@ -200,7 +203,8 @@ def update_flow_links(mobi8_reader, resource_map, log):
                 num = int(m.group(1), 32)
                 fi = mr.flowinfo[num]
             except IndexError:
-                log.warn('Ignoring invalid flow reference in tag', tag)
+                log.warning('Ignoring invalid flow reference in '
+                            'tag %s', tag)
                 tag = ''
             else:
                 if fi.format == 'inline':
@@ -237,7 +241,8 @@ def insert_flows_into_markup(parts, flows, mobi8_reader, log):
             try:
                 fi = mr.flowinfo[num]
             except IndexError:
-                log.warn('Ignoring invalid flow reference: %s'%m.group())
+                log.warning('Ignoring invalid flow reference: %s',
+                            m.group())
                 tag = ''
             else:
                 if fi.format == 'inline':
@@ -273,8 +278,8 @@ def insert_images_into_markup(parts, resource_map, log):
                         replacement = '"%s"'%('../' + href)
                         tag = img_index_pattern.sub(replacement, tag, 1)
                     else:
-                        log.warn('Referenced image %s was not recognized as '
-                                 'a valid image in %s' % (num, tag))
+                        log.warning('Referenced image %s was not recognized '
+                                    'as a valid image in %s', num, tag)
             srcpieces[j] = tag
         part = "".join(srcpieces)
         # store away modified version
@@ -296,8 +301,8 @@ def insert_images_into_markup(parts, resource_map, log):
                         replacement = '%s%s%s'%(osep, '../' + href, csep)
                         tag = img_index_pattern.sub(replacement, tag, 1)
                     else:
-                        log.warn('Referenced image %s was not recognized as '
-                                 'a valid image in %s' % (num, tag))
+                        log.warning('Referenced image %s was not recognized '
+                                    'as a valid image in %s', num, tag)
             srcpieces[j] = tag
         part = "".join(srcpieces)
         # store away modified version
@@ -231,7 +231,7 @@ class MobiReader(object):
                                  keep_doctype=False, sanitize_names=True)
 
         if root.tag != 'html':
-            self.log.warn('File does not have opening <html> tag')
+            self.log.warning('File does not have opening <html> tag')
             nroot = html.fromstring('<html><head></head><body></body></html>')
             bod = nroot.find('body')
             for child in list(root):
@@ -242,7 +242,7 @@ class MobiReader(object):
         htmls = list(root.xpath('//html'))
 
         if len(htmls) > 1:
-            self.log.warn('Markup contains multiple <html> tags, merging.')
+            self.log.warning('Markup contains multiple <html> tags, merging.')
             # Merge all <head> and <body> sections
             for h in htmls:
                 p = h.getparent()
@@ -833,9 +833,9 @@ class MobiReader(object):
     def warn_about_trailing_entry_corruption(self):
         if not self.warned_about_trailing_entry_corruption:
             self.warned_about_trailing_entry_corruption = True
-            self.log.warn('The trailing data entries in this MOBI file are '
-                          'corrupted, you might see corrupted text in the '
-                          'output')
+            self.log.warning('The trailing data entries in this MOBI file are '
+                             'corrupted, you might see corrupted text in the '
+                             'output')
 
     def text_section(self, index):
         data = self.sections[index][0]
@@ -205,9 +205,9 @@ class Mobi8Reader(object):
                 # This can happen for some badly formed KF8 files, see for
                 # example, https://bugs.launchpad.net/bugs/1082669
                 if not inspos_warned:
-                    self.log.warn('The div table for %s has incorrect '
-                                  'insert positions. Calculating '
-                                  'manually.' % skelname)
+                    self.log.warning('The div table for %s has incorrect '
+                                     'insert positions. Calculating '
+                                     'manually.', skelname)
                     inspos_warned = True
                 bp, ep = locate_beg_end_of_tag(skeleton, aidtext if
                                                isinstance(aidtext, bytes)
@@ -396,8 +396,8 @@ class Mobi8Reader(object):
             try:
                 href, idtag = self.get_id_tag_by_pos_fid(*pos_fid)
             except ValueError:
-                self.log.warn('Invalid entry in NCX (title: %s), '
-                              'ignoring' % entry['text'])
+                self.log.warning('Invalid entry in NCX (title: %s), '
+                                 'ignoring', entry['text'])
                 remove.append(entry)
                 continue
 
@@ -436,10 +436,10 @@ class Mobi8Reader(object):
             font = read_font_record(data)
             href = "fonts/%05d.%s" % (fname_idx, font['ext'])
             if font['err']:
-                self.log.warn('Reading font record %d failed: %s' %
-                              (fname_idx, font['err']))
+                self.log.warning('Reading font record %d failed: %s',
+                                 fname_idx, font['err'])
                 if font['headers']:
-                    self.log.debug('Font record headers: %s' %
+                    self.log.debug('Font record headers: %s',
                                    font['headers'])
             with open(href.replace('/', os.sep), 'wb') as f:
                 f.write(font['font_data'] if font['font_data'] else
@@ -484,7 +484,7 @@ class Mobi8Reader(object):
             mi.cover = resource_map[self.cover_offset]
 
         if len(list(toc)) < 2:
-            self.log.warn('KF8 has no metadata Table of Contents')
+            self.log.warning('KF8 has no metadata Table of Contents')
 
             for ref in guide:
                 if ref.type == 'toc':
@@ -351,7 +351,7 @@ def align_block(raw, multiple=4, pad=b'\0'):
     return raw + pad*(multiple - extra)
 
 
-def detect_periodical(toc, log=None):
+def detect_periodical(toc, log):
     '''
     Detect if the TOC object toc contains a periodical that conforms to the
     structure required by kindlegen to generate a periodical.
@@ -360,25 +360,19 @@ def detect_periodical(toc, log=None):
         return False
     for node in toc.iterdescendants():
         if node.depth() == 1 and node.klass != 'article':
-            if log is not None:
-                log.debug(
-                    'Not a periodical: Deepest node does not have '
-                    'class="article"')
+            log.debug('Not a periodical: Deepest node does not have '
+                      'class="article"')
             return False
         if node.depth() == 2 and node.klass != 'section':
-            if log is not None:
-                log.debug(
-                    'Not a periodical: Second deepest node does not have'
-                    ' class="section"')
+            log.debug('Not a periodical: Second deepest node does not have '
+                      'class="section"')
             return False
         if node.depth() == 3 and node.klass != 'periodical':
-            if log is not None:
-                log.debug('Not a periodical: Third deepest node'
-                          ' does not have class="periodical"')
+            log.debug('Not a periodical: Third deepest node does not have '
+                      'class="periodical"')
             return False
         if node.depth() > 3:
-            if log is not None:
-                log.debug('Not a periodical: Has nodes of depth > 3')
+            log.debug('Not a periodical: Has nodes of depth > 3')
             return False
     return True
 
@@ -444,8 +444,8 @@ class Indexer(object): # {{{
         if self.is_periodical and self.masthead_offset is None:
             raise ValueError('Periodicals must have a masthead')
 
-        self.log('Generating MOBI index for a %s'%('periodical' if
-                 self.is_periodical else 'book'))
+        self.log('Generating MOBI index for a %s', 'periodical' if
+                 self.is_periodical else 'book')
         self.is_flat_periodical = False
         if self.is_periodical:
             periodical_node = next(iter(oeb.toc))
@@ -634,8 +634,8 @@ class Indexer(object): # {{{
                 offset = id_offsets[node.href]
                 label = self.cncx[node.title]
             except:
-                self.log.warn('TOC item %s [%s] not found in document'%(
-                    node.title, node.href))
+                self.log.warn('TOC item %s [%s] not found in document',
+                              node.title, node.href)
                 continue
 
             if offset in seen:
@@ -12,10 +12,6 @@ from ebook_converter.ebooks.mobi.utils import (encint, encode_trailing_data,
 from ebook_converter.ebooks.mobi.writer2.indexer import Indexer
 
 
-__license__ = 'GPL v3'
-__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
-__docformat__ = 'restructuredtext en'
-
 # Disabled as I dont care about uncrossable breaks
 WRITE_UNCROSSABLE_BREAKS = False
 NULL_INDEX = 0xffffffff
@@ -89,7 +85,7 @@ class MobiWriter(object):
     def generate_index(self):
         self.primary_index_record_idx = None
         if self.oeb.toc.count() < 1:
-            self.log.warn('No TOC, MOBI index not generated')
+            self.log.warning('No TOC, MOBI index not generated')
             return
         try:
             self.indexer = Indexer(self.serializer, self.last_text_record_idx,
@@ -97,7 +97,7 @@ class Resources(object):
                 try:
                     data = self.process_image(item.data)
                 except:
-                    self.log.warn('Bad image file %r' % item.href)
+                    self.log.warning('Bad image file %r', item.href)
                     continue
                 else:
                     if mh_href and item.href == mh_href:
@@ -117,7 +117,7 @@ class Resources(object):
                     data = rescale_image(item.data, dimen=MAX_THUMB_DIMEN,
                                          maxsizeb=MAX_THUMB_SIZE)
                 except:
-                    self.log.warn('Failed to generate thumbnail')
+                    self.log.warning('Failed to generate thumbnail')
                 else:
                     self.image_indices.add(len(self.records))
                     self.records.append(data)
@@ -145,7 +145,7 @@ class Resources(object):
             try:
                 data = self.process_image(item.data)
             except:
-                self.log.warn('Bad image file %r' % item.href)
+                self.log.warning('Bad image file %r', item.href)
             else:
                 self.records.append(data)
                 self.item_map[item.href] = len(self.records)
@@ -380,7 +380,7 @@ class Serializer(object):
             is_start = (href and href == start_href)
             # Iterate over all filepos items
             if href not in id_offsets:
-                self.logger.warn('Hyperlink target %r not found' % href)
+                self.logger.warning('Hyperlink target %r not found', href)
                 # Link to the top of the document, better than just ignoring
                 href, _ = urllib.parse.urldefrag(href)
                 if href in self.id_offsets:
@@ -1,11 +1,6 @@
 from ebook_converter.ebooks.oeb.base import XPath
 
 
-__license__ = 'GPL v3'
-__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
-__docformat__ = 'restructuredtext en'
-
-
 class CSSCleanup(object):
 
     def __init__(self, log, opts):
@@ -34,7 +29,7 @@ def remove_duplicate_anchors(oeb):
             anchor = tag.get(attr)
             if anchor is not None:
                 if anchor in seen:
-                    oeb.log.debug('Removing duplicate anchor:', anchor)
+                    oeb.log.debug('Removing duplicate anchor: %s', anchor)
                     tag.attrib.pop(attr)
                 else:
                     seen.add(anchor)
@@ -62,7 +62,7 @@ class HTMLTOCAdder(object):
         title = self.title or oeb.translate(DEFAULT_TITLE)
         style = self.style
         if style not in STYLE_CSS:
-            oeb.logger.error('Unknown TOC style %r' % style)
+            oeb.logger.error('Unknown TOC style %r', style)
             style = 'nested'
         id, css_href = oeb.manifest.generate('tocstyle', 'tocstyle.css')
         oeb.manifest.add(id, css_href, base.CSS_MIME, data=STYLE_CSS[style])
@@ -65,17 +65,19 @@ class SVGRasterizer(object):
                 box = [float(x) for x in filter(None, re.split('[, ]', view_box))]
                 sizes = [box[2]-box[0], box[3] - box[1]]
             except (TypeError, ValueError, IndexError):
-                logger.warn('SVG image has invalid viewBox="%s", ignoring the viewBox' % view_box)
+                logger.warning('SVG image has invalid viewBox="%s", ignoring '
+                               'the viewBox', view_box)
             else:
                 for image in elem.xpath('descendant::*[local-name()="image" and '
                                         '@height and contains(@height, "%")]'):
-                    logger.info('Found SVG image height in %, trying to convert...')
+                    logger.info('Found SVG image height in %, trying to '
+                                'convert...')
                     try:
                         h = float(image.get('height').replace('%', ''))/100.
                         image.set('height', str(h*sizes[1]))
                     except:
-                        logger.exception('Failed to convert percentage height:',
-                                         image.get('height'))
+                        logger.exception('Failed to convert percentage '
+                                         'height: %s', image.get('height'))
 
         data = QByteArray(xml2str(elem, with_tail=False))
         svg = QSvgRenderer(data)
@@ -85,8 +87,8 @@ class SVGRasterizer(object):
             size.setHeight(sizes[1])
         if width or height:
             size.scale(width, height, Qt.KeepAspectRatio)
-        logger.info('Rasterizing %r to %dx%d'
-                    % (elem, size.width(), size.height()))
+        logger.info('Rasterizing %r to %dx%d', elem, size.width(),
+                    size.height())
         image = QImage(size, QImage.Format_ARGB32_Premultiplied)
         image.fill(QColor("white").rgb())
         painter = QPainter(image)
@@ -186,8 +188,8 @@ class SVGRasterizer(object):
             href = self.images[key]
         else:
             logger = self.oeb.logger
-            logger.info('Rasterizing %r to %dx%d'
-                        % (svgitem.href, size.width(), size.height()))
+            logger.info('Rasterizing %r to %dx%d', svgitem.href, size.width(),
+                        size.height())
             image = QImage(size, QImage.Format_ARGB32_Premultiplied)
             image.fill(QColor("white").rgb())
             painter = QPainter(image)
@@ -219,7 +221,7 @@ class SVGRasterizer(object):
         if not covers:
             return
         if str(covers[0]) not in self.oeb.manifest.ids:
-            self.oeb.logger.warn('Cover not in manifest, skipping.')
+            self.oeb.logger.warning('Cover not in manifest, skipping.')
             self.oeb.metadata.clear('cover')
             return
         cover = self.oeb.manifest.ids[str(covers[0])]
@@ -7,10 +7,6 @@ from ebook_converter.ebooks.oeb.base import CSS_MIME, OEB_DOCS
 from ebook_converter.ebooks.oeb.base import urlnormalize, iterlinks
 
 
-__license__ = 'GPL v3'
-__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
-
-
 class ManifestTrimmer(object):
 
     @classmethod
@@ -69,5 +65,5 @@ class ManifestTrimmer(object):
             unchecked = new
         for item in oeb.manifest.values():
             if item not in used:
-                oeb.logger.info('Trimming %r from manifest' % item.href)
+                oeb.logger.info('Trimming %r from manifest', item.href)
                 oeb.manifest.remove(item)
@@ -53,7 +53,8 @@ class TextileMLizer(OEB2HTML):
     def mlize_spine(self, oeb_book):
         output = ['']
         for item in oeb_book.spine:
-            self.log.debug('Converting %s to Textile formatted TXT...', item.href)
+            self.log.debug('Converting %s to Textile formatted TXT...',
+                           item.href)
             self.rewrite_ids(item.data, item)
             rewrite_links(item.data, partial(self.rewrite_link, page=item))
             stylizer = Stylizer(item.data, item.href, oeb_book, self.opts, self.opts.output_profile)