epub converter: add internal links processing

- fix heading cleaning
- fix handling when the chapter mark is not on the first level
This commit is contained in:
shirshasa
2021-05-25 12:58:14 +03:00
parent 0ac20999b5
commit 3eac136e07
2 changed files with 146 additions and 59 deletions

View File

@@ -1,6 +1,7 @@
import codecs
import json
import logging
import re
from os.path import dirname, normpath, join
from collections import defaultdict
from typing import Dict, Union
@@ -22,9 +23,9 @@ class EpubPostprocessor:
def __init__(self, file, access=None, logger=None):
self.file = file
self.access = access
self.logger = logger
self.logger: BookLogger = logger
self.ebooklib_book = epub.read_epub(file) # todo: log error from ebooklib
self.internal_links_found = 0
self.logger.log('Image processing.')
self.href2img_bytes = {}
self.old_image_path2_aws_path = {}
@@ -42,13 +43,13 @@ class EpubPostprocessor:
self.id_anchor_exist_in_nav_points = False
self.href2soup_html: Dict[str, BeautifulSoup] = self.build_href2soup_content()
self.logger.log('CSS processing.')
self.logger.log('CSS files processing.')
self.html_href2css_href = {}
self.css_href2content = {}
self.build_css_content()
# add css
# self.logger.log('CSS styles adding processing.')
# self.add_css_styles2soup()
self.logger.log('CSS styles adding.')
self.add_css_styles2soup()
self.logger.log('Footnotes processing.')
self.footnotes = []
@@ -57,16 +58,17 @@ class EpubPostprocessor:
self.logger.log(f'Added {len(self.footnotes)} footnotes.')
self.logger.log('TOC processing.')
self.href2ids = defaultdict(list)
self.added_to_toc_hrefs = []
self.added_to_toc_hrefs = set()
self.adjacency_list: Dict[Union[NavPoint, -1], Union[list, None]] = {} # k = -1 if root, v = None if leaf
self.build_adjacency_list_from_toc(self.ebooklib_book.toc)
# build simple toc from spine if needed
if not self.is_toc_valid():
self.build_adjacency_list_from_spine()
not_added = [x for x in self.href2soup_html if x not in self.added_to_toc_hrefs]
self.logger.log(f'html documents not added to TOC: {not_added}')
self.logger.log(f'Html documents not added to TOC: {not_added}.')
# read anchored blocks, split html into separate block
self.mark_and_line_href2soup_html() # used only after parsed toc, ids from toc needed
self.process_internal_links()
self.id_anchor2soup: Dict[tuple, BeautifulSoup] = {}
self.build_anchor2soup()
@@ -131,7 +133,7 @@ class EpubPostprocessor:
self.id_anchor_exist_in_nav_points = True
self.href2ids[node.href].append(node.id)
self.adjacency_list[node] = None
self.added_to_toc_hrefs.append(node.href)
self.added_to_toc_hrefs.add(node.href)
return node
elif isinstance(element, tuple):
@@ -147,7 +149,7 @@ class EpubPostprocessor:
sub_nodes.append(self.build_adjacency_list_from_toc(i, lvl + 1))
self.adjacency_list[node] = sub_nodes
self.added_to_toc_hrefs.append(node.href)
self.added_to_toc_hrefs.add(node.href)
return node
elif isinstance(element, list) and (lvl == 0):
@@ -173,7 +175,7 @@ class EpubPostprocessor:
for id_, _ in self.ebooklib_book.spine:
node = NavPoint(Section(manifest_id2href[id_], manifest_id2href[id_]))
self.adjacency_list[-1].append(node)
self.added_to_toc_hrefs.append(node.href)
self.added_to_toc_hrefs.add(node.href)
def mark_and_line_href2soup_html(self):
# mark
@@ -182,8 +184,8 @@ class EpubPostprocessor:
for i in ids:
soup = self.href2soup_html[href]
tag = soup.find(id=i)
new_h = soup.new_tag('h1')
new_h.attrs['class'] = 'internal-mark'
new_h = soup.new_tag('tmp')
new_h.attrs['class'] = 'converter-chapter-mark'
new_h.attrs['id'] = i
tag.insert_before(new_h)
@@ -192,6 +194,64 @@ class EpubPostprocessor:
soup = self.href2soup_html[href]
self.href2soup_html[href] = unwrap_structural_tags(soup)
@staticmethod
def _create_unique_id(href, id_):
return re.sub(r'([^\w\s])|_|-', '', href) + id_
def process_internal_links(self):
    """Rewrite intra-book links into server-side placeholders.

    First makes every element id unique across all documents (prefixing it
    with a sanitized form of its file href via ``_create_unique_id``), then
    for each ``<a href="file#id">`` / ``<a href="#id">`` link either:
      * sets ``placeholder='{{tempStyleToAnchor-<new_id>}}'`` and drops the
        ``href`` when the target anchor is found (the server builds the real
        link from the placeholder), incrementing ``self.internal_links_found``;
      * or marks the link with ``converter-mark='bad-link'``.
    """
    # Rebuild ids to be unique across all documents.
    # NOTE(review): assumes every href in added_to_toc_hrefs is a key of
    # href2soup_html — a TOC href carrying a fragment would KeyError here.
    for href in self.added_to_toc_hrefs:
        for tag in self.href2soup_html[href].find_all(attrs={'id': re.compile(r'.+')}):
            # Skip the synthetic chapter-mark tags inserted earlier; their ids
            # must keep matching the ids collected from the TOC. BeautifulSoup
            # may expose 'class' either as the string it was assigned with or
            # as a multi-valued list after (re)parsing — accept both forms.
            cls = tag.attrs.get('class')
            if cls == 'converter-chapter-mark' or (
                    isinstance(cls, list) and 'converter-chapter-mark' in cls):
                continue
            tag.attrs['id'] = self._create_unique_id(href, tag.attrs['id'])
    # Write a placeholder into every internal link.
    internal_link_reg = re.compile(r'(^.+\.(html|xhtml)\#.+)|(^\#.+)')
    for href in self.added_to_toc_hrefs:
        soup = self.href2soup_html[href]
        for internal_link_tag in soup.find_all('a', {'href': internal_link_reg}):
            # maxsplit=1: a malformed href containing several '#' must not
            # raise ValueError on unpacking.
            href_in_link, id_in_link = internal_link_tag.attrs['href'].split('#', 1)
            if not href_in_link:
                # Same-document link of the form '#id'.
                href_in_link = href
            # Find the full path of the target document.
            # NOTE(review): substring match — '1.xhtml' also matches
            # '01.xhtml'; ambiguity is logged and the first candidate wins.
            full_path = [path for path in self.added_to_toc_hrefs if href_in_link in path]
            if not full_path:
                self.logger.log(f'Error in {href} file. No {href_in_link} file found in added to TOC documents. '
                                f'While processing href in {internal_link_tag}.')
                internal_link_tag.attrs['converter-mark'] = 'bad-link'
                continue
            if len(full_path) > 1:
                self.logger.log(f'Warning in {href}. Multiple paths found {full_path} for file {href_in_link}'
                                f' while {internal_link_tag} processing. The first one will be chosen.')
            href_in_link = full_path[0]
            new_id = self._create_unique_id(href_in_link, id_in_link)
            anchor_soup = self.href2soup_html[href_in_link]
            anchor_tags = anchor_soup.find_all(attrs={'id': new_id})
            if anchor_tags:
                if len(anchor_tags) > 1:
                    self.logger.log(f'Warning in {href}: multiple anchors: {anchor_tags} found.'
                                    f' While processing {internal_link_tag}')
                anchor_tag = anchor_tags[0]
                # Anchor found: leave a placeholder for server-side link
                # creation; the original href is no longer needed.
                internal_link_tag.attrs['placeholder'] = '{{tempStyleToAnchor-' + new_id + '}}'
                # NOTE(review): overwrites any existing class on the anchor
                # tag — confirm this is intended.
                anchor_tag.attrs['class'] = 'link-anchor'
                del internal_link_tag.attrs['href']
                self.internal_links_found += 1
            else:
                internal_link_tag.attrs['converter-mark'] = 'bad-link'
                # Page-list anchors (ids containing 'page') are expected to be
                # missing — do not spam the log for them.
                if 'page' not in id_in_link:
                    self.logger.log(f'Error in {href}. While processing {internal_link_tag} no anchor found.'
                                    f' Should be anchor with new id={new_id} in {href_in_link} file.'
                                    f' Old id={id_in_link}')
def build_one_anchored_section(self, node):
"""
By this point the html soup already exists in linear form
@@ -248,11 +308,12 @@ class EpubPostprocessor:
# warning! items that are not EpubHtmlItems won't be added to the chapter
if self.adjacency_list.get(node):
for sub_node in self.adjacency_list[node]:
sub_chapter_item = self.node2livecarta_chapter_item(sub_node, lvl+1)
sub_chapter_item = self.node2livecarta_chapter_item(sub_node, lvl + 1)
sub_nodes.append(sub_chapter_item)
if self.logger:
self.logger.log(f'Chapter: {title} is prepared.')
indent = ' ' * lvl
self.logger.log(f'{indent}Chapter: {title} is prepared.')
return ChapterItem(title_preprocessed, content_preprocessed, sub_nodes)
def convert_to_dict(self):
@@ -264,6 +325,8 @@ class EpubPostprocessor:
top_level_chapters.append(chapter)
top_level_dict_chapters = [x.to_dict() for x in top_level_chapters]
self.logger.log(f'Internal links found: {self.internal_links_found}.')
self.logger.log('End conversion.')
return {
"content": top_level_dict_chapters,
@@ -275,6 +338,8 @@ if __name__ == "__main__":
logger = logging.getLogger('epub')
file_handler = logging.StreamHandler()
logger.addHandler(file_handler)
file_handler = logging.FileHandler('epub.log', mode='w+')
logger.addHandler(file_handler)
logger_object = BookLogger(name=f'epub', main_logger=logger, book_id=0)