forked from LiveCarta/BookConverter
refactor converter
- split Book class into HTMLPreprocessor, JSONConverter, and Book (main flow) classes
- extract logging, LawCarta setup, and status updates via the API into separate objects
- in HTMLPreprocessor: add cleaning of hrefs
src/book.py: 1003 changes (diff suppressed because it is too large)
src/config.py (new file, 111 lines)
@@ -0,0 +1,111 @@
import logging
import os

from access import Access


class BookLogger:
    def __init__(self, name, book_id, main_logger=None,
                 filemode='w+', logging_level=logging.INFO,
                 logging_format='%(asctime)s - %(levelname)s - %(message)s'):
        """
        Configure a per-book logger that writes to its own file.

        :param name: name of the logger.
        :param book_id: id of the book, used to build the log file name.
        :param main_logger: optional top-level logger to mirror errors to.
        :param filemode: mode for opening the log file.
        :param logging_level: logging level: 10 - debug, 20 - info, 30 - warning, 40 - error, 50 - critical.
        :param logging_format: format of records in the log file.
        """
        self.main_logger = main_logger

        self.logger = logging.getLogger(name)
        folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        filename = f'logs/{book_id}_log.log'
        file_path = os.path.join(folder_path, filename)
        file_handler = logging.FileHandler(file_path, mode=filemode)
        file_format = logging.Formatter(fmt=logging_format)
        file_handler.setFormatter(file_format)
        self.logger.addHandler(file_handler)
        self.logger.setLevel(logging_level)

    def log(self, message, logging_level=logging.INFO):
        """
        Write a record to the book log.

        :param message: body of the message.
        :param logging_level: level of the record.
        """
        self.logger.log(msg=message, level=logging_level)

    def log_error_to_main_log(self, message=''):
        """
        Mirror an error message to the main log file, if a main logger is set.
        """
        if self.main_logger:
            if not message:
                message = 'Error in book conversion. Check log file.'
            self.main_logger.error(message)

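
# Editor's usage sketch (not part of the original commit): how a caller might
# wire a per-book logger to the main converter logger. Assumes a logs/ folder
# exists at the project root, as BookLogger expects.
def _book_logger_demo():
    main_logger = logging.getLogger('converter')
    book_logger = BookLogger('book-demo', book_id=0, main_logger=main_logger)
    book_logger.log('Conversion started.')
    book_logger.log('Something went wrong.', logging.ERROR)
    book_logger.log_error_to_main_log()
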

class BookConfig:
    SUPPORTED_LEVELS = 4
    SUPPORTED_HEADERS = {"h1", "h2", "h3", "h4"}
    HEADERS_LEVELS = {"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9"}

    # Main constant values
    DEFAULT_FONT_NAME = 'Times New Roman'
    DEFAULT_ALIGN_STYLE = 'left'
    WORD_DEFAULT_FONT_SIZE = 11
    LAWCARTA_DEFAULT_FONT_SIZE = 18
    FONT_CONVERT_RATIO = LAWCARTA_DEFAULT_FONT_SIZE / WORD_DEFAULT_FONT_SIZE
    font_correspondence_table = {
        "Arial": "arial,helvetica,sans-serif",
        "Comic Sans MS": "comic sans ms,cursive",
        "Courier New": "courier new,courier,monospace",
        "Georgia": "georgia,serif",
        "Lucida Sans Unicode": "lucida sans unicode,lucida grande,sans-serif",
        "Tahoma": "tahoma,geneva,sans-serif",
        "Times New Roman": "times new roman,times,serif",
        "Trebuchet MS": "trebuchet ms,helvetica,sans-serif",
        "Verdana": "verdana,geneva,sans-serif"
    }


class BookApiWrapper:
    def __init__(self, access, logger_object, book_id=0):
        self.access: Access = access
        self.logger_object = logger_object
        self.book_id = book_id

    def _set_status(self, status_name):
        """Update the book status via the API and log the result."""
        try:
            if self.access:
                self.access.update_status(self.book_id, getattr(self.access, status_name))
                self.logger_object.log(f'Status has been updated to [{status_name}].')
        except Exception as exc:
            self.logger_object.log(f"Can't update status of the book [{status_name}].", logging.ERROR)
            self.logger_object.log_error_to_main_log()
            raise exc

    def set_process_status(self):
        self._set_status('PROCESS')

    def set_generate_status(self):
        self._set_status('GENERATE')

    def set_error_status(self):
        self._set_status('ERROR')

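
# Editor's usage sketch (not part of the original commit): reporting progress
# for a book via the API. `access` is an Access instance; with access=None
# every status call is a silent no-op.
def _status_demo(access=None):
    logger_object = BookLogger('book-demo', book_id=0)
    api = BookApiWrapper(access, logger_object, book_id=0)
    api.set_process_status()
    api.set_generate_status()
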
@@ -15,7 +15,7 @@ from book import Book
 
 def configure_file_logger(name, filename='logs/converter_log.log', filemode='w+',
                           logging_level=logging.INFO,
-                          logging_format='%(asctime)s - %(message)s'):
+                          logging_format='%(asctime)s - %(levelname)s - %(message)s'):
     logger = logging.getLogger(name)
 
     folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -32,11 +32,10 @@ def configure_file_logger(name, filename='logs/converter_log.log', filemode='w+'
 
 def convert_book(book_id, access, logger, libra_locker):
     logger.info(f'Start processing book-{book_id}.')
-    logging_format = '%(asctime)s - %(levelname)s - %(message)s'
 
     try:
         book = Book(book_id, access, main_logger=logger, libra_locker=libra_locker)
-        book.conversion(logging_format=logging_format)
+        book.conversion()
     except Exception as exc:
         raise exc
 
@@ -105,7 +104,7 @@ def local_run(books):
 
 
 def server_run():
-    logger = configure_file_logger('consumer', logging_format='%(asctime)s - %(levelname)s - %(message)s')
+    logger = configure_file_logger('consumer')
 
     folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
     config_path = Path(os.path.join(folder_path, "config/queue_config.json"))
src/html_preprocessor.py (new file, 603 lines)
@@ -0,0 +1,603 @@
import logging
import os
import pathlib
import re
from shutil import copyfile

from bs4 import BeautifulSoup, NavigableString
from config import BookConfig, BookLogger, BookApiWrapper


class HTMLPreprocessor:

    def __init__(self, html_soup, logger_object, book_api_wrapper=None):
        self.body_tag = html_soup.body
        self.html_soup = html_soup
        self.logger_object: BookLogger = logger_object
        self.book_api_wrapper: BookApiWrapper = book_api_wrapper
        self.top_level_headers = None
        self.content = []
        # Filled in during processing (see process_html).
        self.footnotes = []
        self.images = []
        self.tables_amount = 0

    def _clean_tag(self, tag, attr_name, attr_value):
        """
        Unwrap tags matched by name and attribute value.

        :param tag: Tag name to clean.
        :param attr_name: Attribute name.
        :param attr_value: Attribute value.
        """
        found_tags = self.body_tag.find_all(tag, {attr_name: attr_value})
        for found_tag in found_tags:
            if len(found_tag.attrs) == 1:
                found_tag.unwrap()

    def _clean_underline_links(self):
        """
        Remove meaningless <u> tags around and inside links.
        """
        underlines = self.body_tag.find_all("u")
        for u in underlines:
            if u.find_all('a'):
                u.unwrap()

        links = self.body_tag.find_all('a')
        for link in links:
            u = link.find_all('u')
            if u and len(u) == 1:
                u[0].unwrap()

    @classmethod
    def convert_pt_to_px(cls, value):
        value = int(value)
        if value == BookConfig.WORD_DEFAULT_FONT_SIZE:
            return BookConfig.LAWCARTA_DEFAULT_FONT_SIZE
        return value

    @classmethod
    def convert_font_pt_to_px(cls, style):
        """
        Convert points in a font-size declaration to pixels.

        :param style: Str with the style to process.
        :return: Str with the converted style.
        """
        size = re.search(r"font-size: (\d{1,3})pt", style)

        if size is None:
            return style

        size = size.group(1)
        new_size = cls.convert_pt_to_px(size)

        if new_size == BookConfig.LAWCARTA_DEFAULT_FONT_SIZE:
            return ""

        # Non-default sizes keep their numeric value; only the unit is swapped.
        return re.sub(size + "pt", str(new_size) + "px", style)

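    # Editor's examples (sketch): the Word default maps to the LawCarta default
    # and the style is dropped; other sizes keep their value with a px unit.
    #   convert_font_pt_to_px('font-size: 11pt')  ->  ''
    #   convert_font_pt_to_px('font-size: 14pt')  ->  'font-size: 14px'
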
    def _font_to_span(self):
        """
        Convert <font> tags to <span>. If the font style is the default, remove the tag.
        """
        fonts = self.body_tag.find_all("font")
        for font in fonts:
            face = font.get("face")
            style = font.get("style")

            font.attrs = {}
            font.name = "span"
            if style:
                style = self.convert_font_pt_to_px(style)
                if style != "":
                    font.attrs["style"] = style
            if face is not None:
                face = re.sub(r",[\w,\- ]*$", "", face)
                if face != BookConfig.DEFAULT_FONT_NAME and BookConfig.font_correspondence_table.get(face):
                    font.attrs["face"] = BookConfig.font_correspondence_table[face]
                else:
                    font.attrs["face"] = BookConfig.DEFAULT_FONT_NAME

            if len(font.attrs) == 0:
                font.unwrap()

        assert len(self.body_tag.find_all("font")) == 0  # at this point there should be no <font> tags left

    def _remove_table_of_contents(self):
        """
        Remove the table of contents from the file.
        """
        tables = self.body_tag.find_all("div", id=re.compile(r'^Table of Contents\d+'))
        for table in tables:
            table.decompose()

    def _change_table_of_contents(self):
        """
        Replace the table of contents with an empty <TOC> marker tag.
        """
        tables = self.body_tag.find_all("div", id=re.compile(r'^Table of Contents\d+'))
        for table in tables:
            table.wrap(self.html_soup.new_tag("TOC"))
            table.decompose()

    def delete_content_before_toc(self):
        """
        Drop everything up to and including the <TOC> marker, if present.
        """
        toc_tag = self.html_soup.new_tag('TOC')
        if toc_tag in self.content:
            ind = self.content.index(toc_tag) + 1
            self.content = self.content[ind:]

    def clean_trash(self):
        """
        Remove all styles and tags we don't need.
        """
        self._clean_tag('span', 'style', re.compile(r'^background: #[0-9a-fA-F]{6}$'))
        self._clean_tag('span', 'lang', re.compile(r'^ru-RU$'))  # todo: check for other languages
        self._clean_tag('span', 'style', re.compile(r'^letter-spacing: -?[\d.]+pt$'))

        self._clean_tag('font', 'color', re.compile(r'^#[0-9a-fA-F]{6}$'))
        self._clean_tag('font', 'face', re.compile(r'^Times New Roman[\w, ]+$'))

        self._clean_tag("a", "name", "_GoBack")
        self._clean_underline_links()

        self._font_to_span()
        # self._remove_table_of_contents()
        self._change_table_of_contents()

    def _process_paragraph(self):
        """
        Process <p> tags (text-align and text-indent values).
        """
        paragraphs = self.body_tag.find_all('p')

        for p in paragraphs:
            # libra converts some \n into a <p> with two <br> tags;
            # here we remove the one unnecessary <br>
            brs = p.find_all('br')
            text = p.text
            if brs and text == '\n\n' and len(brs) == 2:
                brs[0].decompose()

            align = p.get('align')
            style = p.get('style')

            if style:
                indent = re.search(r'text-indent: ([\d.]{1,4})in', style)
                margin_left = re.search(r'margin-left: ([\d.]{1,4})in', style)
                margin_right = re.search(r'margin-right: ([\d.]{1,4})in', style)
                margin_top = re.search(r'margin-top: ([\d.]{1,4})in', style)
                margin_bottom = re.search(r'margin-bottom: ([\d.]{1,4})in', style)
            else:
                indent = None
                margin_left = None
                margin_right = None
                margin_top = None
                margin_bottom = None

            # paragraphs with this exact margin set are treated as block quotes
            if margin_left and margin_right and margin_top and margin_bottom and \
                    margin_left.group(1) == '0.6' and margin_right.group(1) == '0.6' and \
                    margin_top.group(1) == '0.14' and margin_bottom.group(1) == '0.11':
                p.wrap(BeautifulSoup(features='lxml').new_tag('blockquote'))

            p.attrs = {}
            style = ''

            if align is not None and align != BookConfig.DEFAULT_ALIGN_STYLE:
                style += f'text-align: {align};'

            if indent is not None:
                indent = indent.group(1)
                style += f'text-indent: {indent}in;'

            if style:
                p.attrs['style'] = style

    def _process_two_columns(self):
        """
        Process paragraphs that have a two-column layout.
        """
        two_columns = self.body_tag.find_all("div", style="column-count: 2")
        for div in two_columns:
            for child in div.children:
                if child.name == "p":
                    child["class"] = "columns2"
            div.unwrap()

    def _process_tables(self):
        """
        Process tables: average the cell border widths and set the table "border" attribute.
        """
        tables = self.body_tag.find_all("table")
        for table in tables:
            tds = table.find_all("td")

            sizes = []
            for td in tds:
                style = td.get('style')

                if style:
                    match = re.search(r"border: ?(\d+\.?\d*)(p[tx])", style)

                    if match:
                        size = match.group(1)
                        units = match.group(2)

                        if units == "pt":
                            # convert_pt_to_px expects a number; float() avoids
                            # a crash on fractional sizes like "2.25"
                            size = self.convert_pt_to_px(float(size))

                        sizes.append(float(size))

                width = td.get('width')

                # keep only the width attribute on cells
                td.attrs = {}
                if width:
                    td.attrs['width'] = width

            if sizes:
                border_size = sum(sizes) / len(sizes)
                table.attrs['border'] = f'{border_size:.2}'  # two significant digits

        self.tables_amount = len(tables)

    def _process_quotes(self):
        """
        Process block quotes.
        After docx to html conversion, block quotes are stored inside a table with one cell,
        with all text wrapped in <i> tags. Such tables are replaced with <blockquote> tags.

        <table cellpadding="7" cellspacing="0" width="614">
            <col width="600"/>
            <tr>
                <td width="600">
                    <p style="text-align: justify;"><i>aaaaa</i></p>
                    <p style="text-align: justify;"><br/></p>
                </td>
            </tr>
        </table>
        """
        tables = self.body_tag.find_all("table")
        for table in tables:
            trs = table.find_all("tr")
            tds = table.find_all("td")
            if len(trs) == 1 and len(tds) == 1 and tds[0].get('width') == '600':
                td = tds[0]
                is_zero_border = 'border: none;' in (td.get('style') or '')
                paragraphs = td.find_all("p")
                has_i_tag_or_br = [(p.i, p.br) for p in paragraphs]
                has_i_tag_or_br = [x[0] is not None or x[1] is not None
                                   for x in has_i_tag_or_br]

                if all(has_i_tag_or_br) and is_zero_border:
                    new_blockquote = BeautifulSoup(features='lxml').new_tag('blockquote')
                    for p in paragraphs:
                        new_blockquote.append(p)

                    table.replace_with(new_blockquote)

    def _process_hrefs(self):
        """
        Clean external links: strip the zero-width non-joiner character
        (U+200C, URL-encoded as %E2%80%8C) that some editors append,
        from both the link text and the href value.
        """
        a_tags_with_href = self.body_tag.find_all('a', {'href': re.compile('^.*http.+')})

        for tag in a_tags_with_href:
            tag.string = tag.text.replace('\u200c', '')
            tag['href'] = tag.attrs.get('href').replace('%E2%80%8C', '')
            self.logger_object.log(f'Cleaned href: {tag}', logging.DEBUG)

    @staticmethod
    def _clean_footnote_content(content):
        return content.strip()

    def _process_footnotes(self):
        """
        Collect the list of footnotes and delete them from html_soup.
        """
        footnote_anchors = self.body_tag.find_all('a', class_='sdfootnoteanc')
        footnote_content = self.body_tag.find_all('div', id=re.compile(r'^sdfootnote\d+$'))
        footnote_amt = len(footnote_anchors)

        assert footnote_amt == len(footnote_content), \
            'Something went wrong with footnotes after libra conversion'

        footnotes = []

        for i, (anc_tag, cont_tag) in enumerate(zip(footnote_anchors, footnote_content)):
            if cont_tag.find('a').attrs.get('href') is None:
                cont_tag.a.decompose()
                continue
            assert anc_tag['name'] == cont_tag.find('a')['href'][1:], \
                'Something went wrong with footnotes after libra conversion'

            new_tag = BeautifulSoup(features='lxml').new_tag('sup')
            new_tag['class'] = 'footnote-element'
            new_tag['data-id'] = i + 1
            new_tag['id'] = f'footnote-{i + 1}'
            new_tag.string = '*'
            anc_tag.replace_with(new_tag)

            # extra digits in footnotes from documents downloaded from livecarta
            a_text = cont_tag.a.text
            if len(cont_tag.find_all('p')):
                sup = cont_tag.find_all('p')[0].find('sup')
                if sup and sup.text == a_text:
                    sup.decompose()
            cont_tag.a.decompose()

            unicode_string = ''
            for child in cont_tag.children:
                if isinstance(child, NavigableString):
                    continue
                if child.name == 'blockquote':
                    unicode_string += str(child)
                else:
                    unicode_string += child.decode_contents()

            content = self._clean_footnote_content(unicode_string)
            cont_tag.decompose()

            footnotes.append(content)

        self.footnotes = footnotes

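    # Structure sketch (editor's assumption, based on typical LibreOffice output):
    #   anchor in the text:  <a class="sdfootnoteanc" name="sdfootnote1anc"
    #                           href="#sdfootnote1sym"><sup>1</sup></a>
    #   footnote container:  <div id="sdfootnote1"><a class="sdfootnotesym"
    #                           name="sdfootnote1sym" href="#sdfootnote1anc">1</a>
    #                           Footnote text.</div>
    # The anchor becomes <sup class="footnote-element" data-id="1"
    # id="footnote-1">*</sup>, and the container's cleaned inner HTML is
    # collected into self.footnotes.
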
    def _process_images(self, access, html_path, book_id):
        """
        Process <img> tags. Each image is sent to Amazon S3 and the tag gets back a valid link.
        Without API access, images are instead moved to one local folder.
        """
        img_tags = self.body_tag.find_all('img')

        if len(img_tags):
            if access is None:
                folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
                new_path = pathlib.Path(os.path.join(folder_path, f'json/img_{book_id}/'))
                new_path.mkdir(exist_ok=True)

            for img in img_tags:
                img_name = img.attrs.get('src')
                # quick fix for bad links
                if (len(img_name) >= 3) and img_name[:3] == '../':
                    img_name = img_name[3:]

                img_path = pathlib.Path(f'{html_path.parent}', f'{img_name}')

                if access is not None:
                    link = access.send_image(img_path, book_id)
                    img.attrs['src'] = link
                    self.logger_object.log(f'{img_name} successfully uploaded.')
                else:
                    img_size = os.path.getsize(img_path)
                    self.logger_object.log(f'{img_name} successfully loaded. Image size: {img_size}.', logging.DEBUG)
                    new_img_path = new_path / img_name
                    copyfile(img_path, new_img_path)
                    img.attrs["src"] = str(new_img_path)

        self.images = img_tags

    def _process_footer(self):
        """
        Process <div title="footer"> tags. All such tags are deleted from the file.
        """
        divs = self.body_tag.find_all('div', {'title': 'footer'})
        for div in divs:
            div.decompose()

    def _process_div(self):
        """
        Process <div> tags. The tags are removed from the file; their content stays.
        """
        divs = self.body_tag.find_all("div")

        for div in divs:
            div.unwrap()

    def _process_toc_links(self):
        """
        Extract the nodes that contain TOC links, remove the links from the file, and detect headers.
        """
        toc_links = self.body_tag.find_all("a", {'name': re.compile(r'^_Toc\d+')})
        headers = [link.parent for link in toc_links]
        outline_level = "1"  # all unknown outlines are treated as <h1>
        for tag in headers:
            if re.search(r"^h\d$", tag.name):
                tag.a.unwrap()
                # outline_level = tag.name[-1]  # TODO: add prediction of the outline level
            # TODO: avoid recounting paragraphs every time
            elif tag.name == "p":
                if tag in self.body_tag.find_all("p"):
                    new_tag = BeautifulSoup(features="lxml").new_tag("h" + outline_level)
                    text = tag.text
                    tag.replace_with(new_tag)
                    new_tag.string = text
            else:
                # rethink the document structure when toc_links appear in other cases
                self.logger_object.log(f'Something went wrong in processing toc_links.'
                                       f' Check the structure of the file. '
                                       f'Tag name: {tag.name}')

    @staticmethod
    def clean_header_title(title):
        """
        Remove leading numbering and extra whitespace from a header title.

        :param title: Title to process.
        """
        title = re.sub(r'\s+', ' ', title).strip()
        title = re.sub(r'^(?:\.?\d+\.? ?)+', '', title)
        # title = re.sub(r'^(?:\.?[MDCLXVIclxvi]+\.? ?)+ ', '', title)  # delete roman chapter numbering from the title
        title = re.sub(r'^(?:[A-Za-z]\. ?)+', '', title)
        return title.strip()

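    # Editor's examples (sketch):
    #   clean_header_title('1.2  Contract  Law ')  ->  'Contract Law'
    #   clean_header_title('A. Overview')          ->  'Overview'
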
    def _preprocessing_headings(self):
        """
        Convert all unsupported lower-level headings (h5-h9) to <p> tags.
        """
        pattern = f'^h[{BookConfig.SUPPORTED_LEVELS + 1}-9]$'
        header_tags = self.body_tag.find_all(re.compile(pattern))
        for tag in header_tags:
            tag.name = 'p'

    def _get_top_level_headers(self):
        """
        Gather info about top-level chapters.

        Assumption: headers with the smallest outline (the digit in <h*>) are
        top-level chapters. [This is consistent with the recursive algorithm
        for saving content to the resulting json structure, which happens
        in header_to_json().]
        """
        headers_info = []
        header_tags = self.body_tag.find_all(re.compile("^h[1-9]$"))
        headers_outline = [int(re.sub(r"^h", "", tag.name)) for tag in header_tags]
        if headers_outline:
            top_level_outline = min(headers_outline)
            top_level_headers = [tag for tag in header_tags
                                 if int(re.sub(r"^h", "", tag.name)) == top_level_outline]

            for tag in top_level_headers:
                if tag.parent.name == "li":
                    tag.parent.unwrap()
                while tag.parent.name == "ol":
                    tag.parent.unwrap()

                title = tag.text
                title = re.sub(r'\s+', ' ', title).strip()
                number = re.match(r'^(?:\.?\d+\.? ?)+', title)
                is_numbered = number is not None

                cleaned_title = self.clean_header_title(tag.text)
                is_introduction = cleaned_title.lower() == 'introduction'

                headers_info.append({
                    'title': cleaned_title,
                    'is_numbered': is_numbered,
                    'is_introduction': is_introduction})

        return headers_info

    def _mark_introduction_headers(self):
        """
        Decide which header should not be numbered and can be treated as the introduction chapter.

        A header is assumed to be the introduction if:
        1. it is the first header, it is not numbered, and the next header (if any) is numbered; or
        2. it is the first header in the top-level list and its title equals 'introduction'.

        Result: each top-level header is marked with the flag should_be_numbered = True/False.
        """
        is_numbered_header = [header['is_numbered'] for header in self.top_level_headers]
        is_title = [header['is_introduction'] for header in self.top_level_headers]

        first_not_numbered = is_numbered_header and is_numbered_header[0] == 0
        second_is_numbered_or_not_exist = all(is_numbered_header[1:2])
        first_header_is_introduction = is_title and is_title[0]

        if (first_not_numbered and second_is_numbered_or_not_exist) or first_header_is_introduction:
            self.top_level_headers[0]['should_be_numbered'] = False
            for i in range(1, len(self.top_level_headers)):
                self.top_level_headers[i]['should_be_numbered'] = True
        else:
            for i in range(0, len(self.top_level_headers)):
                self.top_level_headers[i]['should_be_numbered'] = True

    def _process_headings(self):
        """
        Process <h*> tags: strip numbering from titles and rebuild clean header tags.
        """
        header_tags = self.body_tag.find_all(re.compile("^h[1-9]$"))
        for tag in header_tags:
            if tag.parent.name == "li":
                tag.parent.unwrap()
            while tag.parent.name == "ol":
                tag.parent.unwrap()

            title = tag.text
            title = self.clean_header_title(title)
            if title == "":
                tag.unwrap()
            else:
                assert tag.name in BookConfig.SUPPORTED_HEADERS, \
                    f'Preprocessing went wrong, there are still h{BookConfig.SUPPORTED_LEVELS + 1}-h9 headings.'
                # if tag.name in ["h4", "h5", "h6"]:
                #     tag.name = "h3"  # all lower-level headings would be transformed to h3 headings

                new_tag = BeautifulSoup(features='lxml').new_tag(name=tag.name)
                new_tag.string = title
                tag.replace_with(new_tag)

    def _process_lists(self):
        """
        Process <li> tags: move the attributes of the inner <p> onto the <li> and unwrap the <p>.
        """
        li_tags = self.body_tag.find_all("li")

        for li_tag in li_tags:
            # guard against list items that contain no <p> tag
            if li_tag.p:
                li_tag.attrs.update(li_tag.p.attrs)
                li_tag.p.unwrap()

    def process_html(self, access, html_path, book_id):
        """
        Process the html code to satisfy LawCarta formatting.
        """
        try:
            self.clean_trash()

            # process main elements of the .html doc
            self.logger_object.log('Processing main elements of html.')
            self._preprocessing_headings()
            self._process_paragraph()
            self._process_two_columns()

            self.logger_object.log('Block quotes processing.')
            self._process_quotes()

            self.logger_object.log('Tables processing.')
            self._process_tables()
            self.logger_object.log(f'{self.tables_amount} tables have been processed.')

            self.logger_object.log('Hrefs processing.')
            self._process_hrefs()

            self.logger_object.log('Footnotes processing.')
            self._process_footnotes()
            self.logger_object.log(f'{len(self.footnotes)} footnotes have been processed.')

            self.logger_object.log('Image processing.')
            self._process_images(access=access, html_path=html_path, book_id=book_id)
            self.logger_object.log(f'{len(self.images)} images have been processed.')

            self._process_footer()
            self._process_div()

            self.content = self.body_tag.find_all(recursive=False)

            self.logger_object.log('Processing TOC and headers.')
            self._process_toc_links()

            self.top_level_headers = self._get_top_level_headers()
            self._mark_introduction_headers()

            self._process_headings()

            self.content = self.body_tag.find_all(recursive=False)

            self._process_lists()
            # delete text before the table of contents, if it exists
            self.delete_content_before_toc()

        except Exception as exc:
            self.logger_object.log('Error has occurred while processing html.', logging.ERROR)
            self.logger_object.log_error_to_main_log()
            if self.book_api_wrapper:
                self.book_api_wrapper.set_error_status()
            raise exc

        self.logger_object.log('End of processing the .html file.')

        return self.content, self.footnotes, self.top_level_headers
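

# Editor's usage sketch (not part of the original commit): driving the
# preprocessor standalone. access=None keeps images local; assumes a logs/
# folder exists at the project root for BookLogger.
if __name__ == '__main__':
    demo_html = '<html><body><h1>1. Intro</h1><p>Some text.</p></body></html>'
    soup = BeautifulSoup(demo_html, 'lxml')
    preprocessor = HTMLPreprocessor(soup, BookLogger('demo', book_id=0))
    content, footnotes, top_level_headers = preprocessor.process_html(
        access=None, html_path=pathlib.Path('demo.html'), book_id=0)
    print(top_level_headers)  # [{'title': 'Intro', 'is_numbered': True, ...}]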
src/json_converter.py (new file, 145 lines)
@@ -0,0 +1,145 @@
import logging
import re

from copy import copy

from config import BookConfig


class JSONConverter:
    def __init__(self, content, footnotes, top_level_headers, logger_object, book_api_status=None):
        self.content_dict = None
        self.content = content
        self.footnotes = footnotes
        self.top_level_headers = top_level_headers
        self.logger_object = logger_object
        self.book_api_status = book_api_status

    @staticmethod
    def format_html(html_text):
        """
        Remove useless symbols (newlines and tabs) from html code.

        :param html_text: Text to process.
        :return: Cleaned text.
        """
        new_text = re.sub(r'([\n\t])', ' ', html_text)
        return new_text

    # TODO: rethink the function structure without indexes.
    def header_to_json(self, ind):
        """
        Process a header and collect all the content that belongs to it.

        :param ind: Index of the header in the content list.
        :return: Tuple of (header dict, index of the next unprocessed element).
        """
        if self.content[ind].name in BookConfig.SUPPORTED_HEADERS:
            title = self.content[ind].text
            curr_outline = int(re.sub(r"^h", "", self.content[ind].name))  # extract outline from tag
            result = {
                'title': title,
                'contents': [],
                'sub_items': []
            }
            ch_content = []
            ind += 1

            while ind < len(self.content):
                # 1. next tag is a header
                if self.content[ind].name in BookConfig.SUPPORTED_HEADERS:
                    outline = int(re.sub(r"^h", "", self.content[ind].name))
                    # - recursion step while h_i > h_initial
                    if outline > curr_outline:
                        header_dict, ind = self.header_to_json(ind)
                        if ch_content:
                            result['contents'].append("".join(ch_content))
                            ch_content = []
                        result['sub_items'].append(header_dict)
                    # - current h_i <= h_initial, end of recursion
                    else:
                        break
                # 2. next tag is not a header: add new paragraphs
                else:
                    html_str = self.format_html(str(self.content[ind]))
                    ch_content.append(html_str)
                    ind += 1

            if ch_content:
                result['contents'].append("".join(ch_content))
            return result, ind
        # callers only invoke this on header tags, so this branch is not normally reached
        return ''

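    # Worked example (editor's sketch): for content
    #   [<h1>Ch</h1>, <p>a</p>, <h2>Sec</h2>, <p>b</p>]
    # header_to_json(0) returns
    #   ({'title': 'Ch',
    #     'contents': ['<p>a</p>'],
    #     'sub_items': [{'title': 'Sec', 'contents': ['<p>b</p>'], 'sub_items': []}]},
    #    4)
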
    @staticmethod
    def _is_empty_p_tag(tag):
        """Return True if the <p> tag contains only whitespace and <br> tags."""
        if tag.name != 'p':
            return False

        temp_tag = copy(tag)
        brs = temp_tag.find_all('br')
        for br in brs:
            br.decompose()

        text = re.sub(r'\s+', '', temp_tag.text)
        if text:
            return False

        return True

    def convert_to_json(self):
        """
        Convert the list of html nodes into the resulting json structure.
        """
        json_strc = []
        ind = 0
        ch_num = 0
        ch_amt = 0

        try:
            while ind < len(self.content):
                res = {}

                if self.content[ind].name in BookConfig.SUPPORTED_HEADERS:
                    res, ind = self.header_to_json(ind)

                else:
                    chapter_title = f'Untitled chapter {ch_num}'
                    chapter = []
                    while ind < len(self.content) and self.content[ind].name not in BookConfig.SUPPORTED_HEADERS:
                        if not self._is_empty_p_tag(self.content[ind]):
                            chapter.append(self.format_html(str(self.content[ind])))
                        ind += 1
                    if chapter:
                        res = {
                            'title': chapter_title,
                            'contents': ["".join(chapter)],
                            'sub_items': []
                        }
                        ch_num += 1

                if res:
                    json_strc.append(res)
                    ch_amt += 1
                    self.logger_object.log(f'Chapter {ch_amt} has been added to structure.')
        except Exception as exc:
            self.logger_object.log('Error has occurred while making json structure.', logging.ERROR)
            self.logger_object.log_error_to_main_log()
            if self.book_api_status:
                self.book_api_status.set_error_status()
            raise exc

        # Add the is_introduction field to the json structure.
        # After deleting content before the TOC, some chapters can be gone,
        # so guard against an empty structure.
        if self.top_level_headers and json_strc:
            same_first_titles = self.top_level_headers[0]['title'] == json_strc[0]['title']
            is_first_header_introduction = not self.top_level_headers[0]['should_be_numbered']

            json_strc[0]['is_introduction'] = is_first_header_introduction and same_first_titles

        self.content_dict = {
            "content": json_strc,
            "footnotes": self.footnotes
        }

        return self.content_dict
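

# Editor's usage sketch (not part of the original commit): converting a tiny
# node list with a stub logger. In the real flow, content/footnotes/headers
# come from HTMLPreprocessor.process_html and logger_object is a BookLogger.
if __name__ == '__main__':
    from bs4 import BeautifulSoup

    class _StubLogger:
        def log(self, message, logging_level=logging.INFO):
            print(message)

        def log_error_to_main_log(self, message=''):
            pass

    soup = BeautifulSoup('<h1>Intro</h1><p>Hello</p>', 'lxml')
    converter = JSONConverter(content=soup.find_all(['h1', 'p']),
                              footnotes=[], top_level_headers=[],
                              logger_object=_StubLogger())
    print(converter.convert_to_json())
    # {'content': [{'title': 'Intro', 'contents': ['<p>Hello</p>'],
    #               'sub_items': []}], 'footnotes': []}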