From f27eefb96befeedaab725081713955e8171b27dc Mon Sep 17 00:00:00 2001 From: shirshasa Date: Fri, 28 Aug 2020 11:58:04 +0300 Subject: [PATCH] refactor converter - split book class into html-preprocessor, json-converter, book(with main flow) classes. - pick out logging, law carta setup, updating status via api into separate objects - in html-preprocesser: add cleaning hrefs --- src/book.py | 1003 ++++---------------------------------- src/config.py | 111 +++++ src/consumer.py | 7 +- src/html_preprocessor.py | 603 +++++++++++++++++++++++ src/json_converter.py | 145 ++++++ 5 files changed, 961 insertions(+), 908 deletions(-) create mode 100644 src/config.py create mode 100644 src/html_preprocessor.py create mode 100644 src/json_converter.py diff --git a/src/book.py b/src/book.py index 6e7556e..6587901 100644 --- a/src/book.py +++ b/src/book.py @@ -3,113 +3,38 @@ import json import logging import os import pathlib -import re import subprocess from subprocess import PIPE from threading import Event -from copy import copy -from shutil import copyfile from bs4 import BeautifulSoup, NavigableString +from config import BookLogger, BookApiWrapper, LawCartaConfig +from html_preprocessor import HTMLPreprocessor +from json_converter import JSONConverter + class Book: - # Main constant values - DEFAULT_FONT_NAME = 'Times New Roman' - DEFAULT_ALIGN_STYLE = 'left' - WORD_DEFAULT_FONT_SIZE = 11 - LAWCARTA_DEFAULT_FONT_SIZE = 18 - FONT_CONVERT_RATIO = LAWCARTA_DEFAULT_FONT_SIZE / WORD_DEFAULT_FONT_SIZE - font_correspondence_table = { - "Arial": "arial,helvetica,sans-serif", - "Comic Sans MS": "comic sans ms,cursive", - "Courier New": "courier new,courier,monospace", - "Georgia": "georgia,serif", - "Lucida Sans Unicode": "lucida sans unicode,lucida grande,sans-serif", - "Tahoma": "tahoma,geneva,sans-serif", - "Times New Roman": "times new roman,times,serif", - "Trebuchet MS": "trebuchet ms,helvetica,sans-serif", - "Verdana": "verdana,geneva,sans-serif" - } - SUPPORTED_LEVELS = 4 - 
SUPPORTED_HEADERS = {"h1", "h2", "h3", "h4"} - HEADERS_LEVELS = {"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9"} - - def __init__(self, book_id=0, access=None, docx_path=None, html_path=None, output_path=None, main_logger=None, - libra_locker=None): + def __init__(self, book_id=0, access=None, docx_path=None, html_path=None, output_path=None, + main_logger=None, libra_locker=None, + logging_format='%(asctime)s - %(levelname)s - %(message)s'): self.book_id = book_id self.access = access self.docx_path = docx_path # path to docx file, appears after downloading from server self.html_path = html_path # path to html file, file appears after libre-conversion self.output_path = output_path # path to json file self.libra_locker: Event() = libra_locker - self.main_logger = main_logger - self.logger = None - self.html_soup = None - self.body_tag = None - self.content = list() - self.footnotes = list() - self.images = list() - self.top_level_headers = None - self.content_dict = dict() - self.tables_amount = 0 + self.logger_object = BookLogger(name=f'{__name__}_{self.book_id}', + logging_format=logging_format, + book_id=book_id, + main_logger=main_logger) + self.book_api_wrapper = BookApiWrapper(access, self.logger_object, book_id) - assert self.SUPPORTED_LEVELS == len(self.SUPPORTED_HEADERS), \ + assert BookConfig.SUPPORTED_LEVELS == len(BookConfig.SUPPORTED_HEADERS), \ "Length of headers doesn't match allowed levels." - def configure_file_logger(self, name, attr_name='logger', - filename='logs/book_log.log', - filemode='w+', - logging_level=logging.INFO, - logging_format='%(asctime)s - %(message)s'): - """ - Method for Logger configuration. Logger will write in file. - - :param name: name of the Logger. - :param attr_name: name of attribute that will be added to self. - :param filename: name of the log file. - :param filemode: mode of opening log file. - :param logging_level: logging level: 10 - debug, 20 - info, 30 - warning, 40 - error, 50 - critical. 
- :param logging_format: format of record in log file. - """ - logger = logging.getLogger(name) - - folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - if self.book_id: - filename = f'logs/{self.book_id}_log.log' - - file_path = os.path.join(folder_path, filename) - - file_handler = logging.FileHandler(file_path, mode=filemode) - # file_format = logging.Formatter(fmt=logging_format, datefmt=date_format) - file_format = logging.Formatter(fmt=logging_format) - file_handler.setFormatter(file_format) - logger.addHandler(file_handler) - - logger.setLevel(logging_level) - - setattr(self, attr_name, logger) - - def log(self, message, logging_level=20): - """ - Method for logging. - - :param message: body of the message - :param logging_level: level of logging - """ - self.logger.log(msg=message, level=logging_level) - - def log_error_to_main_log(self, message=''): - """ - Method for logging error to main log file. - """ - if self.main_logger: - if not message: - message = f'Error in book conversion. Check {self.book_id}_log.log file.' - self.main_logger.error(message) - def save_docx(self, content): """ Save binary content of file to .docx. @@ -123,10 +48,10 @@ class Book: try: with open(file_path, 'wb+') as file: file.write(content) - self.log(f'File was saved to folder: {folder_path}.') + self.logger_object.log(f'File was saved to folder: {folder_path}.') except Exception as exc: - self.log("Error in writing docx file.", logging.ERROR) - self.log_error_to_main_log() + self.logger_object.log("Error in writing docx file.", logging.ERROR) + self.logger_object.log_error_to_main_log() raise exc self.docx_path = pathlib.Path(file_path) @@ -136,69 +61,39 @@ class Book: Method for getting and saving book from queue. """ try: - self.log(f'Start receiving file from server. URL: {self.access.url}/doc-convert/{self.book_id}/file') + self.logger_object.log(f'Start receiving file from server. 
URL: {self.access.url}/doc-convert/{self.book_id}/file') content = self.access.get_doc(self.book_id) - self.log('File was received from server.') + self.logger_object.log('File was received from server.') self.save_docx(content) except FileNotFoundError as f_err: - self.log("Can't get docx from server.", logging.ERROR) - self.log_error_to_main_log() + self.logger_object.log("Can't get docx from server.", logging.ERROR) + self.logger_object.log_error_to_main_log() raise f_err except Exception as exc: raise exc - def set_process_status(self): - try: - if self.access: - self.access.update_status(self.book_id, self.access.PROCESS) - self.log(f'Status has been updated to [PROCESS].') - except Exception as exc: - self.log("Can't update status of the book [PROCESS].", logging.ERROR) - self.log_error_to_main_log() - raise exc - - def set_generate_status(self): - try: - if self.access: - self.access.update_status(self.book_id, self.access.GENERATE) - self.log(f'Status has been updated to [GENERATE].') - except Exception as exc: - self.log("Can't update status of the book [GENERATE].", logging.ERROR) - self.log_error_to_main_log() - raise exc - - def set_error_status(self): - try: - if self.access: - self.access.update_status(self.book_id, self.access.ERROR) - self.log(f'Status has been updated to [ERROR].') - except Exception as exc: - self.log("Can't update status of the book [ERROR].", logging.ERROR) - self.log_error_to_main_log() - raise exc - def _libra_run(self, out_dir_path): command = ['libreoffice', '--headless', '--convert-to', 'html', f'{str(self.docx_path)}', '--outdir', f'{out_dir_path}'] result = subprocess.run(command, stdout=PIPE, stderr=PIPE) - self.log(f'STATUS book_{self.book_id}: {result.returncode}, {result.stdout}', logging.DEBUG) - self.log(f'ERROR book_{self.book_id}: {result.stderr}', logging.DEBUG) + self.logger_object.log(f'Result of libra conversion for book_{self.book_id}: {result.returncode}, {result.stdout}', logging.DEBUG) + 
self.logger_object.log(f'Any error while libra conversion for book_{self.book_id}: {result.stderr}', logging.DEBUG) def convert_doc_to_html(self): """ Method for convert .docx document to .html file. """ - self.log(f'File - {self.docx_path}.') + self.logger_object.log(f'File - {self.docx_path}.') print(f'{self.docx_path}') - self.log('Beginning of conversion from .docx to .html.') + self.logger_object.log('Beginning of conversion from .docx to .html.') try: f = open(self.docx_path) f.close() except FileNotFoundError as error: - self.log('Invalid path to input data.', logging.ERROR) - self.set_error_status() + self.logger_object.log('Invalid path to input data.', logging.ERROR) + self.book_api_wrapper.set_error_status() raise error folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -209,27 +104,27 @@ class Book: try: if self.libra_locker.isSet(): self.libra_locker.clear() - self.log('Got flag...', logging.DEBUG) + self.logger_object.log('Got flag...', logging.DEBUG) self._libra_run(out_dir_path) self.libra_locker.set() - self.log('Cleared flag...', logging.DEBUG) + self.logger_object.log('Cleared flag...', logging.DEBUG) else: while not self.libra_locker.isSet() and not is_book_converted: - self.log('Waiting for libra...', logging.DEBUG) + self.logger_object.log('Waiting for libra...', logging.DEBUG) flag = self.libra_locker.wait(50) if flag: if self.libra_locker.isSet(): self.libra_locker.clear() - self.log(f'Got flag!', logging.DEBUG) + self.logger_object.log(f'Got flag!', logging.DEBUG) self._libra_run(out_dir_path) self.libra_locker.set() break except Exception as exc: - self.log("Conversion has gone wrong. Libreoffice is not installed.", logging.ERROR) - self.log_error_to_main_log() - self.set_error_status() + self.logger_object.log("Conversion has gone wrong. 
Libreoffice is not installed.", logging.ERROR) + self.logger_object.log_error_to_main_log() + self.book_api_wrapper.set_error_status() raise exc out_dir_path = os.path.join(out_dir_path, f'{self.book_id}.html') @@ -239,13 +134,13 @@ class Book: f = open(self.html_path) f.close() except FileNotFoundError as exc: - self.log("Conversion has gone wrong. HTML file doesn't exist.", logging.ERROR) - self.log_error_to_main_log() - self.set_error_status() + self.logger_object.log("Conversion has gone wrong. HTML file doesn't exist.", logging.ERROR) + self.logger_object.log_error_to_main_log() + self.book_api_wrapper.set_error_status() raise exc - self.log('End of conversion from .docx to .html.') - self.log(f'Input file path after conversion: {self.html_path}.') + self.logger_object.log('End of conversion from .docx to .html.') + self.logger_object.log(f'Input file path after conversion: {self.html_path}.') def check_output_directory(self): if self.output_path is None: @@ -254,7 +149,7 @@ class Book: self.output_path = output_path self.output_path = pathlib.Path(self.output_path) - self.log(f'Output file path: {self.output_path}') + self.logger_object.log(f'Output file path: {self.output_path}') pathlib.Path(self.output_path).parent.mkdir(parents=True, exist_ok=True) self.output_path.touch(exist_ok=True) @@ -265,796 +160,96 @@ class Book: """ try: html_text = open(self.html_path, 'r', encoding='utf8').read() - self.log('HTML for book has been loaded.') + self.logger_object.log('HTML for book has been loaded.') except FileNotFoundError as exc: - self.log('There is no html to process. Conversion went wrong or you specified wrong paths.', logging.ERROR) - self.log_error_to_main_log() - self.set_error_status() + self.logger_object.log('There is no html to process.' 
+ 'Conversion went wrong or you specified wrong paths.', logging.ERROR) + self.logger_object.log_error_to_main_log() + self.book_api_wrapper.set_error_status() raise exc - self.html_soup = BeautifulSoup(html_text, features='lxml') - self.body_tag = self.html_soup.body + html_soup = BeautifulSoup(html_text, features='lxml') + return html_soup - def _clean_tag(self, tag, attr_name, attr_value): - """ - Function to clean tags by its name and attribute value. - - :param tag: Tag name to clean. - :param attr_name: Attribute name. - :param attr_value: Attribute value. - """ - tags = self.body_tag.find_all(tag, {attr_name: attr_value}) - for tag in tags: - if len(tag.attrs) == 1: - tag.unwrap() - - def _clean_underline_links(self): - """ - Function cleans meaningless tags before links. - """ - underlines = self.body_tag.find_all("u") - for u in underlines: - if u.find_all('a'): - u.unwrap() - - links = self.body_tag.find_all('a') - for link in links: - u = link.find_all('u') - if u and len(u) == 1: - u[0].unwrap() - - @classmethod - def convert_pt_to_px(cls, value): - value = int(value) - if value == cls.WORD_DEFAULT_FONT_SIZE: - return cls.LAWCARTA_DEFAULT_FONT_SIZE - else: - return value - - @classmethod - def convert_font_pt_to_px(cls, style): - """ - Method converts point in the font-size to pixels. - - :param style: Str with style to process. - :return: Str with converted style. - """ - size = re.search(r"font-size: (\d{1,3})pt", style) - - if size is None: - return style - - size = size.group(1) - new_size = cls.convert_pt_to_px(size) - - if new_size == cls.LAWCARTA_DEFAULT_FONT_SIZE: - return "" - - return re.sub(size + "pt", str(new_size) + "px", style) - - def _font_to_span(self): - """ - Function to convert tag to . If font style is default, then remove this tag. 
- """ - fonts = self.body_tag.find_all("font") - for font in fonts: - face = font.get("face") - style = font.get("style") - - font.attrs = {} - font.name = "span" - if style: - style = self.convert_font_pt_to_px(style) - if style != "": - font.attrs["style"] = style - if face is not None: - face = re.sub(r",[\w,\- ]*$", "", face) - if face != self.DEFAULT_FONT_NAME and self.font_correspondence_table.get(face): - font.attrs["face"] = self.font_correspondence_table[face] - else: - font.attrs["face"] = self.DEFAULT_FONT_NAME - - if len(font.attrs) == 0: - font.unwrap() - - assert len(self.body_tag.find_all("font")) == 0 # on this step there should be no more tags - - def _remove_table_of_contents(self): - """ - Function to remove table of content from file. - """ - tables = self.body_tag.find_all("div", id=re.compile(r'^Table of Contents\d+')) - for table in tables: - table.decompose() - - def _change_table_of_contents(self): - tables = self.body_tag.find_all("div", id=re.compile(r'^Table of Contents\d+')) - for table in tables: - table.wrap(self.html_soup.new_tag("TOC")) - table.decompose() - - def delete_content_before_toc(self): - toc_tag = self.html_soup.new_tag('TOC') - if toc_tag in self.content: - ind = self.content.index(toc_tag) + 1 - self.content = self.content[ind:] - self.write_html_from_list() - - def clean_trash(self): - """ - Function to remove all styles and tags we don't need. 
- """ - self._clean_tag('span', 'style', re.compile(r'^background: #[0-9a-fA-F]{6}$')) - self._clean_tag('span', 'lang', re.compile(r'^ru-RU$')) # todo: check for another languages - self._clean_tag('span', 'style', re.compile('^letter-spacing: -?[\d\.]+pt$')) - - self._clean_tag('font', 'color', re.compile(r'^#[0-9a-fA-F]{6}$')) - self._clean_tag('font', 'face', re.compile(r'^Times New Roman[\w, ]+$')) - - self._clean_tag("a", "name", "_GoBack") - self._clean_underline_links() - - self._font_to_span() - # self._remove_table_of_contents() - self._change_table_of_contents() - - def _process_paragraph(self): - """ - Function to process

tags (text-align and text-indent value). - """ - paragraphs = self.body_tag.find_all('p') - - for p in paragraphs: - # libra converts some \n into

with 2
- # there we remove 1 unnecessary
- brs = p.find_all('br') - text = p.text - if brs and text == '\n\n' and len(brs) == 2: - brs[0].decompose() - - align = p.get('align') - style = p.get('style') - - if style: - indent = re.search(r'text-indent: ([\d\.]{1,4})in', style) - margin_left = re.search(r'margin-left: ([\d\.]{1,4})in', style) - margin_right= re.search(r'margin-right: ([\d\.]{1,4})in', style) - margin_top = re.search(r'margin-top: ([\d\.]{1,4})in', style) - margin_bottom = re.search(r'margin-bottom: ([\d\.]{1,4})in', style) - else: - indent = None - margin_left = None - margin_right = None - margin_top = None - margin_bottom = None - - if margin_left and margin_right and margin_top and margin_bottom and \ - margin_left.group(1) == '0.6' and margin_right.group(1) == '0.6' and \ - margin_top.group(1) == '0.14' and margin_bottom.group(1) == '0.11': - p.wrap(BeautifulSoup(features='lxml').new_tag('blockquote')) - - p.attrs = {} - style = '' - - if align is not None and align != self.DEFAULT_ALIGN_STYLE: - style += f'text-align: {align};' - - if indent is not None: - indent = indent.group(1) - style += f'text-indent: {indent}in;' - - if style: - p.attrs['style'] = style - - def _process_two_columns(self): - """ - Function to process paragraphs which has two columns layout. - """ - two_columns = self.body_tag.find_all("div", style="column-count: 2") - for div in two_columns: - for child in div.children: - if child.name == "p": - child["class"] = "columns2" - div.unwrap() - - def _process_tables(self): - """ - Function to process tables. Set "border" attribute. 
- """ - tables = self.body_tag.find_all("table") - for table in tables: - tds = table.find_all("td") - - sizes = [] - for td in tds: - style = td.get('style') - - if style: - match = re.search(r"border: ?(\d+\.?\d*)(p[tx])", style) - - if match: - size = match.group(1) - units = match.group(2) - - if units == "pt": - size = self.convert_pt_to_px(size) - - sizes.append(float(size)) - - width = td.get('width') - - td.attrs = {} - if width: - td.attrs['width'] = width - - if sizes: - border_size = sum(sizes)/len(sizes) - table.attrs['border'] = f'{border_size:.2}' - - self.tables_amount = len(tables) - - def _process_quotes(self): - """ - Function to process block quotes. - After docx to html conversion block quotes are stored inside table with 1 cell. - All text is wrapped in a tag. - Such tables will be replaced with

tags. - - - - - - -
-

aaaaa

-


-
- - """ - tables = self.body_tag.find_all("table") - for table in tables: - trs = table.find_all("tr") - tds = table.find_all("td") - if len(trs) == 1 and len(tds) == 1 and tds[0].get('width') == '600': - td = tds[0] - is_zero_border = 'border: none;' in td.get('style') - paragraphs = td.find_all("p") - has_i_tag_or_br = [(p.i, p.br) for p in paragraphs] - has_i_tag_or_br = [x[0] is not None or x[1] is not None - for x in has_i_tag_or_br] - - if all(has_i_tag_or_br) and is_zero_border: - new_div = BeautifulSoup(features='lxml').new_tag('blockquote') - for p in paragraphs: - new_div.append(p) - - table.replaceWith(new_div) - - # def _process_quotes(self): - # """ - # Function to process
tags. All tags will be replaced with
tags. - # """ - # dls = self.body_tag.find_all('dl') - # - # for dl in dls: - # pars = dl.find_all('p') - # for p in pars: - # p.wrap(BeautifulSoup(features='lxml').new_tag('blockquote')) - # new_div = BeautifulSoup(features='lxml').new_tag('div') - # for p in pars: - # new_div.append(p.parent) - # dl.replaceWith(new_div) - - @staticmethod - def _clean_footnote_content(content): - content = content.strip() - return content.strip() - - def _process_footnotes(self): - """ - Function returns list of footnotes and delete them from html_soup. - """ - footnote_anchors = self.body_tag.find_all('a', class_='sdfootnoteanc') - footnote_content = self.body_tag.find_all('div', id=re.compile(r'^sdfootnote\d+$')) - footnote_amt = len(footnote_anchors) - - assert footnote_amt == len(footnote_content),\ - 'Some ting went wrong with footnotes after libra conversion' - - footnotes = [] - - for i, (anc_tag, cont_tag) in enumerate(zip(footnote_anchors, footnote_content)): - if cont_tag.find('a').attrs.get('href') is None: - cont_tag.a.decompose() - continue - assert anc_tag['name'] == cont_tag.find('a')['href'][1:], \ - 'Something went wrong with footnotes after libra conversion' - - new_tag = BeautifulSoup(features='lxml').new_tag('sup') - new_tag['class'] = 'footnote-element' - new_tag['data-id'] = i + 1 - new_tag['id'] = f'footnote-{i + 1}' - new_tag.string = '*' - anc_tag.replace_with(new_tag) - - # extra digits in footnotes from documents downloaded from livecarta - a_text = cont_tag.a.text - if len(cont_tag.find_all('p')): - sup = cont_tag.find_all('p')[0].find('sup') - if sup and sup.text == a_text: - sup.decompose() - cont_tag.a.decompose() - - unicode_string = '' - for child in cont_tag.children: - if type(child) is NavigableString: - continue - if child.name == 'blockquote': - unicode_string += str(child) - else: - unicode_string += child.decode_contents() - - content = self._clean_footnote_content(unicode_string) - cont_tag.decompose() - - footnotes.append(content) - - 
self.footnotes = footnotes - - def _process_images(self): - """ - Function to process tag. Img should be sent Amazon S3 and then return new tag with valid link. - For now images are moved to one folder. - """ - img_tags = self.body_tag.find_all('img') - - if len(img_tags): - if self.access is None: - folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - new_path = pathlib.Path(os.path.join(folder_path, f'json/img_{self.book_id}/')) - new_path.mkdir(exist_ok=True) - - for img in img_tags: - img_name = img.attrs.get('src') - if (len(img_name) >= 3 ) and img_name[:3] == '../': - img_name = img_name[3:] - - img_path = pathlib.Path(f'{self.html_path.parent}', f'{img_name}') - - if self.access is not None: - link = self.access.send_image(img_path, self.book_id) - img.attrs['src'] = link - self.log(f'{img_name} successfully uploaded.') - else: - img_size = os.path.getsize(img_path) - print(f'{img_name} successfully loaded. Image size: {img_size}.') - new_img_path = new_path / img_name - copyfile(img_path, new_img_path) - img.attrs["src"] = str(new_img_path) - - self.images = img_tags - - def _process_footer(self): - """ - Function to process
tags. - All the tags will be deleted from file. - """ - divs = self.body_tag.find_all('div', {'title': 'footer'}) - for div in divs: - div.decompose() - - def _process_div(self): - """ - Function to process
tags. All the tags will be deleted from file, all content of the tags will stay. - """ - divs = self.body_tag.find_all("div") - - for div in divs: - div.unwrap() - - def _process_toc_links(self): - """ - Function to extract nodes which contains TOC links, remove links from file and detect headers. - """ - toc_links = self.body_tag.find_all("a", {'name': re.compile(r'^_Toc\d+')}) - headers = [link.parent for link in toc_links] - outline_level = "1" # All the unknown outlines will be predicted as

- for tag in headers: - if re.search(r"^h\d$", tag.name): - tag.a.unwrap() - # outline_level = tag.name[-1] # TODO: add prediction of the outline level - # TODO: escape from recounting paragraphs every time - elif tag.name == "p": - if tag in self.body_tag.find_all("p"): - new_tag = BeautifulSoup(features="lxml").new_tag("h" + outline_level) - text = tag.text - tag.replaceWith(new_tag) - new_tag.string = text - else: - # rethink document structure when you have toc_links, other cases? - self.logger.warning(f'Something went wrong in processing toc_links.' - f' Check the structure of the file. ' - f'Tag name: {tag.name}') - - @staticmethod - def clean_header_title(title): - """ - Function to remove digits and extra spaces from headers. - - :param title: Title to process. - """ - title = re.sub(r'\s+', ' ', title).strip() - title = re.sub(r'^(?:\.?\d+\.? ?)+', '', title) - # title = re.sub(r'^(?:\.?[MDCLXVIclxvi]+\.? ?)+ ', '', title) # delete chapter numbering from the title - title = re.sub(r'^(?:[A-Za-z]\. ?)+', '', title) - return title.strip() - - def _preprocessing_headings(self): - """ - Function to convert all lower level headings to p tags - """ - pattern = f'^h[{self.SUPPORTED_LEVELS + 1}-9]$' - header_tags = self.body_tag.find_all(re.compile(pattern)) - for tag in header_tags: - tag.name = 'p' - - def _get_top_level_headers(self): - """ - Function for gathering info about top-level chapters. - - Assume: - - Headers with smallest outline(or digit in ) are top level chapters. 
- [ It is consistent with a recursive algorithm - for saving content to a resulted json structure, - which happens in header_to_json()] - - """ - headers_info = [] - header_tags = self.body_tag.find_all(re.compile("^h[1-9]$")) - headers_outline = [int(re.sub(r"^h", "", tag.name)) for tag in header_tags] - if headers_outline: - top_level_outline = min(headers_outline) - top_level_headers = [tag for tag in header_tags - if int(re.sub(r"^h", "", tag.name)) == top_level_outline] - - for tag in top_level_headers: - if tag.parent.name == "li": - tag.parent.unwrap() - while tag.parent.name == "ol": - tag.parent.unwrap() - - title = tag.text - title = re.sub(r'\s+', ' ', title).strip() - number = re.match(r'^(?:\.?\d+\.? ?)+', title) - is_numbered = number is not None - - cleaned_title = self.clean_header_title(tag.text) - is_introduction = cleaned_title.lower() == 'introduction' - - headers_info.append({ - 'title': cleaned_title, - 'is_numbered': is_numbered, - 'is_introduction': is_introduction}) - - return headers_info - - def _mark_introduction_headers(self): - """ - Function to find out: - what header shouldn't be numbered and can be treated as introduction chapter - - Assume header(s) to be introduction if: - 1. one header not numbered, before 1 numbered header - 2. 
it is first header from the top level list and it equals to 'introduction' - - Result : - Mark each top-level header with flag should_be_numbered = true/false - """ - is_numbered_header = [header['is_numbered'] for header in self.top_level_headers] - is_title = [header['is_introduction'] for header in self.top_level_headers] - - first_not_numbered = is_numbered_header and is_numbered_header[0] == 0 - second_is_numbered_or_not_exist = all(is_numbered_header[1:2]) - first_header_is_introduction = is_title and is_title[0] - - if (first_not_numbered and second_is_numbered_or_not_exist) or first_header_is_introduction: - self.top_level_headers[0]['should_be_numbered'] = False - for i in range(1, len(self.top_level_headers)): - self.top_level_headers[i]['should_be_numbered'] = True - else: - for i in range(0, len(self.top_level_headers)): - self.top_level_headers[i]['should_be_numbered'] = True - - def _process_headings(self): - """ - Function to process tags . - """ - header_tags = self.body_tag.find_all(re.compile("^h[1-9]$")) - for tag in header_tags: - if tag.parent.name == "li": - tag.parent.unwrap() - while tag.parent.name == "ol": - tag.parent.unwrap() - - title = tag.text - title = self.clean_header_title(title) - if title == "": - tag.unwrap() - else: - assert tag.name in self.SUPPORTED_HEADERS, \ - f'Preprocessing went wrong, there is still h{self.SUPPORTED_LEVELS + 1}-h9 headings.' - # if tag.name in ["h4", "h5", "h6"]: - # tag.name = "h3" # All the lower level headings will be transformed to h3 headings - - new_tag = BeautifulSoup(features='lxml').new_tag(name=tag.name) - new_tag.string = title - tag.replace_with(new_tag) - - def _process_lists(self): - """ - Function to process tags
  • . - Unwrap

    tags. - """ - li_tags = self.body_tag.find_all("li") - - for il_tag in li_tags: - il_tag.attrs.update(il_tag.p.attrs) - il_tag.p.unwrap() - - def write_html_from_list(self, file_name='json/url_test.html'): + def write_html_from_list(self, body_tag, file_name='json/html_test.html'): folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) file_path = pathlib.Path(os.path.join(folder_path, file_name)) with open(file_path, 'w', encoding='utf-8') as f_out: - # f_out.write("".join([tag.prettify() for tag in self.content])) - f_out.write(self.body_tag.prettify()) - self.logger.info(f'Check test file - url_test.html.') + f_out.write(body_tag.prettify()) + self.logger_object.log(f'Check final prettified html: {file_name}.') - def process_html(self): - """ - Process html code to satisfy LawCarta formatting. - """ - self.log('Beginning of processing .html file.') - - try: - self.clean_trash() - - # process main elements of the .html doc - self.log(f'Processing main elements of html.') - self._preprocessing_headings() - self._process_paragraph() - self._process_two_columns() - - self.log('Block quotes processing.') - self._process_quotes() - - self.log('Tables processing.') - self._process_tables() - self.log(f'{self.tables_amount} tables have been processed.') - - self.log('Footnotes processing.') - self._process_footnotes() - self.log(f'{len(self.footnotes)} footnotes have been processed.') - - self.log('Image processing.') - self._process_images() - self.log(f'{len(self.images)} images have been processed.') - - self._process_footer() - self._process_div() - - self.content = self.body_tag.find_all(recursive=False) - - self.log(f'Processing TOC and headers.') - self._process_toc_links() - - self.top_level_headers = self._get_top_level_headers() - self._mark_introduction_headers() - - self._process_headings() - - self.content = self.body_tag.find_all(recursive=False) - - self._process_lists() - # delete text before table of content if exists - 
self.delete_content_before_toc() - - except Exception as exc: - self.log('Error has occurred while processing html.', logging.ERROR) - self.log_error_to_main_log() - self.set_error_status() - raise exc - - self.log('End of processing .html file.') - - @staticmethod - def format_html(html_text): - """ - Function to remove useless symbols from html code. - - :param html_text: Text to process. - :return: Cleaned text. - """ - new_text = re.sub(r'([\n\t])', ' ', html_text) - return new_text - - # TODO: rethink the function structure without indexes. - def header_to_json(self, ind): - """ - Function process header and collects all content for it. - - :param ind: Index of header in content list. - """ - if self.content[ind].name in self.SUPPORTED_HEADERS: - title = self.content[ind].text - curr_outline = int(re.sub(r"^h", "", self.content[ind].name)) # extract outline from tag - result = { - 'title': title, - 'contents': [], - 'sub_items': [] - } - ch_content = [] - ind += 1 - - while ind < len(self.content): - # 1. next tag is a header - if self.content[ind].name in self.SUPPORTED_HEADERS: - outline = int(re.sub(r"^h", "", self.content[ind].name)) - # - recursion step until h_i > h_initial - if outline > curr_outline: - header_dict, ind = self.header_to_json(ind) - if ch_content: - result['contents'].append("".join(ch_content)) - ch_content = [] - result['sub_items'].append(header_dict) - # - current h_i <= h_initial, end of recursion - else: - # return result, ind - break - # 2. next tag is not a header. 
add new paragraphs - else: - html_str = self.format_html(str(self.content[ind])) - ch_content.append(html_str) - ind += 1 - - if ch_content: - result['contents'].append("".join(ch_content)) - return result, ind - return '' - - @staticmethod - def _is_empty_p_tag(tag): - if tag.name != 'p': - return False - - temp_tag = copy(tag) - brs = temp_tag.find_all('br') - for br in brs: - br.decompose() - - text = re.sub(r'\s+', '', temp_tag.text) - if text: - return False - - return True - - def convert_to_json(self): - """ - Function which convert list of html nodes to appropriate json structure. - """ - json_strc = [] - ind = 0 - ch_num = 0 - ch_amt = 0 - - try: - while ind < len(self.content): - res = {} - - if self.content[ind].name in self.SUPPORTED_HEADERS: - res, ind = self.header_to_json(ind) - - else: - chapter_title = f'Untitled chapter {ch_num}' - chapter = [] - while ind < len(self.content) and self.content[ind].name not in self.SUPPORTED_HEADERS: - if not self._is_empty_p_tag(self.content[ind]): - chapter.append(self.format_html(str(self.content[ind]))) - ind += 1 - if chapter: - res = { - 'title': chapter_title, - 'contents': ["".join(chapter)], - 'sub_items': [] - } - ch_num += 1 - - if res: - json_strc.append(res) - ch_amt += 1 - self.log(f'Chapter {ch_amt} has been added to structure.') - except Exception as exc: - self.log('Error has occurred while making json structure.', logging.ERROR) - self.log_error_to_main_log() - self.set_error_status() - raise exc - - # Add is_introduction field to json structure - # after deleting content before toc, some chapters can be deleted - if self.top_level_headers: - same_first_titles = self.top_level_headers[0]['title'] == json_strc[0]['title'] - is_first_header_introduction = not self.top_level_headers[0]['should_be_numbered'] - - json_strc[0]['is_introduction'] = is_first_header_introduction and same_first_titles - - self.content_dict = { - "content": json_strc, - "footnotes": self.footnotes - } - - def 
write_json(self): + def write_to_json(self, content: dict): try: with codecs.open(self.output_path, 'w', encoding='utf-8') as f: - json.dump(self.content_dict, f, ensure_ascii=False) - self.log('Data has been saved to .json file.') + json.dump(content, f, ensure_ascii=False) + self.logger_object.log(f'Data has been saved to .json file: {self.output_path}') except Exception as exc: - self.log('Error has occurred while writing json file.', logging.ERROR) - # self.log_error_to_main_log() - # self.set_error_status() - # raise exc + self.logger_object.log('Error has occurred while writing json file.'+ str(exc), logging.ERROR) - def send_json_content(self): + def send_json_content(self, content: dict): try: - self.access.send_book(self.book_id, self.content_dict) - self.log(f'JSON data has been sent to server.') + self.access.send_book(self.book_id, content) + self.logger_object.log(f'JSON data has been sent to server.') except Exception as exc: - self.log('Error has occurred while sending json content.', logging.ERROR) - self.log_error_to_main_log() - self.set_error_status() + self.logger_object.log('Error has occurred while sending json content.', logging.ERROR) + self.logger_object.log_error_to_main_log() + self.book_api_wrapper.set_error_status() raise exc - def convert_from_html(self, logging_format): - self.configure_file_logger(f'{__name__}_{self.book_id}', logging_format=logging_format, filemode='w+') - self.read_html() - self.process_html() - self.convert_to_json() - self.write_json() + def convert_from_html(self): + html_soup = self.read_html() + parser = HTMLPreprocessor(html_soup, self.logger_object) + content, footnotes, top_level_headers = parser.process_html(self.access, self.html_path, self.book_id) + json_converter = JSONConverter(content, footnotes, top_level_headers, self.logger_object, self.book_api_wrapper) + content_dict = json_converter.convert_to_json() + self.write_to_json(content_dict) + self.write_html_from_list(parser.body_tag) def 
test_conversion(self): - self.configure_file_logger(self.book_id, - filemode='w+', - logging_format='%(asctime)s - %(levelname)s - %(message)s', - logging_level=logging.INFO) - self.log('Beginning of the test.') + self.logger_object.log('Beginning of the test.') folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) folder_path = os.path.join(folder_path, f'docx') file_path = os.path.join(folder_path, f'{self.book_id}.docx') self.docx_path = pathlib.Path(file_path) - self.log(f'Test docx path: {self.docx_path}') + self.logger_object.log(f'Test docx path: {self.docx_path}') self.convert_doc_to_html() self.check_output_directory() - self.read_html() - self.process_html() - self.convert_to_json() - self.write_json() - self.log('End of the test.') - def conversion(self, logging_format, filemode='w+'): - self.configure_file_logger(f'{__name__}_{self.book_id}', logging_format=logging_format, filemode=filemode) - self.log('Beginning of conversion from .docx to .json.') + html_soup = self.read_html() + parser = HTMLPreprocessor(html_soup, self.logger_object) + content, footnotes, top_level_headers = parser.process_html(self.access, self.html_path, self.book_id) + + json_converter = JSONConverter(content, footnotes, top_level_headers, self.logger_object, self.book_api_wrapper) + content_dict = json_converter.convert_to_json() + + self.write_to_json(content_dict) + self.write_html_from_list(parser.body_tag) + self.logger_object.log('End of the test.') + + def conversion(self): + self.logger_object.log('Beginning of conversion from .docx to .json.') self.get_docx() - self.set_process_status() + self.book_api_wrapper.set_process_status() self.convert_doc_to_html() self.check_output_directory() - self.read_html() - self.process_html() - self.set_generate_status() - self.convert_to_json() - self.write_json() - self.send_json_content() - self.log(f'End of the conversion to LawCarta format. 
Check {self.output_path}.') + + html_soup = self.read_html() + self.logger_object.log('Beginning of processing .html file.') + + parser = HTMLPreprocessor(html_soup, self.logger_object) + content, footnotes, top_level_headers = parser.process_html(self.access, self.html_path, self.book_id) + + self.logger_object.log('Beginning of processing json output.') + self.book_api_wrapper.set_generate_status() + + json_converter = JSONConverter(content, footnotes, top_level_headers, self.logger_object, self.book_api_wrapper) + content_dict = json_converter.convert_to_json() + self.write_to_json(content_dict) + self.send_json_content(content_dict) + self.logger_object.log(f'End of the conversion to LawCarta format. Check {self.output_path}.') if __name__ == "__main__": @@ -1063,4 +258,4 @@ if __name__ == "__main__": out_path = pathlib.Path(os.path.join(folder, 'json/ch13.json')) book = Book(html_path=file, output_path=out_path) - book.convert_from_html(logging_format='%(asctime)s - %(levelname)s - %(message)s') + book.convert_from_html() diff --git a/src/config.py b/src/config.py new file mode 100644 index 0000000..8f10bb9 --- /dev/null +++ b/src/config.py @@ -0,0 +1,111 @@ +import logging +import os + +from access import Access + + +class BookLogger: + def __init__(self, name, book_id, main_logger=None, + filemode='w+', logging_level=logging.INFO, + logging_format='%(asctime)s - %(levelname)s - %(message)s'): + """ + Method for Logger configuration. Logger will write in file. + + :param name: name of the Logger. + :param attr_name: name of attribute that will be added to self. + :param filename: name of the log file. + :param filemode: mode of opening log file. + :param logging_level: logging level: 10 - debug, 20 - info, 30 - warning, 40 - error, 50 - critical. + :param logging_format: format of record in log file. 
+ """ + self.main_logger = main_logger + + self.logger = logging.getLogger(name) + folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + filename = f'logs/{book_id}_log.log' + file_path = os.path.join(folder_path, filename) + file_handler = logging.FileHandler(file_path, mode=filemode) + file_format = logging.Formatter(fmt=logging_format) + file_handler.setFormatter(file_format) + self.logger.addHandler(file_handler) + self.logger.setLevel(logging_level) + + def log(self, message, logging_level=20): + """ + Method for logging. + + :param message: body of the message + :param logging_level: level of logging + """ + self.logger.log(msg=message, level=logging_level) + + def log_error_to_main_log(self, message=''): + """ + Method for logging error to main log file. + """ + if self.main_logger: + if not message: + message = f'Error in book conversion. Check log file.' + self.main_logger.error(message) + + +class LawCartaConfig: + SUPPORTED_LEVELS = 4 + SUPPORTED_HEADERS = {"h1", "h2", "h3", "h4"} + HEADERS_LEVELS = {"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9"} + + # Main constant values + DEFAULT_FONT_NAME = 'Times New Roman' + DEFAULT_ALIGN_STYLE = 'left' + WORD_DEFAULT_FONT_SIZE = 11 + LAWCARTA_DEFAULT_FONT_SIZE = 18 + FONT_CONVERT_RATIO = LAWCARTA_DEFAULT_FONT_SIZE / WORD_DEFAULT_FONT_SIZE + font_correspondence_table = { + "Arial": "arial,helvetica,sans-serif", + "Comic Sans MS": "comic sans ms,cursive", + "Courier New": "courier new,courier,monospace", + "Georgia": "georgia,serif", + "Lucida Sans Unicode": "lucida sans unicode,lucida grande,sans-serif", + "Tahoma": "tahoma,geneva,sans-serif", + "Times New Roman": "times new roman,times,serif", + "Trebuchet MS": "trebuchet ms,helvetica,sans-serif", + "Verdana": "verdana,geneva,sans-serif" + } + + +class BookApiWrapper: + def __init__(self, access, logger_object, book_id=0): + self.access: Access = access + self.logger_object = logger_object + self.book_id = book_id + + def 
set_process_status(self): + try: + if self.access: + self.access.update_status(self.book_id, self.access.PROCESS) + self.logger_object.log(f'Status has been updated to [PROCESS].') + except Exception as exc: + self.logger_object.log("Can't update status of the book [PROCESS].", logging.ERROR) + self.logger_object.log_error_to_main_log() + raise exc + + def set_generate_status(self): + try: + if self.access: + self.access.update_status(self.book_id, self.access.GENERATE) + self.logger_object.log(f'Status has been updated to [GENERATE].') + except Exception as exc: + self.logger_object.log("Can't update status of the book [GENERATE].", logging.ERROR) + self.logger_object.log_error_to_main_log() + raise exc + + def set_error_status(self): + try: + if self.access: + self.access.update_status(self.book_id, self.access.ERROR) + self.logger_object.log(f'Status has been updated to [ERROR].') + except Exception as exc: + self.logger_object.log("Can't update status of the book [ERROR].", logging.ERROR) + self.logger_object.log_error_to_main_log() + raise exc + diff --git a/src/consumer.py b/src/consumer.py index 2c57c7f..3c164ae 100644 --- a/src/consumer.py +++ b/src/consumer.py @@ -15,7 +15,7 @@ from book import Book def configure_file_logger(name, filename='logs/converter_log.log', filemode='w+', logging_level=logging.INFO, - logging_format='%(asctime)s - %(message)s'): + logging_format='%(asctime)s - %(levelname)s - %(message)s'): logger = logging.getLogger(name) folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -32,11 +32,10 @@ def configure_file_logger(name, filename='logs/converter_log.log', filemode='w+' def convert_book(book_id, access, logger, libra_locker): logger.info(f'Start processing book-{book_id}.') - logging_format = '%(asctime)s - %(levelname)s - %(message)s' try: book = Book(book_id, access, main_logger=logger, libra_locker=libra_locker) - book.conversion(logging_format=logging_format) + book.conversion() except Exception as exc: 
raise exc @@ -105,7 +104,7 @@ def local_run(books): def server_run(): - logger = configure_file_logger('consumer', logging_format='%(asctime)s - %(levelname)s - %(message)s') + logger = configure_file_logger('consumer') folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) config_path = Path(os.path.join(folder_path, "config/queue_config.json")) diff --git a/src/html_preprocessor.py b/src/html_preprocessor.py new file mode 100644 index 0000000..d3d1cd9 --- /dev/null +++ b/src/html_preprocessor.py @@ -0,0 +1,603 @@ +import logging +import os +import pathlib +import re +from shutil import copyfile + +from bs4 import BeautifulSoup, NavigableString +from config import BookConfig, BookLogger, BookApiWrapper + + +class HTMLPreprocessor: + + def __init__(self, html_soup, logger_object, book_api_wrapper=None): + self.body_tag = html_soup.body + self.html_soup = html_soup + self.logger_object: BookLogger = logger_object + self.book_api_wrapper: BookApiWrapper = book_api_wrapper + self.top_level_headers = None + self.content = list() + + def _clean_tag(self, tag, attr_name, attr_value): + """ + Function to clean tags by its name and attribute value. + + :param tag: Tag name to clean. + :param attr_name: Attribute name. + :param attr_value: Attribute value. + """ + tags = self.body_tag.find_all(tag, {attr_name: attr_value}) + for tag in tags: + if len(tag.attrs) == 1: + tag.unwrap() + + def _clean_underline_links(self): + """ + Function cleans meaningless tags before links. 
+ """ + underlines = self.body_tag.find_all("u") + for u in underlines: + if u.find_all('a'): + u.unwrap() + + links = self.body_tag.find_all('a') + for link in links: + u = link.find_all('u') + if u and len(u) == 1: + u[0].unwrap() + + @classmethod + def convert_pt_to_px(cls, value): + value = int(value) + if value == BookConfig.WORD_DEFAULT_FONT_SIZE: + return BookConfig.LAWCARTA_DEFAULT_FONT_SIZE + else: + return value + + @classmethod + def convert_font_pt_to_px(cls, style): + """ + Method converts point in the font-size to pixels. + + :param style: Str with style to process. + :return: Str with converted style. + """ + size = re.search(r"font-size: (\d{1,3})pt", style) + + if size is None: + return style + + size = size.group(1) + new_size = cls.convert_pt_to_px(size) + + if new_size == BookConfig.LAWCARTA_DEFAULT_FONT_SIZE: + return "" + + return re.sub(size + "pt", str(new_size) + "px", style) + + def _font_to_span(self): + """ + Function to convert tag to . If font style is default, then remove this tag. + """ + fonts = self.body_tag.find_all("font") + for font in fonts: + face = font.get("face") + style = font.get("style") + + font.attrs = {} + font.name = "span" + if style: + style = self.convert_font_pt_to_px(style) + if style != "": + font.attrs["style"] = style + if face is not None: + face = re.sub(r",[\w,\- ]*$", "", face) + if face != BookConfig.DEFAULT_FONT_NAME and BookConfig.font_correspondence_table.get(face): + font.attrs["face"] = BookConfig.font_correspondence_table[face] + else: + font.attrs["face"] = BookConfig.DEFAULT_FONT_NAME + + if len(font.attrs) == 0: + font.unwrap() + + assert len(self.body_tag.find_all("font")) == 0 # on this step there should be no more tags + + def _remove_table_of_contents(self): + """ + Function to remove table of content from file. 
+ """ + tables = self.body_tag.find_all("div", id=re.compile(r'^Table of Contents\d+')) + for table in tables: + table.decompose() + + def _change_table_of_contents(self): + tables = self.body_tag.find_all("div", id=re.compile(r'^Table of Contents\d+')) + for table in tables: + table.wrap(self.html_soup.new_tag("TOC")) + table.decompose() + + def delete_content_before_toc(self): + toc_tag = self.html_soup.new_tag('TOC') + if toc_tag in self.content: + ind = self.content.index(toc_tag) + 1 + self.content = self.content[ind:] + + def clean_trash(self): + """ + Function to remove all styles and tags we don't need. + """ + self._clean_tag('span', 'style', re.compile(r'^background: #[0-9a-fA-F]{6}$')) + self._clean_tag('span', 'lang', re.compile(r'^ru-RU$')) # todo: check for another languages + self._clean_tag('span', 'style', re.compile('^letter-spacing: -?[\d\.]+pt$')) + + self._clean_tag('font', 'color', re.compile(r'^#[0-9a-fA-F]{6}$')) + self._clean_tag('font', 'face', re.compile(r'^Times New Roman[\w, ]+$')) + + self._clean_tag("a", "name", "_GoBack") + self._clean_underline_links() + + self._font_to_span() + # self._remove_table_of_contents() + self._change_table_of_contents() + + def _process_paragraph(self): + """ + Function to process
<p>
    tags (text-align and text-indent value). + """ + paragraphs = self.body_tag.find_all('p') + + for p in paragraphs: + # libra converts some \n into
<p> with 2 <br>
+            # there we remove 1 unnecessary <br>
    + brs = p.find_all('br') + text = p.text + if brs and text == '\n\n' and len(brs) == 2: + brs[0].decompose() + + align = p.get('align') + style = p.get('style') + + if style: + indent = re.search(r'text-indent: ([\d\.]{1,4})in', style) + margin_left = re.search(r'margin-left: ([\d\.]{1,4})in', style) + margin_right = re.search(r'margin-right: ([\d\.]{1,4})in', style) + margin_top = re.search(r'margin-top: ([\d\.]{1,4})in', style) + margin_bottom = re.search(r'margin-bottom: ([\d\.]{1,4})in', style) + else: + indent = None + margin_left = None + margin_right = None + margin_top = None + margin_bottom = None + + if margin_left and margin_right and margin_top and margin_bottom and \ + margin_left.group(1) == '0.6' and margin_right.group(1) == '0.6' and \ + margin_top.group(1) == '0.14' and margin_bottom.group(1) == '0.11': + p.wrap(BeautifulSoup(features='lxml').new_tag('blockquote')) + + p.attrs = {} + style = '' + + if align is not None and align != BookConfig.DEFAULT_ALIGN_STYLE: + style += f'text-align: {align};' + + if indent is not None: + indent = indent.group(1) + style += f'text-indent: {indent}in;' + + if style: + p.attrs['style'] = style + + def _process_two_columns(self): + """ + Function to process paragraphs which has two columns layout. + """ + two_columns = self.body_tag.find_all("div", style="column-count: 2") + for div in two_columns: + for child in div.children: + if child.name == "p": + child["class"] = "columns2" + div.unwrap() + + def _process_tables(self): + """ + Function to process tables. Set "border" attribute. 
+ """ + tables = self.body_tag.find_all("table") + for table in tables: + tds = table.find_all("td") + + sizes = [] + for td in tds: + style = td.get('style') + + if style: + match = re.search(r"border: ?(\d+\.?\d*)(p[tx])", style) + + if match: + size = match.group(1) + units = match.group(2) + + if units == "pt": + size = self.convert_pt_to_px(size) + + sizes.append(float(size)) + + width = td.get('width') + + td.attrs = {} + if width: + td.attrs['width'] = width + + if sizes: + border_size = sum(sizes) / len(sizes) + table.attrs['border'] = f'{border_size:.2}' + + self.tables_amount = len(tables) + + def _process_quotes(self): + """ + Function to process block quotes. + After docx to html conversion block quotes are stored inside table with 1 cell. + All text is wrapped in a tag. + Such tables will be replaced with
<blockquote> tags.
+
+        <table>
+            <tr>
+                <td>
+                    <p><i>aaaaa</i></p>
+                </td>
+            </tr>
+        </table>
+        =>
+        <blockquote>aaaaa</blockquote>
    + + """ + tables = self.body_tag.find_all("table") + for table in tables: + trs = table.find_all("tr") + tds = table.find_all("td") + if len(trs) == 1 and len(tds) == 1 and tds[0].get('width') == '600': + td = tds[0] + is_zero_border = 'border: none;' in td.get('style') + paragraphs = td.find_all("p") + has_i_tag_or_br = [(p.i, p.br) for p in paragraphs] + has_i_tag_or_br = [x[0] is not None or x[1] is not None + for x in has_i_tag_or_br] + + if all(has_i_tag_or_br) and is_zero_border: + new_div = BeautifulSoup(features='lxml').new_tag('blockquote') + for p in paragraphs: + new_div.append(p) + + table.replaceWith(new_div) + + def _process_hrefs(self): + a_tags_with_href = self.body_tag.find_all('a', {'href': re.compile('^.*http.+')}) + + # remove char=end of file for some editors + for tag in a_tags_with_href: + tag.string = tag.text.replace('\u200c', '') + tag['href'] = tag.attrs.get('href').replace('%E2%80%8C', '') + + # %E2%80%8C + for tag in a_tags_with_href: + print(tag) + + @staticmethod + def _clean_footnote_content(content): + content = content.strip() + return content.strip() + + def _process_footnotes(self): + """ + Function returns list of footnotes and delete them from html_soup. 
+ """ + footnote_anchors = self.body_tag.find_all('a', class_='sdfootnoteanc') + footnote_content = self.body_tag.find_all('div', id=re.compile(r'^sdfootnote\d+$')) + footnote_amt = len(footnote_anchors) + + assert footnote_amt == len(footnote_content), \ + 'Some ting went wrong with footnotes after libra conversion' + + footnotes = [] + + for i, (anc_tag, cont_tag) in enumerate(zip(footnote_anchors, footnote_content)): + if cont_tag.find('a').attrs.get('href') is None: + cont_tag.a.decompose() + continue + assert anc_tag['name'] == cont_tag.find('a')['href'][1:], \ + 'Something went wrong with footnotes after libra conversion' + + new_tag = BeautifulSoup(features='lxml').new_tag('sup') + new_tag['class'] = 'footnote-element' + new_tag['data-id'] = i + 1 + new_tag['id'] = f'footnote-{i + 1}' + new_tag.string = '*' + anc_tag.replace_with(new_tag) + + # extra digits in footnotes from documents downloaded from livecarta + a_text = cont_tag.a.text + if len(cont_tag.find_all('p')): + sup = cont_tag.find_all('p')[0].find('sup') + if sup and sup.text == a_text: + sup.decompose() + cont_tag.a.decompose() + + unicode_string = '' + for child in cont_tag.children: + if type(child) is NavigableString: + continue + if child.name == 'blockquote': + unicode_string += str(child) + else: + unicode_string += child.decode_contents() + + content = self._clean_footnote_content(unicode_string) + cont_tag.decompose() + + footnotes.append(content) + + self.footnotes = footnotes + + def _process_images(self, access, html_path, book_id): + """ + Function to process tag. Img should be sent Amazon S3 and then return new tag with valid link. + For now images are moved to one folder. 
+ """ + img_tags = self.body_tag.find_all('img') + + if len(img_tags): + if access is None: + folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + new_path = pathlib.Path(os.path.join(folder_path, f'json/img_{book_id}/')) + new_path.mkdir(exist_ok=True) + + for img in img_tags: + img_name = img.attrs.get('src') + # quick fix for bad links + if (len(img_name) >= 3) and img_name[:3] == '../': + img_name = img_name[3:] + + img_path = pathlib.Path(f'{html_path.parent}', f'{img_name}') + + if access is not None: + link = access.send_image(img_path, book_id) + img.attrs['src'] = link + self.logger_object.log(f'{img_name} successfully uploaded.') + else: + img_size = os.path.getsize(img_path) + self.logger_object.log(f'{img_name} successfully loaded. Image size: {img_size}.', logging.DEBUG) + new_img_path = new_path / img_name + copyfile(img_path, new_img_path) + img.attrs["src"] = str(new_img_path) + + self.images = img_tags + + def _process_footer(self): + """ + Function to process
    tags. + All the tags will be deleted from file. + """ + divs = self.body_tag.find_all('div', {'title': 'footer'}) + for div in divs: + div.decompose() + + def _process_div(self): + """ + Function to process
    tags. All the tags will be deleted from file, all content of the tags will stay. + """ + divs = self.body_tag.find_all("div") + + for div in divs: + div.unwrap() + + def _process_toc_links(self): + """ + Function to extract nodes which contains TOC links, remove links from file and detect headers. + """ + toc_links = self.body_tag.find_all("a", {'name': re.compile(r'^_Toc\d+')}) + headers = [link.parent for link in toc_links] + outline_level = "1" # All the unknown outlines will be predicted as

    + for tag in headers: + if re.search(r"^h\d$", tag.name): + tag.a.unwrap() + # outline_level = tag.name[-1] # TODO: add prediction of the outline level + # TODO: escape from recounting paragraphs every time + elif tag.name == "p": + if tag in self.body_tag.find_all("p"): + new_tag = BeautifulSoup(features="lxml").new_tag("h" + outline_level) + text = tag.text + tag.replaceWith(new_tag) + new_tag.string = text + else: + # rethink document structure when you have toc_links, other cases? + self.logger_object.log(f'Something went wrong in processing toc_links.' + f' Check the structure of the file. ' + f'Tag name: {tag.name}') + + @staticmethod + def clean_header_title(title): + """ + Function to remove digits and extra spaces from headers. + + :param title: Title to process. + """ + title = re.sub(r'\s+', ' ', title).strip() + title = re.sub(r'^(?:\.?\d+\.? ?)+', '', title) + # title = re.sub(r'^(?:\.?[MDCLXVIclxvi]+\.? ?)+ ', '', title) # delete chapter numbering from the title + title = re.sub(r'^(?:[A-Za-z]\. ?)+', '', title) + return title.strip() + + def _preprocessing_headings(self): + """ + Function to convert all lower level headings to p tags + """ + pattern = f'^h[{BookConfig.SUPPORTED_LEVELS + 1}-9]$' + header_tags = self.body_tag.find_all(re.compile(pattern)) + for tag in header_tags: + tag.name = 'p' + + def _get_top_level_headers(self): + """ + Function for gathering info about top-level chapters. + + Assume: + - Headers with smallest outline(or digit in ) are top level chapters. 
+ [ It is consistent with a recursive algorithm + for saving content to a resulted json structure, + which happens in header_to_json()] + + """ + headers_info = [] + header_tags = self.body_tag.find_all(re.compile("^h[1-9]$")) + headers_outline = [int(re.sub(r"^h", "", tag.name)) for tag in header_tags] + if headers_outline: + top_level_outline = min(headers_outline) + top_level_headers = [tag for tag in header_tags + if int(re.sub(r"^h", "", tag.name)) == top_level_outline] + + for tag in top_level_headers: + if tag.parent.name == "li": + tag.parent.unwrap() + while tag.parent.name == "ol": + tag.parent.unwrap() + + title = tag.text + title = re.sub(r'\s+', ' ', title).strip() + number = re.match(r'^(?:\.?\d+\.? ?)+', title) + is_numbered = number is not None + + cleaned_title = self.clean_header_title(tag.text) + is_introduction = cleaned_title.lower() == 'introduction' + + headers_info.append({ + 'title': cleaned_title, + 'is_numbered': is_numbered, + 'is_introduction': is_introduction}) + + return headers_info + + def _mark_introduction_headers(self): + """ + Function to find out: + what header shouldn't be numbered and can be treated as introduction chapter + + Assume header(s) to be introduction if: + 1. one header not numbered, before 1 numbered header + 2. 
it is first header from the top level list and it equals to 'introduction' + + Result : + Mark each top-level header with flag should_be_numbered = true/false + """ + is_numbered_header = [header['is_numbered'] for header in self.top_level_headers] + is_title = [header['is_introduction'] for header in self.top_level_headers] + + first_not_numbered = is_numbered_header and is_numbered_header[0] == 0 + second_is_numbered_or_not_exist = all(is_numbered_header[1:2]) + first_header_is_introduction = is_title and is_title[0] + + if (first_not_numbered and second_is_numbered_or_not_exist) or first_header_is_introduction: + self.top_level_headers[0]['should_be_numbered'] = False + for i in range(1, len(self.top_level_headers)): + self.top_level_headers[i]['should_be_numbered'] = True + else: + for i in range(0, len(self.top_level_headers)): + self.top_level_headers[i]['should_be_numbered'] = True + + def _process_headings(self): + """ + Function to process tags . + """ + header_tags = self.body_tag.find_all(re.compile("^h[1-9]$")) + for tag in header_tags: + if tag.parent.name == "li": + tag.parent.unwrap() + while tag.parent.name == "ol": + tag.parent.unwrap() + + title = tag.text + title = self.clean_header_title(title) + if title == "": + tag.unwrap() + else: + assert tag.name in BookConfig.SUPPORTED_HEADERS, \ + f'Preprocessing went wrong, there is still h{BookConfig.SUPPORTED_LEVELS + 1}-h9 headings.' + # if tag.name in ["h4", "h5", "h6"]: + # tag.name = "h3" # All the lower level headings will be transformed to h3 headings + + new_tag = BeautifulSoup(features='lxml').new_tag(name=tag.name) + new_tag.string = title + tag.replace_with(new_tag) + + def _process_lists(self): + """ + Function to process tags
<li>. + Unwrap <p>
    tags. + """ + li_tags = self.body_tag.find_all("li") + + for il_tag in li_tags: + il_tag.attrs.update(il_tag.p.attrs) + il_tag.p.unwrap() + + def process_html(self, access, html_path, book_id): + """ + Process html code to satisfy LawCarta formatting. + """ + try: + self.clean_trash() + + # process main elements of the .html doc + self.logger_object.log(f'Processing main elements of html.') + self._preprocessing_headings() + self._process_paragraph() + self._process_two_columns() + + self.logger_object.log('Block quotes processing.') + self._process_quotes() + + self.logger_object.log('Tables processing.') + self._process_tables() + self.logger_object.log(f'{self.tables_amount} tables have been processed.') + + self.logger_object.log('Hrefs processing.') + self._process_hrefs() + + self.logger_object.log('Footnotes processing.') + self._process_footnotes() + self.logger_object.log(f'{len(self.footnotes)} footnotes have been processed.') + + self.logger_object.log('Image processing.') + self._process_images(access=access, html_path=html_path, book_id=book_id) + self.logger_object.log(f'{len(self.images)} images have been processed.') + + self._process_footer() + self._process_div() + + self.content = self.body_tag.find_all(recursive=False) + + self.logger_object.log(f'Processing TOC and headers.') + self._process_toc_links() + + self.top_level_headers = self._get_top_level_headers() + self._mark_introduction_headers() + + self._process_headings() + + self.content = self.body_tag.find_all(recursive=False) + + self._process_lists() + # delete text before table of content if exists + self.delete_content_before_toc() + + except Exception as exc: + self.logger_object.log('Error has occurred while processing html.', logging.ERROR) + self.logger_object.log_error_to_main_log() + if self.book_api_wrapper: + self.book_api_wrapper.set_error_status() + raise exc + + self.logger_object.log('End of processing .html file.') + + return self.content, self.footnotes, 
self.top_level_headers diff --git a/src/json_converter.py b/src/json_converter.py new file mode 100644 index 0000000..0e88ad7 --- /dev/null +++ b/src/json_converter.py @@ -0,0 +1,145 @@ +import logging +import re +import codecs +import json + +from copy import copy +from config import BookConfig + + +class JSONConverter: + def __init__(self, content, footnotes, top_level_headers, logger_object, book_api_status=None): + self.content_dict = None + self.content = content + self.footnotes = footnotes + self.top_level_headers = top_level_headers + self.logger_object = logger_object + self.book_api_status = book_api_status + + @staticmethod + def format_html(html_text): + """ + Function to remove useless symbols from html code. + + :param html_text: Text to process. + :return: Cleaned text. + """ + new_text = re.sub(r'([\n\t])', ' ', html_text) + return new_text + + # TODO: rethink the function structure without indexes. + def header_to_json(self, ind): + """ + Function process header and collects all content for it. + + :param ind: Index of header in content list. + """ + if self.content[ind].name in BookConfig.SUPPORTED_HEADERS: + title = self.content[ind].text + curr_outline = int(re.sub(r"^h", "", self.content[ind].name)) # extract outline from tag + result = { + 'title': title, + 'contents': [], + 'sub_items': [] + } + ch_content = [] + ind += 1 + + while ind < len(self.content): + # 1. next tag is a header + if self.content[ind].name in BookConfig.SUPPORTED_HEADERS: + outline = int(re.sub(r"^h", "", self.content[ind].name)) + # - recursion step until h_i > h_initial + if outline > curr_outline: + header_dict, ind = self.header_to_json(ind) + if ch_content: + result['contents'].append("".join(ch_content)) + ch_content = [] + result['sub_items'].append(header_dict) + # - current h_i <= h_initial, end of recursion + else: + # return result, ind + break + # 2. next tag is not a header. 
add new paragraphs + else: + html_str = self.format_html(str(self.content[ind])) + ch_content.append(html_str) + ind += 1 + + if ch_content: + result['contents'].append("".join(ch_content)) + return result, ind + return '' + + @staticmethod + def _is_empty_p_tag(tag): + if tag.name != 'p': + return False + + temp_tag = copy(tag) + brs = temp_tag.find_all('br') + for br in brs: + br.decompose() + + text = re.sub(r'\s+', '', temp_tag.text) + if text: + return False + + return True + + def convert_to_json(self): + """ + Function which convert list of html nodes to appropriate json structure. + """ + json_strc = [] + ind = 0 + ch_num = 0 + ch_amt = 0 + + try: + while ind < len(self.content): + res = {} + + if self.content[ind].name in BookConfig.SUPPORTED_HEADERS: + res, ind = self.header_to_json(ind) + + else: + chapter_title = f'Untitled chapter {ch_num}' + chapter = [] + while ind < len(self.content) and self.content[ind].name not in BookConfig.SUPPORTED_HEADERS: + if not self._is_empty_p_tag(self.content[ind]): + chapter.append(self.format_html(str(self.content[ind]))) + ind += 1 + if chapter: + res = { + 'title': chapter_title, + 'contents': ["".join(chapter)], + 'sub_items': [] + } + ch_num += 1 + + if res: + json_strc.append(res) + ch_amt += 1 + self.logger_object.log(f'Chapter {ch_amt} has been added to structure.') + except Exception as exc: + self.logger_object.log('Error has occurred while making json structure.', logging.ERROR) + self.logger_object.log_error_to_main_log() + if self.book_api_status: + self.book_api_status.set_error_status() + raise exc + + # Add is_introduction field to json structure + # after deleting content before toc, some chapters can be deleted + if self.top_level_headers: + same_first_titles = self.top_level_headers[0]['title'] == json_strc[0]['title'] + is_first_header_introduction = not self.top_level_headers[0]['should_be_numbered'] + + json_strc[0]['is_introduction'] = is_first_header_introduction and same_first_titles + + 
self.content_dict = { + "content": json_strc, + "footnotes": self.footnotes + } + + return self.content_dict