Add inline style processor [Docx]

This commit is contained in:
Kiryl
2022-09-02 14:47:06 +03:00
parent b97c5d8371
commit dfdf6bc7e9
3 changed files with 50 additions and 27 deletions

View File

@@ -0,0 +1,596 @@
import re
import json
import pathlib
from typing import List, Dict, Union
from bs4 import BeautifulSoup, Tag, NavigableString
from src.util.helpers import BookLogger
from src.livecarta_config import LiveCartaConfig
from src.docx_converter.image_processing import process_images
from src.docx_converter.footnotes_processing import process_footnotes
from src.tag_inline_style_processor import modify_html_soup_with_css_styles
class HTMLDocxPreprocessor:
def __init__(self, html_soup: BeautifulSoup, logger: BookLogger,
             style_processor, preset_path: str = "presets/docx_presets.json"):
    """
    Build the preprocessor around an already-parsed HTML soup.

    Parameters
    ----------
    html_soup: BeautifulSoup
        parsed document produced by the docx->html conversion
    logger: BookLogger
        sink for progress/diagnostic messages
    style_processor
        object providing process_inline_styles_in_html_soup()
    preset_path: str
        path to the JSON file with decompose/unwrap rules
    """
    self.body_tag = html_soup.body
    self.html_soup = html_soup
    self.logger = logger
    # fixed: close the preset file instead of leaking the handle
    with open(preset_path, encoding="utf-8") as preset_file:
        self.preset = json.load(preset_file)
    self.style_processor = style_processor
    # maps preset rule names to tag-processing actions
    self.name2action = {
        "decomposer": self._decompose_tag,
        "unwrapper": self._unwrap_tag
    }
    self.top_level_headers = None  # filled by _get_top_level_headers()
    self.content = list()          # filled by delete_content_before_toc()
def _process_toc_links(self):
    """Function to extract nodes which contains TOC links, remove links from file and detect headers."""

    def _check_parent_link_exist_in_toc(tag_with_link: Tag) -> bool:
        # a node is "in the TOC" when at least one of its _Toc anchors
        # is referenced by an <a href="#_Toc..."> somewhere in the body
        toc_links = []
        for a_tag in tag_with_link.find_all("a", {"name": re.compile(r"^_Toc\d+")}):
            link_name = a_tag.attrs["name"]
            toc_item = self.body_tag.find("a", {"href": "#" + link_name})
            if toc_item:
                toc_links.append(toc_item)
        return len(toc_links) > 0

    toc_links = self.body_tag.find_all(
        "a", {"name": re.compile(r"^_Toc\d+")})
    headers = [link.parent for link in toc_links]
    outline_level = "1"  # All the unknown outlines will be predicted as <h1>
    for h_tag in headers:
        if re.search(r"^h\d$", h_tag.name):
            # already a heading: just strip the TOC anchor
            h_tag.a.unwrap()
            # outline_level = tag.name[-1]  # TODO: add prediction of the outline level
        elif h_tag.name == "p":
            exist_in_toc = _check_parent_link_exist_in_toc(h_tag)
            if h_tag in self.body_tag.find_all("p") and exist_in_toc:
                # promote the paragraph to a heading, keeping its text
                new_tag = BeautifulSoup(
                    features="lxml").new_tag("h" + outline_level)
                text = h_tag.text
                h_tag.replaceWith(new_tag)
                new_tag.string = text
        else:
            # rethink document structure when you have toc_links, other cases?
            # fixed: self.logger_object does not exist -- the attribute is self.logger
            self.logger.log(f"Something went wrong in processing toc_links."
                            f" Check the structure of the file. "
                            f"Tag name: {h_tag.name}")
def _clean_tag(self, tag: str, attr_name: str, attr_value: Union[str, re.Pattern]):
    # todo regex
    """
    Function to clean tags by its name and attribute value.
    Parameters
    ----------
    tag: str
        tag name to clean
    attr_name: str
        attribute name
    attr_value: [str,re]
        attribute value
    Returns
    -------
    clean tag
    """
    tags = self.body_tag.find_all(tag, {attr_name: attr_value})
    for tag in tags:
        # unwrap only when the matched attribute is the tag's sole
        # attribute, so tags carrying other information are kept intact
        if len(tag.attrs) == 1:
            tag.unwrap()
def _clean_underline_links(self):
    # todo regex
    """Function cleans meaningless <u> tags before links."""
    # case 1: <u> wraps one or more <a> tags -- drop the <u>
    for underline in self.body_tag.find_all("u"):
        if underline.find_all("a"):
            underline.unwrap()
    # case 2: a link wraps exactly one <u> -- drop that <u>
    for anchor in self.body_tag.find_all("a"):
        nested_underlines = anchor.find_all("u")
        if len(nested_underlines) == 1:
            nested_underlines[0].unwrap()
@classmethod
def convert_pt_to_px(cls, value: float) -> float:
    """Map the Word default font size (pt) to the LiveCarta default (px);
    any other value is passed through numerically unchanged."""
    numeric = float(value)
    if numeric != LiveCartaConfig.WORD_DEFAULT_FONT_SIZE:
        return numeric
    return LiveCartaConfig.LIVECARTA_DEFAULT_FONT_SIZE
@classmethod
def convert_font_pt_to_px(cls, style: str) -> str:
    """
    Function converts point in the font-size to pixels.
    Parameters
    ----------
    style: str
        str with style to proces
    Returns
    -------
    : str
        str with converted style; unchanged when no pt font-size is
        present, empty when the converted size equals the default
    """
    match = re.search(r"font-size: (\d{1,3})pt", style)
    if match is None:
        return style
    pt_value = match.group(1)
    px_value = cls.convert_pt_to_px(pt_value)
    # the default size needs no inline style at all
    if px_value == LiveCartaConfig.LIVECARTA_DEFAULT_FONT_SIZE:
        return ""
    return re.sub(pt_value + "pt", str(px_value) + "px", style)
def _font_to_span(self):
    """
    Function to convert <font> tag to <span>.
    If font style is default, then remove this tag.
    """
    for font_tag in self.body_tag.find_all("font"):
        style = font_tag.get("style")
        color = font_tag.get("color")
        # rename first; attributes are re-added selectively below
        font_tag.attrs = {}
        font_tag.name = "span"
        color_is_known = bool(color) and color in LiveCartaConfig.COLORS_MAP
        if style:
            style = self.convert_font_pt_to_px(style)
            # NOTE: when the style collapses to the default (empty string)
            # the color is intentionally dropped as well, matching the
            # original nesting
            if style != "":
                if color_is_known:
                    style += f"; color: {color};"
                font_tag.attrs["style"] = style
        elif color_is_known:
            font_tag.attrs["style"] = f"color: {color};"
        if not font_tag.attrs:
            font_tag.unwrap()
    # on this step there should be no more <font> tags
    assert len(self.body_tag.find_all("font")) == 0
def clean_trash(self):
    # todo make it regex dict
    """Function to remove all styles and tags we don"t need."""
    self._clean_tag("span", "style", re.compile(
        r"^background: #[\da-fA-F]{6}$"))
    # todo: check for another languages
    self._clean_tag("span", "lang", re.compile(r"^ru-RU$"))
    # fixed: raw string -- "\d" in a plain string is an invalid escape
    # sequence (DeprecationWarning, SyntaxError in future Pythons)
    self._clean_tag("span", "style", re.compile(
        r"^letter-spacing: -?[\d.]+pt$"))
    self._clean_tag("font", "face", re.compile(
        r"^Times New Roman[\w, ]+$"))
    self._clean_tag("a", "name", "_GoBack")
    self._clean_underline_links()
    self._font_to_span()
    # replace toc with empty <TOC> tag; the empty marker is later
    # matched by delete_content_before_toc()
    tables = self.body_tag.find_all(
        "div", id=re.compile(r"^Table of Contents\d+"))
    for table in tables:
        table.wrap(self.html_soup.new_tag("TOC"))
        table.decompose()
def _preprocessing_headings(self):
    # todo regex
    """Function to convert all lower level headings to p tags"""
    # headings deeper than the supported outline level become paragraphs
    unsupported = re.compile(f"^h[{LiveCartaConfig.SUPPORTED_LEVELS + 1}-9]$")
    for heading in self.body_tag.find_all(unsupported):
        heading.name = "p"
def _process_paragraph(self):
    """Function to process <p> tags (text-align and text-indent value)."""
    paragraphs = self.body_tag.find_all("p")
    for p in paragraphs:
        # libre converts some \n into <p> with 2 </br>
        # there we remove 1 unnecessary <br>
        brs = p.find_all("br")
        text = p.text
        if brs and text == "\n\n" and len(brs) == 2:
            brs[0].decompose()
        # a leading tab (possibly preceded by a newline) marks an indent
        indent_should_be_added = False
        if text and ((text[0:1] == "\t") or (text[:2] == "\n\t")):
            indent_should_be_added = True
        align = p.get("align")
        style = p.get("style")
        if style:
            indent = re.search(r"text-indent: ([\d.]{1,4})in", style)
            margin_left = re.search(r"margin-left: ([\d.]{1,4})in", style)
            margin_right = re.search(
                r"margin-right: ([\d.]{1,4})in", style)
            margin_top = re.search(r"margin-top: ([\d.]{1,4})in", style)
            margin_bottom = re.search(
                r"margin-bottom: ([\d.]{1,4})in", style)
        else:
            indent = margin_left = margin_right = \
                margin_top = margin_bottom = None
        # this exact margin combination (0.6in sides, 0.14/0.11in
        # vertical) identifies a converter-rendered block quote
        if margin_left and margin_right and margin_top and margin_bottom and \
                margin_left.group(1) == "0.6" and margin_right.group(1) == "0.6" and \
                margin_top.group(1) == "0.14" and margin_bottom.group(1) == "0.11":
            p.wrap(BeautifulSoup(features="lxml").new_tag("blockquote"))
        # drop all original attributes and rebuild the style from scratch
        p.attrs = {}
        style = ""
        if align is not None and align != LiveCartaConfig.DEFAULT_ALIGN_STYLE:
            style += f"text-align: {align};"
        if indent is not None or indent_should_be_added:
            # indent = indent.group(1)
            style += f"text-indent: {LiveCartaConfig.INDENT};"
        if style:
            p.attrs["style"] = style
def _process_two_columns(self):
    """Function to process paragraphs which has two columns layout."""
    # tag each <p> child with class "columns2", then drop the wrapper div
    for column_div in self.body_tag.find_all("div", style="column-count: 2"):
        for child in column_div.children:
            if child.name == "p":
                child["class"] = "columns2"
        column_div.unwrap()
def _process_quotes(self):
    """
    Function to process block quotes.
    After docx to html conversion block quotes are stored inside table with 1 cell.
    All text is wrapped in a <i> tag.
    Such tables will be replaced with <blockquote> tags.
    <table cellpadding=\"7\" cellspacing=\"0\" width=\"614\">
    <col width=\"600\"/>
    <tr>
    <td width=\"600\">
    <p style=\"text-align: justify;\"><i>aaaaa</i></p>
    <p style=\"text-align: justify;\"><br/></p>
    </td>
    </tr>
    </table>
    """
    tables = self.body_tag.find_all("table")
    for table in tables:
        trs = table.find_all("tr")
        tds = table.find_all("td")
        if len(trs) == 1 and len(tds) == 1 and tds[0].get("width") == "600":
            td = tds[0]
            # fixed: td.get("style") may be None, which made the `in`
            # test raise TypeError for cells without a style attribute
            is_zero_border = "border: none;" in (td.get("style") or "")
            paragraphs = td.find_all("p")
            # every paragraph must contain an <i> or a <br> to qualify
            has_i_tag_or_br = all(p.i is not None or p.br is not None
                                  for p in paragraphs)
            if has_i_tag_or_br and is_zero_border:
                new_div = BeautifulSoup(
                    features="lxml").new_tag("blockquote")
                for p in paragraphs:
                    new_div.append(p)
                table.replaceWith(new_div)
def _process_tables(self):
    """Function to process tables. Set "border" attribute.

    Averages the per-cell border widths (pt converted to px) and stores
    the mean as the table's "border" attribute; every <td> keeps only
    its "width" attribute. Also records len(tables) in self.tables_amount.
    """
    tables = self.body_tag.find_all("table")
    for table in tables:
        sizes = []
        for td in table.find_all("td"):
            style = td.get("style")
            if style:
                match = re.search(r"border: ?(\d+\.?\d*)(p[tx])", style)
                if match:
                    size = match.group(1)
                    units = match.group(2)
                    if units == "pt":
                        size = self.convert_pt_to_px(size)
                    sizes.append(float(size))
            # keep only the width attribute of each cell
            width = td.get("width")
            td.attrs = {}
            if width:
                td.attrs["width"] = width
        if sizes:
            border_size = sum(sizes) / len(sizes)
            # fixed: ":.2" means 2 *significant digits* and produces
            # scientific notation for widths >= 10 (e.g. "1.2e+01");
            # ":.2f" keeps a plain decimal attribute value
            table.attrs["border"] = f"{border_size:.2f}"
    self.tables_amount = len(tables)
def _process_hrefs(self):
    """Strip zero-width characters from link texts and hrefs."""
    http_links = self.body_tag.find_all(
        "a", {"href": re.compile("^.*http.+")})
    # remove char=end of file for some editors
    for link in http_links:
        link.string = link.text.replace("\u200c", "")
        link["href"] = link.attrs.get("href").replace("%E2%80%8C", "")
    non_footnote_links = self.body_tag.find_all(
        "a", {"href": re.compile("^(?!#sdfootnote)")})
    for link in non_footnote_links:
        link.string = link.text.replace("\u200c", "")
        link.string = link.text.replace("\u200b", "")  # zero-width-space
        link["href"] = link.attrs.get("href").replace("%E2%80%8C", "")
def _process_footer(self):
    # todo regex
    """
    Function to process <div title="footer"> tags.
    All the tags will be deleted from file.
    """
    for footer_div in self.body_tag.find_all("div", {"title": "footer"}):
        footer_div.decompose()
def _process_div(self):
    # todo regex
    """Function to process <div> tags. All the tags will be deleted from file, all content of the tags will stay."""
    for div_tag in self.body_tag.find_all("div"):
        div_tag.unwrap()
def _get_top_level_headers(self) -> List[Dict[str, Union[str, bool]]]:
    """
    Function for gathering info about top-level chapters.
    Assume: _
    - Headers with the smallest outline(or digit in <h>) are top level chapters.
    [ It is consistent with a recursive algorithm
    for saving content to a resulted json structure,
    which happens in header_to_json()]
    """
    headers_info = []
    header_tags = self.body_tag.find_all(re.compile("^h[1-9]$"))
    outlines = [int(re.sub(r"^h", "", tag.name)) for tag in header_tags]
    if not outlines:
        return headers_info
    top_outline = min(outlines)
    for tag in header_tags:
        if int(re.sub(r"^h", "", tag.name)) != top_outline:
            continue
        # pull top-level headers out of any surrounding list markup
        if tag.parent.name == "li":
            tag.parent.unwrap()
        while tag.parent.name == "ol":
            tag.parent.unwrap()
        # detect a leading chapter number like "1." / "2.3 "
        normalized = re.sub(r"\s+", " ", tag.text).strip()
        is_numbered = re.match(r"^(?:\.?\d+\.? ?)+", normalized) is not None
        cleaned_title = re.sub(r"[\s\xa0]", " ", tag.text)
        headers_info.append({
            "title": cleaned_title,
            "is_numbered": is_numbered,
            "is_introduction": cleaned_title.lower() == "introduction",
        })
    return headers_info
def _mark_introduction_headers(self):
    """
    Function to find out:
    what header shouldn"t be numbered and can be treated as introduction chapter
    Assume header(s) to be introduction if:
    1. one header not numbered, before 1 numbered header
    2. it is first header from the top level list, and it equals to "introduction"
    Returns
    -------
    None
        mark each top-level header with flag should_be_numbered = true/false
    """
    numbered_flags = [h["is_numbered"] for h in self.top_level_headers]
    intro_flags = [h["is_introduction"] for h in self.top_level_headers]
    leading_unnumbered = bool(numbered_flags) and numbered_flags[0] == 0
    # slice [1:2] looks only at the second header; all() of an empty
    # slice is True, i.e. "second is numbered OR does not exist"
    next_numbered_or_absent = all(numbered_flags[1:2])
    starts_with_introduction = bool(intro_flags) and intro_flags[0]
    # default: every top-level header is numbered ...
    for header in self.top_level_headers:
        header["should_be_numbered"] = True
    # ... except a recognized introduction chapter in first position
    if (leading_unnumbered and next_numbered_or_absent) or starts_with_introduction:
        self.top_level_headers[0]["should_be_numbered"] = False
@staticmethod
def clean_title_from_tabs(tag: NavigableString):
    """Replace every whitespace/nbsp character in *tag* with a plain
    space and swap the node in place."""
    normalized = re.sub(r"[\s\xa0]", " ", tag)
    replacement = BeautifulSoup.new_string(
        BeautifulSoup(features="lxml"), normalized, NavigableString)
    tag.replace_with(replacement)
def apply_func_to_last_child(self, tag: Union[NavigableString, Tag], func=None):
    """
    works only with constructions like (((child to work with)))
    where child is object of NavigableString
    """
    # NOTE(review): despite the name, this recurses into children[0]
    # (the FIRST child). For the single-chain nesting described above
    # first == last, so behavior matches the docstring -- confirm before
    # relying on it for multi-child trees.
    if type(tag) is NavigableString:
        func(tag)
    else:
        children = list(tag.children)
        if children:
            self.apply_func_to_last_child(children[0], func)
def _process_headings(self):
    # todo regex
    """
    Function to process tags <h>.
    Steps
    ----------
    1. remove <b>, <span>
    2. clean text in header from numbering and \n
    Returns
    -------
    None
        processed <h> tags
    """
    header_tags = self.body_tag.find_all(re.compile("^h[1-9]$"))
    # 1. remove <b>, <span>
    for tag in header_tags:
        b_tags = tag.find_all("b")
        [tag.unwrap() for tag in b_tags]
        spans = tag.find_all("span")
        if spans:
            [span.unwrap() for span in spans]
        # drop every attribute the converter left on the heading
        tag.attrs = {}
    header_tags = self.body_tag.find_all(re.compile("^h[1-9]$"))
    # 2. clean text in header from numbering and \n
    for tag in header_tags:
        # pull the heading out of any surrounding list markup
        if tag.parent.name == "li":
            tag.parent.unwrap()
        while tag.parent.name == "ol":
            tag.parent.unwrap()
        cleaned_title = re.sub(r"[\s\xa0]", " ", tag.text)
        if cleaned_title == "":
            # whitespace-only heading: keep children, drop the tag
            tag.unwrap()
        else:
            assert tag.name in LiveCartaConfig.SUPPORTED_HEADERS, \
                f"Preprocessing went wrong, there is still h{LiveCartaConfig.SUPPORTED_LEVELS + 1}-h9 headings."
            content = list(tag.children)
            # do not take into account rubbish empty tags like <a>, but don"t remove them
            content = [item for item in content if
                       (type(item) is not NavigableString and item.text != "")
                       or (type(item) is NavigableString)]
            # drop a leading single-space text node, then any empties
            content[0] = "" if content[0] == " " else content[0]
            content = [item for item in content if item != ""]
            for i, item in enumerate(content):
                if type(content[i]) is NavigableString:
                    # collapse whitespace runs inside plain text nodes
                    cleaned = re.sub(r"(\s+)+", " ", content[i])
                    this = BeautifulSoup.new_string(BeautifulSoup(
                        features="lxml"), cleaned, NavigableString)
                    content[i].replace_with(this)
                    content[i] = this
                else:
                    # nested tags: normalize only their innermost text
                    self.apply_func_to_last_child(
                        content[i], self.clean_title_from_tabs)
def _process_lists(self):
    # todo regex
    """
    Function
    - process tags <li>.
    - unwrap <p> tags.
    Returns
    -------
    None
        uwrap <p> tag with li
    """
    for li_tag in self.body_tag.find_all("li"):
        inner_p = li_tag.p
        # fixed: an <li> without an inner <p> made li_tag.p None and
        # raised AttributeError; such items are left untouched
        if inner_p is None:
            continue
        li_tag.attrs.update(inner_p.attrs)
        inner_p.unwrap()
def delete_content_before_toc(self):
    # remove all tag upper the <TOC> only in content !!! body tag is not updated
    """Populate self.content with top-level tags, dropping everything before the <TOC> marker."""
    toc_tag = self.html_soup.new_tag("TOC")
    # bs4 Tag equality compares name/attrs/contents, so this fresh empty
    # <TOC> matches the empty marker left behind by clean_trash()
    self.content: List[Tag] = self.body_tag.find_all(recursive=False)
    if toc_tag in self.content:
        ind = self.content.index(toc_tag) + 1
        self.content = self.content[ind:]
def process_html(self, access=None, html_path: pathlib.Path = "", book_id: int = 0):
    """Process html code to satisfy LiveCarta formatting.

    Runs the full pipeline: TOC links, inline styles, preset rules,
    trash cleanup, paragraphs, quotes, tables, hrefs, images, footnotes,
    headings and lists; finally drops content before the <TOC> marker.

    Returns
    -------
    tuple
        (self.content, self.footnotes, self.top_level_headers)
    """
    self.logger.log("Beginning of processing .html file.")
    self.logger.log("Processing TOC and headers.")
    self._process_toc_links()
    self.logger.log("CSS inline style preprocessing.")
    self.style_processor.process_inline_styles_in_html_soup(self.html_soup)
    self.logger.log("CSS inline style processing.")
    modify_html_soup_with_css_styles(self.html_soup)
    # apply configured decompose/unwrap preset rules
    for rule in self.preset:
        self.logger.log(rule["preset_name"] + " process.")
        action = self.name2action[rule["preset_name"]]
        self._process_tags(self.body_tag, rule["rules"], action)
    self.clean_trash()
    # process main elements of the .html doc
    # fixed: self.logger_object does not exist -- the attribute is self.logger
    self.logger.log("Processing main elements of html.")
    self._preprocessing_headings()
    self._process_paragraph()
    self._process_two_columns()
    self.logger.log("Block quotes processing.")
    self._process_quotes()
    self.logger.log("Tables processing.")
    self._process_tables()
    self.logger.log(
        f"{self.tables_amount} tables have been processed.")
    self.logger.log("Hrefs processing.")
    self._process_hrefs()
    self.logger.log("Image processing.")
    self.images = process_images(access, path_to_html=html_path,
                                 book_id=book_id, body_tag=self.body_tag)
    self.logger.log(
        f"{len(self.images)} images have been processed.")
    self.logger.log("Footnotes processing.")
    self.footnotes = process_footnotes(self.body_tag)
    self.logger.log(
        f"{len(self.footnotes)} footnotes have been processed.")
    self._process_div()
    self.top_level_headers = self._get_top_level_headers()
    self._mark_introduction_headers()
    self._process_headings()
    self._process_lists()
    # delete text before table of content if exists
    self.delete_content_before_toc()
    self.logger.log("End of processing .html file.")
    return self.content, self.footnotes, self.top_level_headers