Mirror of https://github.com/qurator-spk/eynollah.git, synced 2025-10-27 15:54:13 +01:00
Resolve merge conflict of main and machine based reading order branch
This commit is contained in commit 6aee70d0cd
36 changed files with 357 additions and 269 deletions
0 src/eynollah/__init__.py Normal file
310 src/eynollah/cli.py Normal file
@@ -0,0 +1,310 @@
import sys
import os  # needed for os.listdir() in machine_based_reading_order below
import click
from ocrd_utils import initLogging, setOverrideLogLevel
from eynollah.eynollah import Eynollah
from eynollah.sbb_binarize import SbbBinarizer

@click.group()
def main():
    pass

@main.command()
@click.option(
    "--dir_xml",
    "-dx",
    help="directory of GT page-xml files",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--dir_out_modal_image",
    "-domi",
    help="directory where ground truth images will be written",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--dir_out_classes",
    "-docl",
    help="directory where ground truth classes will be written",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--input_height",
    "-ih",
    help="input height",
)
@click.option(
    "--input_width",
    "-iw",
    help="input width",
)
@click.option(
    "--min_area_size",
    "-min",
    help="minimum area size of regions considered for reading order training.",
)
def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size):
    xml_files_ind = os.listdir(dir_xml)

@main.command()
@click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model see the image in patches.')
@click.option('--model_dir', '-m', type=click.Path(exists=True, file_okay=False), required=True, help='directory containing models for prediction')
@click.argument('input_image')
@click.argument('output_image')
@click.option(
    "--dir_in",
    "-di",
    help="directory of images",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--dir_out",
    "-do",
    help="directory where the binarized images will be written",
    type=click.Path(exists=True, file_okay=False),
)
def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out):
    # -di and -do only make sense together: both or neither.
    if not dir_out and dir_in:
        print("Error: You used -di but did not set -do")
        sys.exit(1)
    elif dir_out and not dir_in:
        print("Error: You used -do to write out binarized images but have not set -di")
        sys.exit(1)
    SbbBinarizer(model_dir).run(image_path=input_image, use_patches=patches, save=output_image, dir_in=dir_in, dir_out=dir_out)

@main.command()
@click.option(
    "--image",
    "-i",
    help="image filename",
    type=click.Path(exists=True, dir_okay=False),
)
@click.option(
    "--out",
    "-o",
    help="directory to write output xml data",
    type=click.Path(exists=True, file_okay=False),
    required=True,
)
@click.option(
    "--dir_in",
    "-di",
    help="directory of images",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--model",
    "-m",
    help="directory of models",
    type=click.Path(exists=True, file_okay=False),
    required=True,
)
@click.option(
    "--save_images",
    "-si",
    help="if a directory is given, images in documents will be cropped and saved there",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--save_layout",
    "-sl",
    help="if a directory is given, plot of layout will be saved there",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--save_deskewed",
    "-sd",
    help="if a directory is given, deskewed image will be saved there",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--save_all",
    "-sa",
    help="if a directory is given, all plots needed for documentation will be saved there",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--save_page",
    "-sp",
    help="if a directory is given, page crop of image will be saved there",
    type=click.Path(exists=True, file_okay=False),
)
@click.option(
    "--enable-plotting/--disable-plotting",
    "-ep/-noep",
    is_flag=True,
    help="if set, will plot intermediary files and images",
)
@click.option(
    "--extract_only_images/--disable-extracting_only_images",
    "-eoi/-noeoi",
    is_flag=True,
    help="if set to true, only images in the document will be cropped and saved; no other processing will be done",
)
@click.option(
    "--allow-enhancement/--no-allow-enhancement",
    "-ae/-noae",
    is_flag=True,
    help="if set to true, the tool checks whether the input image needs resizing and enhancement; if so, the resized and enhanced image and the corresponding layout data will be written to the output directory",
)
@click.option(
    "--curved-line/--no-curvedline",
    "-cl/-nocl",
    is_flag=True,
    help="if set to true, the tool will try to return contours of textlines instead of rectangular bounding boxes. Note that this option needs more processing time.",
)
@click.option(
    "--textline_light/--no-textline_light",
    "-tll/-notll",
    is_flag=True,
    help="if set to true, the tool will try to return contours of textlines instead of rectangular bounding boxes, using a faster method.",
)
@click.option(
    "--full-layout/--no-full-layout",
    "-fl/-nofl",
    is_flag=True,
    help="if set to true, the tool will try to return all elements of the layout.",
)
@click.option(
    "--tables/--no-tables",
    "-tab/-notab",
    is_flag=True,
    help="if set to true, the tool will try to detect tables.",
)
@click.option(
    "--right2left/--left2right",
    "-r2l/-l2r",
    is_flag=True,
    help="if set to true, the tool will extract right-to-left reading order.",
)
@click.option(
    "--input_binary/--input-RGB",
    "-ib/-irgb",
    is_flag=True,
    help="in general, eynollah uses RGB input, but if the input document is very dark or bright (or for any other reason) you can enable binarized input. This does not mean you have to provide a binary image; the tool itself will binarize the RGB input document.",
)
@click.option(
    "--allow_scaling/--no-allow-scaling",
    "-as/-noas",
    is_flag=True,
    help="if set to true, the tool will check the scale of the image and, if needed, rescale it to perform better layout detection",
)
@click.option(
    "--headers_off/--headers-on",
    "-ho/-noho",
    is_flag=True,
    help="if set to true, the tool will ignore the role of headers in reading order detection",
)
@click.option(
    "--light_version/--original",
    "-light/-org",
    is_flag=True,
    help="if set to true, the tool will use the lighter version",
)
@click.option(
    "--ignore_page_extraction/--extract_page_included",
    "-ipe/-epi",
    is_flag=True,
    help="if set to true, the tool will skip page extraction",
)
@click.option(
    "--reading_order_machine_based/--heuristic_reading_order",
    "-romb/-hro",
    is_flag=True,
    help="if set to true, the tool will apply machine-based reading order detection",
)
@click.option(
    "--do_ocr",
    "-ocr/-noocr",
    is_flag=True,
    help="if set to true, the tool will try to do OCR",
)
@click.option(
    "--num_col_upper",
    "-ncu",
    help="upper limit of columns in document image",
)
@click.option(
    "--num_col_lower",
    "-ncl",
    help="lower limit of columns in document image",
)
@click.option(
    "--skip_layout_and_reading_order",
    "-slro/-noslro",
    is_flag=True,
    help="if set to true, the tool will skip layout detection and reading order; textline detection will be done within the printspace and the textline contours will be written to the xml output file.",
)
@click.option(
    "--log_level",
    "-l",
    type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']),
    help="Override log level globally to this",
)
def layout(image, out, dir_in, model, save_images, save_layout, save_deskewed, save_all, save_page, enable_plotting, extract_only_images, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, skip_layout_and_reading_order, ignore_page_extraction, log_level):
    if log_level:
        setOverrideLogLevel(log_level)
    initLogging()
    if not enable_plotting and (save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement):
        print("Error: You used one of -sl, -sd, -sa, -sp, -si or -ae but did not enable plotting with -ep")
        sys.exit(1)
    elif enable_plotting and not (save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement):
        print("Error: You used -ep to enable plotting but set none of -sl, -sd, -sa, -sp, -si or -ae")
        sys.exit(1)
    if textline_light and not light_version:
        print('Error: You used -tll to enable light textline detection but -light is not enabled')
        sys.exit(1)
    if light_version and not textline_light:
        print('Error: You used -light without -tll. The light version needs light textline detection to be enabled.')
        sys.exit(1)
    if extract_only_images and (allow_enhancement or allow_scaling or light_version or curved_line or textline_light or full_layout or tables or right2left or headers_off):
        print('Error: You used -eoi, which cannot be enabled alongside light_version -light, allow_scaling -as, allow_enhancement -ae, curved_line -cl, textline_light -tll, full_layout -fl, tables -tab, right2left -r2l or headers_off -ho')
        sys.exit(1)
    eynollah = Eynollah(
        image_filename=image,
        dir_out=out,
        dir_in=dir_in,
        dir_models=model,
        dir_of_cropped_images=save_images,
        extract_only_images=extract_only_images,
        dir_of_layout=save_layout,
        dir_of_deskewed=save_deskewed,
        dir_of_all=save_all,
        dir_save_page=save_page,
        enable_plotting=enable_plotting,
        allow_enhancement=allow_enhancement,
        curved_line=curved_line,
        textline_light=textline_light,
        full_layout=full_layout,
        tables=tables,
        right2left=right2left,
        input_binary=input_binary,
        allow_scaling=allow_scaling,
        headers_off=headers_off,
        light_version=light_version,
        ignore_page_extraction=ignore_page_extraction,
        reading_order_machine_based=reading_order_machine_based,
        do_ocr=do_ocr,
        num_col_upper=num_col_upper,
        num_col_lower=num_col_lower,
        skip_layout_and_reading_order=skip_layout_and_reading_order,
    )
    if dir_in:
        eynollah.run()
    else:
        pcgts = eynollah.run()
        eynollah.writer.write_pagexml(pcgts)

if __name__ == "__main__":
    main()
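As a quick smoke test of the command group above, one can drive it with click's built-in test runner; this is a minimal sketch (not part of the commit), and asking for --help exercises the option declarations without needing any models.

    from click.testing import CliRunner
    from eynollah.cli import main

    runner = CliRunner()
    result = runner.invoke(main, ["binarization", "--help"])  # nothing is processed
    assert result.exit_code == 0
    print(result.output)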
5345 src/eynollah/eynollah.py Normal file
File diff suppressed because it is too large
65 src/eynollah/ocrd-tool.json Normal file
@@ -0,0 +1,65 @@
{
  "version": "0.3.1",
  "git_url": "https://github.com/qurator-spk/eynollah",
  "tools": {
    "ocrd-eynollah-segment": {
      "executable": "ocrd-eynollah-segment",
      "categories": ["Layout analysis"],
      "description": "Segment page into regions and lines and do reading order detection with eynollah",
      "input_file_grp": ["OCR-D-IMG", "OCR-D-SEG-PAGE", "OCR-D-GT-SEG-PAGE"],
      "output_file_grp": ["OCR-D-SEG-LINE"],
      "steps": ["layout/segmentation/region", "layout/segmentation/line"],
      "parameters": {
        "models": {
          "type": "string",
          "format": "file",
          "content-type": "text/directory",
          "cacheable": true,
          "description": "Path to directory containing models to be used (See https://qurator-data.de/eynollah)",
          "required": true
        },
        "dpi": {
          "type": "number",
          "format": "float",
          "description": "pixel density in dots per inch (overrides any meta-data in the images); ignored if <= 0 (with fall-back 230)",
          "default": 0
        },
        "full_layout": {
          "type": "boolean",
          "default": true,
          "description": "Try to detect all element subtypes, including drop-caps and headings"
        },
        "tables": {
          "type": "boolean",
          "default": false,
          "description": "Try to detect table regions"
        },
        "curved_line": {
          "type": "boolean",
          "default": false,
          "description": "try to return contour of textlines instead of just rectangle bounding box. Needs more processing time"
        },
        "allow_scaling": {
          "type": "boolean",
          "default": false,
          "description": "check the resolution against the number of detected columns and if needed, scale the image up or down during layout detection (heuristic to improve quality and performance)"
        },
        "headers_off": {
          "type": "boolean",
          "default": false,
          "description": "ignore the special role of headings during reading order detection"
        }
      },
      "resources": [
        {
          "description": "models for eynollah (TensorFlow SavedModel format)",
          "url": "https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz",
          "name": "default",
          "size": 1894627041,
          "type": "archive",
          "path_in_archive": "models_eynollah"
        }
      ]
    }
  }
}
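Since the OCR-D processor below reads its defaults from this file, a quick sanity check is to load it and inspect a parameter; this is a sketch and assumes the repository root as working directory.

    import json

    with open("src/eynollah/ocrd-tool.json") as f:
        tool = json.load(f)

    params = tool["tools"]["ocrd-eynollah-segment"]["parameters"]
    print(params["dpi"]["default"])          # 0, i.e. DPI is taken from image metadata
    print(params["full_layout"]["default"])  # True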
11 src/eynollah/ocrd_cli.py Normal file
@@ -0,0 +1,11 @@
from .processor import EynollahProcessor
from click import command
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor

@command()
@ocrd_cli_options
def main(*args, **kwargs):
    return ocrd_cli_wrap_processor(EynollahProcessor, *args, **kwargs)

if __name__ == '__main__':
    main()
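The wrapper defers all argument handling to ocrd_cli_wrap_processor; a way to see the options OCR-D injects, without processing anything, is to ask the command for help (a sketch, again using click's test runner):

    from click.testing import CliRunner
    from eynollah.ocrd_cli import main

    print(CliRunner().invoke(main, ["--help"]).output)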
175 src/eynollah/plot.py Normal file
@@ -0,0 +1,175 @@
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import os.path
import cv2
from scipy.ndimage import gaussian_filter1d

from .utils import crop_image_inside_box
from .utils.rotate import rotate_image_different
from .utils.resize import resize_image

class EynollahPlotter():
    """
    Class collecting all the plotting and image writing methods
    """

    def __init__(
        self,
        *,
        dir_out,
        dir_of_all,
        dir_save_page,
        dir_of_deskewed,
        dir_of_layout,
        dir_of_cropped_images,
        image_filename_stem,
        image_org=None,
        scale_x=1,
        scale_y=1,
    ):
        self.dir_out = dir_out
        self.dir_of_all = dir_of_all
        self.dir_save_page = dir_save_page
        self.dir_of_layout = dir_of_layout
        self.dir_of_cropped_images = dir_of_cropped_images
        self.dir_of_deskewed = dir_of_deskewed
        self.image_filename_stem = image_filename_stem
        # XXX TODO hacky: these cannot be set at init time
        self.image_org = image_org
        self.scale_x = scale_x
        self.scale_y = scale_y

    def save_plot_of_layout_main(self, text_regions_p, image_page):
        if self.dir_of_layout is not None:
            values = np.unique(text_regions_p[:, :])
            # pixels = ['Background', 'Main text', 'Heading', 'Marginalia', 'Drop capitals', 'Images', 'Separators', 'Tables', 'Graphics']
            pixels = ['Background', 'Main text', 'Image', 'Separator', 'Marginalia']
            values_indexes = [0, 1, 2, 3, 4]
            plt.figure(figsize=(40, 40))
            plt.rcParams["font.size"] = "40"
            im = plt.imshow(text_regions_p[:, :])
            colors = [im.cmap(im.norm(value)) for value in values]
            patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values]
            plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=40)
            plt.savefig(os.path.join(self.dir_of_layout, self.image_filename_stem + "_layout_main.png"))

    def save_plot_of_layout_main_all(self, text_regions_p, image_page):
        if self.dir_of_all is not None:
            values = np.unique(text_regions_p[:, :])
            # pixels = ['Background', 'Main text', 'Heading', 'Marginalia', 'Drop capitals', 'Images', 'Separators', 'Tables', 'Graphics']
            pixels = ['Background', 'Main text', 'Image', 'Separator', 'Marginalia']
            values_indexes = [0, 1, 2, 3, 4]
            plt.figure(figsize=(80, 40))
            plt.rcParams["font.size"] = "40"
            plt.subplot(1, 2, 1)
            plt.imshow(image_page)
            plt.subplot(1, 2, 2)
            im = plt.imshow(text_regions_p[:, :])
            colors = [im.cmap(im.norm(value)) for value in values]
            patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values]
            plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60)
            plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + "_layout_main_and_page.png"))

    def save_plot_of_layout(self, text_regions_p, image_page):
        if self.dir_of_layout is not None:
            values = np.unique(text_regions_p[:, :])
            pixels = ["Background", "Main text", "Header", "Marginalia", "Drop capital", "Image", "Separator", "Tables"]
            values_indexes = [0, 1, 2, 8, 4, 5, 6, 10]
            plt.figure(figsize=(40, 40))
            plt.rcParams["font.size"] = "40"
            im = plt.imshow(text_regions_p[:, :])
            colors = [im.cmap(im.norm(value)) for value in values]
            patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values]
            plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=40)
            plt.savefig(os.path.join(self.dir_of_layout, self.image_filename_stem + "_layout.png"))

    def save_plot_of_layout_all(self, text_regions_p, image_page):
        if self.dir_of_all is not None:
            values = np.unique(text_regions_p[:, :])
            pixels = ["Background", "Main text", "Header", "Marginalia", "Drop capital", "Image", "Separator", "Tables"]
            values_indexes = [0, 1, 2, 8, 4, 5, 6, 10]
            plt.figure(figsize=(80, 40))
            plt.rcParams["font.size"] = "40"
            plt.subplot(1, 2, 1)
            plt.imshow(image_page)
            plt.subplot(1, 2, 2)
            im = plt.imshow(text_regions_p[:, :])
            colors = [im.cmap(im.norm(value)) for value in values]
            patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values]
            plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60)
            plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + "_layout_and_page.png"))

    def save_plot_of_textlines(self, textline_mask_tot_ea, image_page):
        if self.dir_of_all is not None:
            values = np.unique(textline_mask_tot_ea[:, :])
            pixels = ["Background", "Textlines"]
            values_indexes = [0, 1]
            plt.figure(figsize=(80, 40))
            plt.rcParams["font.size"] = "40"
            plt.subplot(1, 2, 1)
            plt.imshow(image_page)
            plt.subplot(1, 2, 2)
            im = plt.imshow(textline_mask_tot_ea[:, :])
            colors = [im.cmap(im.norm(value)) for value in values]
            patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values]
            plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60)
            plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + "_textline_and_page.png"))

    def save_deskewed_image(self, slope_deskew):
        if self.dir_of_all is not None:
            cv2.imwrite(os.path.join(self.dir_of_all, self.image_filename_stem + "_org.png"), self.image_org)
        if self.dir_of_deskewed is not None:
            img_rotated = rotate_image_different(self.image_org, slope_deskew)
            cv2.imwrite(os.path.join(self.dir_of_deskewed, self.image_filename_stem + "_deskewed.png"), img_rotated)

    def save_page_image(self, image_page):
        if self.dir_of_all is not None:
            cv2.imwrite(os.path.join(self.dir_of_all, self.image_filename_stem + "_page.png"), image_page)
        if self.dir_save_page is not None:
            cv2.imwrite(os.path.join(self.dir_save_page, self.image_filename_stem + "_page.png"), image_page)

    def save_enhanced_image(self, img_res):
        cv2.imwrite(os.path.join(self.dir_out, self.image_filename_stem + "_enhanced.png"), img_res)

    def save_plot_of_textline_density(self, img_patch_org):
        if self.dir_of_all is not None:
            plt.figure(figsize=(80, 40))
            plt.rcParams['font.size'] = '50'
            plt.subplot(1, 2, 1)
            plt.imshow(img_patch_org)
            plt.subplot(1, 2, 2)
            plt.plot(gaussian_filter1d(img_patch_org.sum(axis=1), 3), np.array(range(len(gaussian_filter1d(img_patch_org.sum(axis=1), 3)))), linewidth=8)
            plt.xlabel('Density of textline prediction in direction of X axis', fontsize=60)
            plt.ylabel('Height', fontsize=60)
            plt.yticks([0, len(gaussian_filter1d(img_patch_org.sum(axis=1), 3))])
            plt.gca().invert_yaxis()
            plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + '_density_of_textline.png'))

    def save_plot_of_rotation_angle(self, angles, var_res):
        if self.dir_of_all is not None:
            plt.figure(figsize=(60, 30))
            plt.rcParams['font.size'] = '50'
            plt.plot(angles, np.array(var_res), '-o', markersize=25, linewidth=4)
            plt.xlabel('angle', fontsize=50)
            plt.ylabel('variance of sum of rotated textline in direction of x axis', fontsize=50)
            plt.plot(angles[np.argmax(var_res)], var_res[np.argmax(np.array(var_res))], '*', markersize=50, label='Angle of deskewing=' + str("{:.2f}".format(angles[np.argmax(var_res)])) + r'$\degree$')
            plt.legend(loc='best')
            plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + '_rotation_angle.png'))

    def write_images_into_directory(self, img_contours, image_page):
        if self.dir_of_cropped_images is not None:
            index = 0
            for cont_ind in img_contours:
                x, y, w, h = cv2.boundingRect(cont_ind)
                box = [x, y, w, h]
                cropped_page, page_coord = crop_image_inside_box(box, image_page)

                cropped_page = resize_image(cropped_page, int(cropped_page.shape[0] / self.scale_y), int(cropped_page.shape[1] / self.scale_x))

                path = os.path.join(self.dir_of_cropped_images, self.image_filename_stem + "_" + str(index) + ".jpg")
                cv2.imwrite(path, cropped_page)
                index += 1
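A minimal sketch of driving one of these helpers with synthetic data; the directory name "plots" is an assumption and must exist before the call.

    import numpy as np
    from eynollah.plot import EynollahPlotter

    plotter = EynollahPlotter(
        dir_out="plots", dir_of_all="plots", dir_save_page=None,
        dir_of_deskewed=None, dir_of_layout=None, dir_of_cropped_images=None,
        image_filename_stem="demo")

    # Fake textline prediction: two horizontal bands of "textline" pixels.
    mask = np.zeros((400, 300), dtype=np.uint8)
    mask[50:60, :] = 1
    mask[120:130, :] = 1
    plotter.save_plot_of_textline_density(mask)  # writes plots/demo_density_of_textline.png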
68 src/eynollah/processor.py Normal file
@@ -0,0 +1,68 @@
from json import loads
from pkg_resources import resource_string
from tempfile import NamedTemporaryFile
from pathlib import Path
from os.path import join

from PIL import Image

from ocrd import Processor
from ocrd_modelfactory import page_from_file, exif_from_filename
from ocrd_models import OcrdFile, OcrdExif
from ocrd_models.ocrd_page import to_xml
from ocrd_utils import (
    getLogger,
    MIMETYPE_PAGE,
    assert_file_grp_cardinality,
    make_file_id
)

from .eynollah import Eynollah
from .utils.pil_cv2 import pil2cv

OCRD_TOOL = loads(resource_string(__name__, 'ocrd-tool.json').decode('utf8'))

class EynollahProcessor(Processor):

    def __init__(self, *args, **kwargs):
        kwargs['ocrd_tool'] = OCRD_TOOL['tools']['ocrd-eynollah-segment']
        kwargs['version'] = OCRD_TOOL['version']
        super().__init__(*args, **kwargs)

    def process(self):
        LOG = getLogger('eynollah')
        assert_file_grp_cardinality(self.input_file_grp, 1)
        assert_file_grp_cardinality(self.output_file_grp, 1)
        for n, input_file in enumerate(self.input_files):
            page_id = input_file.pageId or input_file.ID
            LOG.info("INPUT FILE %s (%d/%d)", page_id, n + 1, len(self.input_files))
            pcgts = page_from_file(self.workspace.download_file(input_file))
            LOG.debug('width %s height %s', pcgts.get_Page().imageWidth, pcgts.get_Page().imageHeight)
            self.add_metadata(pcgts)
            page = pcgts.get_Page()
            # XXX loses DPI information
            # page_image, _, _ = self.workspace.image_from_page(page, page_id, feature_filter='binarized')
            image_filename = self.workspace.download_file(next(self.workspace.mets.find_files(local_filename=page.imageFilename))).local_filename
            eynollah_kwargs = {
                'dir_models': self.resolve_resource(self.parameter['models']),
                'allow_enhancement': False,
                'curved_line': self.parameter['curved_line'],
                'full_layout': self.parameter['full_layout'],
                'allow_scaling': self.parameter['allow_scaling'],
                'headers_off': self.parameter['headers_off'],
                'tables': self.parameter['tables'],
                'override_dpi': self.parameter['dpi'],
                'logger': LOG,
                'pcgts': pcgts,
                'image_filename': image_filename
            }
            Eynollah(**eynollah_kwargs).run()
            file_id = make_file_id(input_file, self.output_file_grp)
            pcgts.set_pcGtsId(file_id)
            self.workspace.add_file(
                ID=file_id,
                file_grp=self.output_file_grp,
                pageId=page_id,
                mimetype=MIMETYPE_PAGE,
                local_filename=join(self.output_file_grp, file_id) + '.xml',
                content=to_xml(pcgts))
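Because the tool description is loaded via resource_string at import time, the same metadata is reachable from Python once the package is installed; a small sketch:

    from eynollah.processor import OCRD_TOOL

    tool = OCRD_TOOL['tools']['ocrd-eynollah-segment']
    print(tool['executable'])                 # ocrd-eynollah-segment
    print(sorted(tool['parameters'].keys()))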
383 src/eynollah/sbb_binarize.py Normal file
@@ -0,0 +1,383 @@
"""
|
||||
Tool to load model and binarize a given image.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from glob import glob
|
||||
from os import environ, devnull
|
||||
from os.path import join
|
||||
from warnings import catch_warnings, simplefilter
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
import cv2
|
||||
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
|
||||
stderr = sys.stderr
|
||||
sys.stderr = open(devnull, 'w')
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras.models import load_model
|
||||
from tensorflow.python.keras import backend as tensorflow_backend
|
||||
sys.stderr = stderr
|
||||
|
||||
|
||||
import logging
|
||||
|
||||
def resize_image(img_in, input_height, input_width):
|
||||
return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST)
|
||||
|
||||
class SbbBinarizer:
|
||||
|
||||
def __init__(self, model_dir, logger=None):
|
||||
self.model_dir = model_dir
|
||||
self.log = logger if logger else logging.getLogger('SbbBinarizer')
|
||||
|
||||
self.start_new_session()
|
||||
|
||||
self.model_files = glob(self.model_dir+"/*/", recursive = True)
|
||||
|
||||
self.models = []
|
||||
for model_file in self.model_files:
|
||||
self.models.append(self.load_model(model_file))
|
||||
|
||||
def start_new_session(self):
|
||||
config = tf.compat.v1.ConfigProto()
|
||||
config.gpu_options.allow_growth = True
|
||||
|
||||
self.session = tf.compat.v1.Session(config=config) # tf.InteractiveSession()
|
||||
tensorflow_backend.set_session(self.session)
|
||||
|
||||
def end_session(self):
|
||||
tensorflow_backend.clear_session()
|
||||
self.session.close()
|
||||
del self.session
|
||||
|
||||
def load_model(self, model_name):
|
||||
model = load_model(join(self.model_dir, model_name), compile=False)
|
||||
model_height = model.layers[len(model.layers)-1].output_shape[1]
|
||||
model_width = model.layers[len(model.layers)-1].output_shape[2]
|
||||
n_classes = model.layers[len(model.layers)-1].output_shape[3]
|
||||
return model, model_height, model_width, n_classes
|
||||
|
||||
def predict(self, model_in, img, use_patches, n_batch_inference=5):
|
||||
tensorflow_backend.set_session(self.session)
|
||||
model, model_height, model_width, n_classes = model_in
|
||||
|
||||
img_org_h = img.shape[0]
|
||||
img_org_w = img.shape[1]
|
||||
|
||||
if img.shape[0] < model_height and img.shape[1] >= model_width:
|
||||
img_padded = np.zeros(( model_height, img.shape[1], img.shape[2] ))
|
||||
|
||||
index_start_h = int( abs( img.shape[0] - model_height) /2.)
|
||||
index_start_w = 0
|
||||
|
||||
img_padded [ index_start_h: index_start_h+img.shape[0], :, : ] = img[:,:,:]
|
||||
|
||||
elif img.shape[0] >= model_height and img.shape[1] < model_width:
|
||||
img_padded = np.zeros(( img.shape[0], model_width, img.shape[2] ))
|
||||
|
||||
index_start_h = 0
|
||||
index_start_w = int( abs( img.shape[1] - model_width) /2.)
|
||||
|
||||
img_padded [ :, index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]
|
||||
|
||||
|
||||
elif img.shape[0] < model_height and img.shape[1] < model_width:
|
||||
img_padded = np.zeros(( model_height, model_width, img.shape[2] ))
|
||||
|
||||
index_start_h = int( abs( img.shape[0] - model_height) /2.)
|
||||
index_start_w = int( abs( img.shape[1] - model_width) /2.)
|
||||
|
||||
img_padded [ index_start_h: index_start_h+img.shape[0], index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]
|
||||
|
||||
else:
|
||||
index_start_h = 0
|
||||
index_start_w = 0
|
||||
img_padded = np.copy(img)
|
||||
|
||||
|
||||
img = np.copy(img_padded)
|
||||
|
||||
|
||||
|
||||
if use_patches:
|
||||
|
||||
margin = int(0.1 * model_width)
|
||||
|
||||
width_mid = model_width - 2 * margin
|
||||
height_mid = model_height - 2 * margin
|
||||
|
||||
|
||||
img = img / float(255.0)
|
||||
|
||||
img_h = img.shape[0]
|
||||
img_w = img.shape[1]
|
||||
|
||||
prediction_true = np.zeros((img_h, img_w, 3))
|
||||
mask_true = np.zeros((img_h, img_w))
|
||||
nxf = img_w / float(width_mid)
|
||||
nyf = img_h / float(height_mid)
|
||||
|
||||
if nxf > int(nxf):
|
||||
nxf = int(nxf) + 1
|
||||
else:
|
||||
nxf = int(nxf)
|
||||
|
||||
if nyf > int(nyf):
|
||||
nyf = int(nyf) + 1
|
||||
else:
|
||||
nyf = int(nyf)
|
||||
|
||||
|
||||
list_i_s = []
|
||||
list_j_s = []
|
||||
list_x_u = []
|
||||
list_x_d = []
|
||||
list_y_u = []
|
||||
list_y_d = []
|
||||
|
||||
batch_indexer = 0
|
||||
|
||||
img_patch = np.zeros((n_batch_inference, model_height, model_width,3))
|
||||
|
||||
for i in range(nxf):
|
||||
for j in range(nyf):
|
||||
|
||||
if i == 0:
|
||||
index_x_d = i * width_mid
|
||||
index_x_u = index_x_d + model_width
|
||||
elif i > 0:
|
||||
index_x_d = i * width_mid
|
||||
index_x_u = index_x_d + model_width
|
||||
|
||||
if j == 0:
|
||||
index_y_d = j * height_mid
|
||||
index_y_u = index_y_d + model_height
|
||||
elif j > 0:
|
||||
index_y_d = j * height_mid
|
||||
index_y_u = index_y_d + model_height
|
||||
|
||||
if index_x_u > img_w:
|
||||
index_x_u = img_w
|
||||
index_x_d = img_w - model_width
|
||||
if index_y_u > img_h:
|
||||
index_y_u = img_h
|
||||
index_y_d = img_h - model_height
|
||||
|
||||
|
||||
list_i_s.append(i)
|
||||
list_j_s.append(j)
|
||||
list_x_u.append(index_x_u)
|
||||
list_x_d.append(index_x_d)
|
||||
list_y_d.append(index_y_d)
|
||||
list_y_u.append(index_y_u)
|
||||
|
||||
|
||||
img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :]
|
||||
|
||||
batch_indexer = batch_indexer + 1
|
||||
|
||||
|
||||
|
||||
if batch_indexer == n_batch_inference:
|
||||
|
||||
label_p_pred = model.predict(img_patch,verbose=0)
|
||||
|
||||
seg = np.argmax(label_p_pred, axis=3)
|
||||
|
||||
#print(seg.shape, len(seg), len(list_i_s))
|
||||
|
||||
indexer_inside_batch = 0
|
||||
for i_batch, j_batch in zip(list_i_s, list_j_s):
|
||||
seg_in = seg[indexer_inside_batch,:,:]
|
||||
seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2)
|
||||
|
||||
index_y_u_in = list_y_u[indexer_inside_batch]
|
||||
index_y_d_in = list_y_d[indexer_inside_batch]
|
||||
|
||||
index_x_u_in = list_x_u[indexer_inside_batch]
|
||||
index_x_d_in = list_x_d[indexer_inside_batch]
|
||||
|
||||
if i_batch == 0 and j_batch == 0:
|
||||
seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch == nxf - 1 and j_batch == nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
|
||||
elif i_batch == 0 and j_batch == nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch == nxf - 1 and j_batch == 0:
|
||||
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
|
||||
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
|
||||
elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
|
||||
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
|
||||
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
|
||||
else:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
|
||||
|
||||
indexer_inside_batch = indexer_inside_batch +1
|
||||
|
||||
|
||||
list_i_s = []
|
||||
list_j_s = []
|
||||
list_x_u = []
|
||||
list_x_d = []
|
||||
list_y_u = []
|
||||
list_y_d = []
|
||||
|
||||
batch_indexer = 0
|
||||
|
||||
img_patch = np.zeros((n_batch_inference, model_height, model_width,3))
|
||||
|
||||
elif i==(nxf-1) and j==(nyf-1):
|
||||
label_p_pred = model.predict(img_patch,verbose=0)
|
||||
|
||||
seg = np.argmax(label_p_pred, axis=3)
|
||||
|
||||
#print(seg.shape, len(seg), len(list_i_s))
|
||||
|
||||
indexer_inside_batch = 0
|
||||
for i_batch, j_batch in zip(list_i_s, list_j_s):
|
||||
seg_in = seg[indexer_inside_batch,:,:]
|
||||
seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2)
|
||||
|
||||
index_y_u_in = list_y_u[indexer_inside_batch]
|
||||
index_y_d_in = list_y_d[indexer_inside_batch]
|
||||
|
||||
index_x_u_in = list_x_u[indexer_inside_batch]
|
||||
index_x_d_in = list_x_d[indexer_inside_batch]
|
||||
|
||||
if i_batch == 0 and j_batch == 0:
|
||||
seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch == nxf - 1 and j_batch == nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
|
||||
elif i_batch == 0 and j_batch == nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch == nxf - 1 and j_batch == 0:
|
||||
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
|
||||
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
|
||||
elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
|
||||
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
|
||||
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
|
||||
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
|
||||
else:
|
||||
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
|
||||
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
|
||||
|
||||
indexer_inside_batch = indexer_inside_batch +1
|
||||
|
||||
|
||||
list_i_s = []
|
||||
list_j_s = []
|
||||
list_x_u = []
|
||||
list_x_d = []
|
||||
list_y_u = []
|
||||
list_y_d = []
|
||||
|
||||
batch_indexer = 0
|
||||
|
||||
img_patch = np.zeros((n_batch_inference, model_height, model_width,3))
|
||||
|
||||
|
||||
|
||||
prediction_true = prediction_true[index_start_h: index_start_h+img_org_h, index_start_w: index_start_w+img_org_w,:]
|
||||
prediction_true = prediction_true.astype(np.uint8)
|
||||
|
||||
else:
|
||||
img_h_page = img.shape[0]
|
||||
img_w_page = img.shape[1]
|
||||
img = img / float(255.0)
|
||||
img = resize_image(img, model_height, model_width)
|
||||
|
||||
label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2]))
|
||||
|
||||
seg = np.argmax(label_p_pred, axis=3)[0]
|
||||
seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
|
||||
prediction_true = resize_image(seg_color, img_h_page, img_w_page)
|
||||
prediction_true = prediction_true.astype(np.uint8)
|
||||
return prediction_true[:,:,0]
|
||||
|
||||
def run(self, image=None, image_path=None, save=None, use_patches=False, dir_in=None, dir_out=None):
|
||||
print(dir_in,'dir_in')
|
||||
if not dir_in:
|
||||
if (image is not None and image_path is not None) or \
|
||||
(image is None and image_path is None):
|
||||
raise ValueError("Must pass either a opencv2 image or an image_path")
|
||||
if image_path is not None:
|
||||
image = cv2.imread(image_path)
|
||||
img_last = 0
|
||||
for n, (model, model_file) in enumerate(zip(self.models, self.model_files)):
|
||||
self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files)))
|
||||
|
||||
res = self.predict(model, image, use_patches)
|
||||
|
||||
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
|
||||
res[:, :][res[:, :] == 0] = 2
|
||||
res = res - 1
|
||||
res = res * 255
|
||||
img_fin[:, :, 0] = res
|
||||
img_fin[:, :, 1] = res
|
||||
img_fin[:, :, 2] = res
|
||||
|
||||
img_fin = img_fin.astype(np.uint8)
|
||||
img_fin = (res[:, :] == 0) * 255
|
||||
img_last = img_last + img_fin
|
||||
|
||||
kernel = np.ones((5, 5), np.uint8)
|
||||
img_last[:, :][img_last[:, :] > 0] = 255
|
||||
img_last = (img_last[:, :] == 0) * 255
|
||||
if save:
|
||||
cv2.imwrite(save, img_last)
|
||||
return img_last
|
||||
else:
|
||||
ls_imgs = os.listdir(dir_in)
|
||||
for image_name in ls_imgs:
|
||||
image_stem = image_name.split('.')[0]
|
||||
print(image_name,'image_name')
|
||||
image = cv2.imread(os.path.join(dir_in,image_name) )
|
||||
img_last = 0
|
||||
for n, (model, model_file) in enumerate(zip(self.models, self.model_files)):
|
||||
self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files)))
|
||||
|
||||
res = self.predict(model, image, use_patches)
|
||||
|
||||
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
|
||||
res[:, :][res[:, :] == 0] = 2
|
||||
res = res - 1
|
||||
res = res * 255
|
||||
img_fin[:, :, 0] = res
|
||||
img_fin[:, :, 1] = res
|
||||
img_fin[:, :, 2] = res
|
||||
|
||||
img_fin = img_fin.astype(np.uint8)
|
||||
img_fin = (res[:, :] == 0) * 255
|
||||
img_last = img_last + img_fin
|
||||
|
||||
kernel = np.ones((5, 5), np.uint8)
|
||||
img_last[:, :][img_last[:, :] > 0] = 255
|
||||
img_last = (img_last[:, :] == 0) * 255
|
||||
|
||||
cv2.imwrite(os.path.join(dir_out,image_stem+'.png'), img_last)
|
||||
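Used directly from Python, the binarizer mirrors the `binarization` CLI command above; a sketch, where the model directory and image paths are assumptions:

    from eynollah.sbb_binarize import SbbBinarizer

    # "models_bin" is an assumed path to downloaded binarization models;
    # each subdirectory is loaded as one ensemble member.
    binarizer = SbbBinarizer("models_bin")
    binarizer.run(image_path="page.tif", use_patches=True, save="page.bin.png")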
2289 src/eynollah/utils/__init__.py Normal file
File diff suppressed because it is too large
427 src/eynollah/utils/contour.py Normal file
@@ -0,0 +1,427 @@
import cv2
|
||||
import numpy as np
|
||||
from shapely import geometry
|
||||
|
||||
from .rotate import rotate_image, rotation_image_new
|
||||
from multiprocessing import Process, Queue, cpu_count
|
||||
from multiprocessing import Pool
|
||||
def contours_in_same_horizon(cy_main_hor):
|
||||
X1 = np.zeros((len(cy_main_hor), len(cy_main_hor)))
|
||||
X2 = np.zeros((len(cy_main_hor), len(cy_main_hor)))
|
||||
|
||||
X1[0::1, :] = cy_main_hor[:]
|
||||
X2 = X1.T
|
||||
|
||||
X_dif = np.abs(X2 - X1)
|
||||
args_help = np.array(range(len(cy_main_hor)))
|
||||
all_args = []
|
||||
for i in range(len(cy_main_hor)):
|
||||
list_h = list(args_help[X_dif[i, :] <= 20])
|
||||
list_h.append(i)
|
||||
if len(list_h) > 1:
|
||||
all_args.append(list(set(list_h)))
|
||||
return np.unique(np.array(all_args, dtype=object))
|
||||
|
||||
def find_contours_mean_y_diff(contours_main):
|
||||
M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))]
|
||||
cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
|
||||
return np.mean(np.diff(np.sort(np.array(cy_main))))
|
||||
|
||||
|
||||
def get_text_region_boxes_by_given_contours(contours):
|
||||
|
||||
kernel = np.ones((5, 5), np.uint8)
|
||||
boxes = []
|
||||
contours_new = []
|
||||
for jj in range(len(contours)):
|
||||
x, y, w, h = cv2.boundingRect(contours[jj])
|
||||
|
||||
boxes.append([x, y, w, h])
|
||||
contours_new.append(contours[jj])
|
||||
|
||||
del contours
|
||||
return boxes, contours_new
|
||||
|
||||
def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area):
|
||||
found_polygons_early = list()
|
||||
|
||||
for jv,c in enumerate(contours):
|
||||
if len(c) < 3: # A polygon cannot have less than 3 points
|
||||
continue
|
||||
|
||||
polygon = geometry.Polygon([point[0] for point in c])
|
||||
area = polygon.area
|
||||
if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1: # and hierarchy[0][jv][3]==-1 :
|
||||
found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint))
|
||||
return found_polygons_early
|
||||
|
||||
def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area):
|
||||
found_polygons_early = list()
|
||||
|
||||
for jv,c in enumerate(contours):
|
||||
if len(c) < 3: # A polygon cannot have less than 3 points
|
||||
continue
|
||||
|
||||
polygon = geometry.Polygon([point[0] for point in c])
|
||||
# area = cv2.contourArea(c)
|
||||
area = polygon.area
|
||||
##print(np.prod(thresh.shape[:2]))
|
||||
# Check that polygon has area greater than minimal area
|
||||
# print(hierarchy[0][jv][3],hierarchy )
|
||||
if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 :
|
||||
# print(c[0][0][1])
|
||||
found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32))
|
||||
return found_polygons_early
|
||||
|
||||
def find_new_features_of_contours(contours_main):
|
||||
|
||||
areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
|
||||
M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))]
|
||||
cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
|
||||
cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
|
||||
try:
|
||||
x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
|
||||
|
||||
argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
|
||||
|
||||
x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0] for j in range(len(contours_main))])
|
||||
y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1] for j in range(len(contours_main))])
|
||||
|
||||
x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
|
||||
|
||||
y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))])
|
||||
y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))])
|
||||
except:
|
||||
x_min_main = np.array([np.min(contours_main[j][:, 0]) for j in range(len(contours_main))])
|
||||
|
||||
argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) for j in range(len(contours_main))])
|
||||
|
||||
x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] for j in range(len(contours_main))])
|
||||
y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] for j in range(len(contours_main))])
|
||||
|
||||
x_max_main = np.array([np.max(contours_main[j][:, 0]) for j in range(len(contours_main))])
|
||||
|
||||
y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))])
|
||||
y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))])
|
||||
|
||||
# dis_x=np.abs(x_max_main-x_min_main)
|
||||
|
||||
return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin
|
||||
def find_features_of_contours(contours_main):
|
||||
|
||||
|
||||
areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
|
||||
M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))]
|
||||
cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
|
||||
cy_main=[(M_main[j]['m01']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
|
||||
x_min_main=np.array([np.min(contours_main[j][:,0,0]) for j in range(len(contours_main))])
|
||||
x_max_main=np.array([np.max(contours_main[j][:,0,0]) for j in range(len(contours_main))])
|
||||
|
||||
y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))])
|
||||
y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))])
|
||||
|
||||
|
||||
return y_min_main, y_max_main
|
||||
def return_parent_contours(contours, hierarchy):
|
||||
contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1]
|
||||
return contours_parent
|
||||
|
||||
def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002):
|
||||
|
||||
# pixels of images are identified by 5
|
||||
if len(region_pre_p.shape) == 3:
|
||||
cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
|
||||
else:
|
||||
cnts_images = (region_pre_p[:, :] == pixel) * 1
|
||||
cnts_images = cnts_images.astype(np.uint8)
|
||||
cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
|
||||
imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
|
||||
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
|
||||
contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
contours_imgs = return_parent_contours(contours_imgs, hierarchy)
|
||||
contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area)
|
||||
|
||||
return contours_imgs
|
||||
|
||||
def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, indexes_r_con_per_pro, img, slope_first):
|
||||
cnts_org_per_each_subprocess = []
|
||||
index_by_text_region_contours = []
|
||||
for mv in range(len(contours_per_process)):
|
||||
index_by_text_region_contours.append(indexes_r_con_per_pro[mv])
|
||||
|
||||
img_copy = np.zeros(img.shape)
|
||||
img_copy = cv2.fillPoly(img_copy, pts=[contours_per_process[mv]], color=(1, 1, 1))
|
||||
|
||||
img_copy = rotation_image_new(img_copy, -slope_first)
|
||||
|
||||
img_copy = img_copy.astype(np.uint8)
|
||||
imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
|
||||
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
|
||||
cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
|
||||
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
|
||||
|
||||
|
||||
cnts_org_per_each_subprocess.append(cont_int[0])
|
||||
|
||||
queue_of_all_params.put([ cnts_org_per_each_subprocess, index_by_text_region_contours])
|
||||
|
||||
|
||||
def get_textregion_contours_in_org_image_multi(cnts, img, slope_first):
|
||||
|
||||
num_cores = cpu_count()
|
||||
queue_of_all_params = Queue()
|
||||
|
||||
processes = []
|
||||
nh = np.linspace(0, len(cnts), num_cores + 1)
|
||||
indexes_by_text_con = np.array(range(len(cnts)))
|
||||
for i in range(num_cores):
|
||||
contours_per_process = cnts[int(nh[i]) : int(nh[i + 1])]
|
||||
indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])]
|
||||
|
||||
processes.append(Process(target=do_work_of_contours_in_image, args=(queue_of_all_params, contours_per_process, indexes_text_con_per_process, img,slope_first )))
|
||||
for i in range(num_cores):
|
||||
processes[i].start()
|
||||
cnts_org = []
|
||||
all_index_text_con = []
|
||||
for i in range(num_cores):
|
||||
list_all_par = queue_of_all_params.get(True)
|
||||
contours_for_sub_process = list_all_par[0]
|
||||
indexes_for_sub_process = list_all_par[1]
|
||||
for j in range(len(contours_for_sub_process)):
|
||||
cnts_org.append(contours_for_sub_process[j])
|
||||
all_index_text_con.append(indexes_for_sub_process[j])
|
||||
for i in range(num_cores):
|
||||
processes[i].join()
|
||||
|
||||
print(all_index_text_con)
|
||||
return cnts_org
|
||||
def loop_contour_image(index_l, cnts,img, slope_first):
|
||||
img_copy = np.zeros(img.shape)
|
||||
img_copy = cv2.fillPoly(img_copy, pts=[cnts[index_l]], color=(1, 1, 1))
|
||||
|
||||
# plt.imshow(img_copy)
|
||||
# plt.show()
|
||||
|
||||
# print(img.shape,'img')
|
||||
img_copy = rotation_image_new(img_copy, -slope_first)
|
||||
##print(img_copy.shape,'img_copy')
|
||||
# plt.imshow(img_copy)
|
||||
# plt.show()
|
||||
|
||||
img_copy = img_copy.astype(np.uint8)
|
||||
imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
|
||||
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
|
||||
cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
|
||||
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
|
||||
# print(np.shape(cont_int[0]))
|
||||
return cont_int[0]
|
||||
|
||||
def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first):
|
||||
|
||||
cnts_org = []
|
||||
# print(cnts,'cnts')
|
||||
with Pool(cpu_count()) as p:
|
||||
cnts_org = p.starmap(loop_contour_image, [(index_l,cnts, img,slope_first) for index_l in range(len(cnts))])
|
||||
|
||||
return cnts_org
|
||||
|
||||
def get_textregion_contours_in_org_image(cnts, img, slope_first):
|
||||
|
||||
cnts_org = []
|
||||
# print(cnts,'cnts')
|
||||
for i in range(len(cnts)):
|
||||
img_copy = np.zeros(img.shape)
|
||||
img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))
|
||||
|
||||
# plt.imshow(img_copy)
|
||||
# plt.show()
|
||||
|
||||
# print(img.shape,'img')
|
||||
img_copy = rotation_image_new(img_copy, -slope_first)
|
||||
##print(img_copy.shape,'img_copy')
|
||||
# plt.imshow(img_copy)
|
||||
# plt.show()
|
||||
|
||||
img_copy = img_copy.astype(np.uint8)
|
||||
imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
|
||||
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
|
||||
cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
|
||||
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
|
||||
# print(np.shape(cont_int[0]))
|
||||
cnts_org.append(cont_int[0])
|
||||
|
||||
return cnts_org
|
||||
|
||||
def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first):
    # Faster variant: work at one third of the original resolution and scale
    # the recovered contours back up by the same factor.
    img = cv2.resize(img, (int(img.shape[1] / 3.), int(img.shape[0] / 3.)), interpolation=cv2.INTER_NEAREST)
    cnts = [(i / 3.).astype(np.int32) for i in cnts]
    cnts_org = []
    for i in range(len(cnts)):
        img_copy = np.zeros(img.shape)
        img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))

        img_copy = rotation_image_new(img_copy, -slope_first)

        img_copy = img_copy.astype(np.uint8)
        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(imgray, 0, 255, 0)

        cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
        cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
        cnts_org.append(cont_int[0] * 3)

    return cnts_org

def return_list_of_contours_with_desired_order(ls_cons, sorted_indexes):
    return [ls_cons[sorted_indexes[index]] for index in range(len(sorted_indexes))]

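# Illustrative sketch (not part of the original module): `sorted_indexes` is a
# permutation of positions, so the helper simply gathers the contours in that
# order, e.g.
#
#     return_list_of_contours_with_desired_order(['a', 'b', 'c'], [2, 0, 1])
#     # -> ['c', 'a', 'b']
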
def do_back_rotation_and_get_cnt_back(queue_of_all_params, contours_par_per_process, indexes_r_con_per_pro, img, slope_first):
    # Worker for get_textregion_contours_in_org_image_light(): back-rotates a
    # slice of the contours and reports the results, together with their
    # original indexes, through the shared queue.
    contours_textregion_per_each_subprocess = []
    index_by_text_region_contours = []
    for mv in range(len(contours_par_per_process)):
        img_copy = np.zeros(img.shape)
        img_copy = cv2.fillPoly(img_copy, pts=[contours_par_per_process[mv]], color=(1, 1, 1))

        img_copy = rotation_image_new(img_copy, -slope_first)

        img_copy = img_copy.astype(np.uint8)
        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(imgray, 0, 255, 0)

        cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
        cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
        # Undo the 6x downscaling applied by the caller.
        contours_textregion_per_each_subprocess.append(cont_int[0] * 6)
        index_by_text_region_contours.append(indexes_r_con_per_pro[mv])

    queue_of_all_params.put([contours_textregion_per_each_subprocess, index_by_text_region_contours])

def get_textregion_contours_in_org_image_light(cnts, img, slope_first):
    # Parallel variant: downscale by 6x, split the contours across one worker
    # process per CPU core and restore the original ordering at the end.
    num_cores = cpu_count()
    queue_of_all_params = Queue()
    processes = []
    nh = np.linspace(0, len(cnts), num_cores + 1)
    indexes_by_text_con = np.array(range(len(cnts)))

    img = cv2.resize(img, (int(img.shape[1] / 6.), int(img.shape[0] / 6.)), interpolation=cv2.INTER_NEAREST)
    cnts = [(i / 6.).astype(np.int32) for i in cnts]

    for i in range(num_cores):
        contours_par_per_process = cnts[int(nh[i]) : int(nh[i + 1])]
        indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])]
        processes.append(Process(target=do_back_rotation_and_get_cnt_back, args=(queue_of_all_params, contours_par_per_process, indexes_text_con_per_process, img, slope_first)))

    for i in range(num_cores):
        processes[i].start()

    cnts_org = []
    all_index_text_con = []
    for i in range(num_cores):
        list_all_par = queue_of_all_params.get(True)
        contours_for_subprocess = list_all_par[0]
        indexes_for_subprocess = list_all_par[1]
        for j in range(len(contours_for_subprocess)):
            cnts_org.append(contours_for_subprocess[j])
            all_index_text_con.append(indexes_for_subprocess[j])
    for i in range(num_cores):
        processes[i].join()

    cnts_org = return_list_of_contours_with_desired_order(cnts_org, all_index_text_con)

    return cnts_org

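# Illustrative usage sketch (an assumption, not part of the original module):
# `contours` as returned by cv2.findContours on the deskewed page and
# `slope_deskew` the deskew angle in degrees:
#
#     contours_org = get_textregion_contours_in_org_image_light(contours, img, slope_deskew)
#
# The function works at one sixth of the resolution, back-rotates each filled
# contour in a worker process, and rescales the recovered contours by 6.
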
def return_contours_of_interested_textline(region_pre_p, pixel):
    # Pixels belonging to the class of interest carry the label value `pixel`.
    if len(region_pre_p.shape) == 3:
        cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
    else:
        cnts_images = (region_pre_p[:, :] == pixel) * 1
    cnts_images = cnts_images.astype(np.uint8)
    cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
    imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
    contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    contours_imgs = return_parent_contours(contours_imgs, hierarchy)
    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003)
    return contours_imgs

def return_contours_of_image(image):
    if len(image.shape) == 2:
        image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
    image = image.astype(np.uint8)
    imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours, hierarchy

def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003):
    # Pixels belonging to the class of interest carry the label value `pixel`.
    if len(region_pre_p.shape) == 3:
        cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
    else:
        cnts_images = (region_pre_p[:, :] == pixel) * 1
    cnts_images = cnts_images.astype(np.uint8)
    cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
    imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

    contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    contours_imgs = return_parent_contours(contours_imgs, hierarchy)
    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size)

    return contours_imgs

def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area):
    # Pixels belonging to the class of interest carry the label value `pixel`.
    if len(region_pre_p.shape) == 3:
        cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
    else:
        cnts_images = (region_pre_p[:, :] == pixel) * 1
    cnts_images = cnts_images.astype(np.uint8)
    cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
    imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
    contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    contours_imgs = return_parent_contours(contours_imgs, hierarchy)
    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area)

    # Render the surviving contours as a binary mask.
    img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3))
    img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1))
    return img_ret[:, :, 0]

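# Illustrative sketch (hypothetical label value): extract region polygons for
# one class of a segmentation label map `region_pre_p`, keeping only regions
# above a relative area threshold:
#
#     polygons = return_contours_of_interested_region_by_min_size(
#         region_pre_p, pixel=4, min_size=0.00003)
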
48  src/eynollah/utils/counter.py  Normal file
@@ -0,0 +1,48 @@
from collections import Counter

REGION_ID_TEMPLATE = 'region_%04d'
LINE_ID_TEMPLATE = 'region_%04d_line_%04d'

class EynollahIdCounter():

    def __init__(self, region_idx=0, line_idx=0):
        self._counter = Counter()
        self._initial_region_idx = region_idx
        self._initial_line_idx = line_idx
        self.reset()

    def reset(self):
        self.set('region', self._initial_region_idx)
        self.set('line', self._initial_line_idx)

    def inc(self, name, val=1):
        self._counter.update({name: val})

    def get(self, name):
        return self._counter[name]

    def set(self, name, val):
        self._counter[name] = val

    def region_id(self, region_idx=None):
        if region_idx is None:
            region_idx = self._counter['region']
        return REGION_ID_TEMPLATE % region_idx

    def line_id(self, region_idx=None, line_idx=None):
        if region_idx is None:
            region_idx = self._counter['region']
        if line_idx is None:
            line_idx = self._counter['line']
        return LINE_ID_TEMPLATE % (region_idx, line_idx)

    @property
    def next_region_id(self):
        self.inc('region')
        self.set('line', 0)
        return self.region_id()

    @property
    def next_line_id(self):
        self.inc('line')
        return self.line_id()

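# Illustrative usage sketch (not part of the original file):
#
#     counter = EynollahIdCounter()
#     counter.next_region_id   # -> 'region_0001' (also resets the line counter)
#     counter.next_line_id     # -> 'region_0001_line_0001'
#     counter.next_line_id     # -> 'region_0001_line_0002'
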
502  src/eynollah/utils/drop_capitals.py  Normal file
@@ -0,0 +1,502 @@
import numpy as np
import cv2
from .contour import (
    find_new_features_of_contours,
    return_contours_of_image,
    return_parent_contours,
)

def adhere_drop_capital_region_into_corresponding_textline(
    text_regions_p,
    polygons_of_drop_capitals,
    contours_only_text_parent,
    contours_only_text_parent_h,
    all_box_coord,
    all_box_coord_h,
    all_found_textline_polygons,
    all_found_textline_polygons_h,
    kernel=None,
    curved_line=False,
):
    # Attach every detected drop capital to the nearest textline of the text
    # region it overlaps with.
    cx_m, cy_m, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent)
    cx_h, cy_h, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_h)
    cx_d, cy_d, _, _, y_min_d, y_max_d, _ = find_new_features_of_contours(polygons_of_drop_capitals)

    # Paint each text region's bounding box with the value (index + 1) * 3 so
    # that region membership can later be read off the combined map.
    img_con_all = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3))
    for j_cont in range(len(contours_only_text_parent)):
        img_con_all[all_box_coord[j_cont][0] : all_box_coord[j_cont][1], all_box_coord[j_cont][2] : all_box_coord[j_cont][3], 0] = (j_cont + 1) * 3

    for i_drop in range(len(polygons_of_drop_capitals)):
        img_con_all_copy = np.copy(img_con_all)
        img_con = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3))
        img_con = cv2.fillPoly(img_con, pts=[polygons_of_drop_capitals[i_drop]], color=(1, 1, 1))

        # Adding the drop-capital mask makes overlapped region values leave a
        # remainder of 1 when divided by 3.
        img_con_all_copy[:, :, 0] = img_con_all_copy[:, :, 0] + img_con[:, :, 0]
        img_con_all_copy[:, :, 0][img_con_all_copy[:, :, 0] == 1] = 0

        # Quotient and remainder of the unique map values modulo 3.
        kherej_ghesmat = np.unique(img_con_all_copy[:, :, 0]) / 3
        res_summed_pixels = np.unique(img_con_all_copy[:, :, 0]) % 3
        region_with_intersected_drop = kherej_ghesmat[res_summed_pixels == 1]
        region_with_intersected_drop = region_with_intersected_drop.astype(np.uint8)

        if len(region_with_intersected_drop) == 0:
            # No direct overlap: retry with a dilated drop-capital mask.
            img_con_all_copy = np.copy(img_con_all)
            img_con = cv2.dilate(img_con, kernel, iterations=4)

            img_con_all_copy[:, :, 0] = img_con_all_copy[:, :, 0] + img_con[:, :, 0]
            img_con_all_copy[:, :, 0][img_con_all_copy[:, :, 0] == 1] = 0

            kherej_ghesmat = np.unique(img_con_all_copy[:, :, 0]) / 3
            res_summed_pixels = np.unique(img_con_all_copy[:, :, 0]) % 3
            region_with_intersected_drop = kherej_ghesmat[res_summed_pixels == 1]
            region_with_intersected_drop = region_with_intersected_drop.astype(np.uint8)

        if curved_line:

            if len(region_with_intersected_drop) > 1:
                # Pick the region with the largest pixel overlap.
                sum_pixels_of_intersection = []
                for i in range(len(region_with_intersected_drop)):
                    sum_pixels_of_intersection.append(((img_con_all_copy[:, :, 0] == (region_with_intersected_drop[i] * 3 + 1)) * 1).sum())
                region_final = region_with_intersected_drop[np.argmax(sum_pixels_of_intersection)] - 1

                try:
                    cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)])
                    y_lines = np.array(cy_t)
                    y_lines[y_lines < y_min_d[i_drop]] = 0

                    # Nearest textline below the top of the drop capital.
                    arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop]))

                    cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min])
                    cnt_nearest[:, 0, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 0]
                    cnt_nearest[:, 0, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 1]

                    # Merge textline and drop capital into a single polygon.
                    img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))

                    img_textlines = img_textlines.astype(np.uint8)
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
                    contours_biggest = contours_combined[np.argmax(areas_cnt_text)]

                    all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
                except:
                    pass

            elif len(region_with_intersected_drop) == 1:
                region_final = region_with_intersected_drop[0] - 1

                try:
                    cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)])
                    y_lines = np.array(cy_t)
                    y_lines[y_lines < y_min_d[i_drop]] = 0

                    arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop]))

                    cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min])
                    cnt_nearest[:, 0, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 0]
                    cnt_nearest[:, 0, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 1]

                    img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))

                    img_textlines = img_textlines.astype(np.uint8)
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
                    contours_biggest = contours_combined[np.argmax(areas_cnt_text)]
                    all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
                except:
                    pass

                # Second attempt, with textline centroids shifted into page
                # coordinates via the region bounding box.
                try:
                    cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)])
                    y_lines = all_box_coord[int(region_final)][0] + np.array(cy_t)
                    y_lines[y_lines < y_min_d[i_drop]] = 0

                    arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop]))

                    cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min])
                    cnt_nearest[:, 0, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 0]
                    cnt_nearest[:, 0, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 1]

                    img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))

                    img_textlines = img_textlines.astype(np.uint8)
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
                    contours_biggest = contours_combined[np.argmax(areas_cnt_text)]

                    all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
                except:
                    pass
            else:
                pass

        else:
            if len(region_with_intersected_drop) > 1:
                # Pick the region with the largest pixel overlap.
                sum_pixels_of_intersection = []
                for i in range(len(region_with_intersected_drop)):
                    sum_pixels_of_intersection.append(((img_con_all_copy[:, :, 0] == (region_with_intersected_drop[i] * 3 + 1)) * 1).sum())
                region_final = region_with_intersected_drop[np.argmax(sum_pixels_of_intersection)] - 1

                try:
                    cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)])
                    y_lines = all_box_coord[int(region_final)][0] + np.array(cy_t)
                    y_lines[y_lines < y_min_d[i_drop]] = 0

                    arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop]))

                    # Shift the nearest textline into page coordinates before
                    # merging it with the drop capital.
                    cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min])
                    cnt_nearest[:, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0] + all_box_coord[int(region_final)][2]
                    cnt_nearest[:, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 1] + all_box_coord[int(region_final)][0]

                    img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))

                    img_textlines = img_textlines.astype(np.uint8)
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
                    contours_biggest = contours_combined[np.argmax(areas_cnt_text)]

                    # Back into region-local coordinates.
                    contours_biggest[:, 0, 0] = contours_biggest[:, 0, 0] - all_box_coord[int(region_final)][2]
                    contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] - all_box_coord[int(region_final)][0]

                    contours_biggest = contours_biggest.reshape(np.shape(contours_biggest)[0], np.shape(contours_biggest)[2])
                    all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
                except:
                    pass
            elif len(region_with_intersected_drop) == 1:
                region_final = region_with_intersected_drop[0] - 1

                try:
                    cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)])
                    y_lines = all_box_coord[int(region_final)][0] + np.array(cy_t)
                    y_lines[y_lines < y_min_d[i_drop]] = 0

                    arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop]))

                    cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min])
                    cnt_nearest[:, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0] + all_box_coord[int(region_final)][2]
                    cnt_nearest[:, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 1] + all_box_coord[int(region_final)][0]

                    img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255))
                    img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))

                    img_textlines = img_textlines.astype(np.uint8)
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
                    contours_biggest = contours_combined[np.argmax(areas_cnt_text)]

                    contours_biggest[:, 0, 0] = contours_biggest[:, 0, 0] - all_box_coord[int(region_final)][2]
                    contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] - all_box_coord[int(region_final)][0]

                    contours_biggest = contours_biggest.reshape(np.shape(contours_biggest)[0], np.shape(contours_biggest)[2])
                    all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
                except:
                    pass
            else:
                pass

    return all_found_textline_polygons

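# Illustrative call sketch (argument names follow the signature above; KERNEL
# stands for the morphological kernel used elsewhere in eynollah and is an
# assumption here):
#
#     all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline(
#         text_regions_p, polygons_of_drop_capitals,
#         contours_only_text_parent, contours_only_text_parent_h,
#         all_box_coord, all_box_coord_h,
#         all_found_textline_polygons, all_found_textline_polygons_h,
#         kernel=KERNEL, curved_line=False)
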
def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1):

    drop_only = (layout_no_patch[:, :, 0] == 4) * 1
    contours_drop, hir_on_drop = return_contours_of_image(drop_only)
    contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop)

    areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j]) for j in range(len(contours_drop_parent))])
    areas_cnt_text = areas_cnt_text / float(drop_only.shape[0] * drop_only.shape[1])

    contours_drop_parent = [contours_drop_parent[jz] for jz in range(len(contours_drop_parent)) if areas_cnt_text[jz] > 0.001]
    areas_cnt_text = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > 0.001]

    contours_drop_parent_final = []

    for jj in range(len(contours_drop_parent)):
        x, y, w, h = cv2.boundingRect(contours_drop_parent[jj])

        # How much of the bounding box the contour actually covers, in percent.
        iou_of_box_and_contour = float(drop_only.shape[0] * drop_only.shape[1]) * areas_cnt_text[jj] / float(w * h) * 100
        height_to_width_ratio = h / float(w)
        width_to_height_ratio = w / float(h)

        # Keep only compact, roughly square candidates.
        if iou_of_box_and_contour > 60 and width_to_height_ratio < 1.2 and height_to_width_ratio < 2:
            map_of_drop_contour_bb = np.zeros((layout1.shape[0], layout1.shape[1]))
            map_of_drop_contour_bb[y : y + h, x : x + w] = layout1[y : y + h, x : x + w]

            if (((map_of_drop_contour_bb == 1) * 1).sum() / float(((map_of_drop_contour_bb == 5) * 1).sum()) * 100) >= 15:
                contours_drop_parent_final.append(contours_drop_parent[jj])

    layout_no_patch[:, :, 0][layout_no_patch[:, :, 0] == 4] = 0
    layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=(4, 4, 4))

    return layout_no_patch

3  src/eynollah/utils/is_nan.py  Normal file
@@ -0,0 +1,3 @@
def isNaN(num):
    # NaN is the only value that does not compare equal to itself.
    return num != num

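# Example (sketch):
#
#     isNaN(float('nan'))  # -> True
#     isNaN(1.0)           # -> False
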
197  src/eynollah/utils/marginals.py  Normal file
@@ -0,0 +1,197 @@
import numpy as np
import cv2
from scipy.signal import find_peaks
from scipy.ndimage import gaussian_filter1d

from .contour import find_new_features_of_contours, return_contours_of_interested_region
from .resize import resize_image
from .rotate import rotate_image

def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_version=False, kernel=None):
    mask_marginals = np.zeros((text_with_lines.shape[0], text_with_lines.shape[1]))
    mask_marginals = mask_marginals.astype(np.uint8)

    text_with_lines = text_with_lines.astype(np.uint8)

    text_with_lines_eroded = cv2.erode(text_with_lines, kernel, iterations=5)

    # Erode more aggressively on taller pages (via a temporary upscale).
    if text_with_lines.shape[0] <= 1500:
        pass
    elif text_with_lines.shape[0] > 1500 and text_with_lines.shape[0] <= 1800:
        text_with_lines = resize_image(text_with_lines, int(text_with_lines.shape[0] * 1.5), text_with_lines.shape[1])
        text_with_lines = cv2.erode(text_with_lines, kernel, iterations=5)
        text_with_lines = resize_image(text_with_lines, text_with_lines_eroded.shape[0], text_with_lines_eroded.shape[1])
    else:
        text_with_lines = resize_image(text_with_lines, int(text_with_lines.shape[0] * 1.8), text_with_lines.shape[1])
        text_with_lines = cv2.erode(text_with_lines, kernel, iterations=7)
        text_with_lines = resize_image(text_with_lines, text_with_lines_eroded.shape[0], text_with_lines_eroded.shape[1])

    text_with_lines_y = text_with_lines.sum(axis=0)
    text_with_lines_y_eroded = text_with_lines_eroded.sum(axis=0)

    thickness_along_y_percent = text_with_lines_y_eroded.max() / (float(text_with_lines.shape[0])) * 100

    if thickness_along_y_percent < 30:
        min_textline_thickness = 8
    elif thickness_along_y_percent >= 30 and thickness_along_y_percent < 50:
        min_textline_thickness = 20
    else:
        min_textline_thickness = 40

    if thickness_along_y_percent >= 14:
        # Find low-density valleys in the vertical projection; the outermost
        # valleys give candidate cut points between text block and margins.
        text_with_lines_y_rev = -1 * text_with_lines_y[:]
        text_with_lines_y_rev = text_with_lines_y_rev - np.min(text_with_lines_y_rev)

        sigma_gaus = 1
        region_sum_0 = gaussian_filter1d(text_with_lines_y, sigma_gaus)

        region_sum_0_updown = region_sum_0[len(region_sum_0) :: -1]

        first_nonzero = next((i for i, x in enumerate(region_sum_0) if x), None)
        last_nonzero = next((i for i, x in enumerate(region_sum_0_updown) if x), None)
        last_nonzero = len(region_sum_0) - last_nonzero

        mid_point = (last_nonzero + first_nonzero) / 2.

        one_third_right = (last_nonzero - mid_point) / 3.0
        one_third_left = (mid_point - first_nonzero) / 3.0

        peaks, _ = find_peaks(text_with_lines_y_rev, height=0)
        peaks = np.array(peaks)
        peaks = peaks[(peaks > first_nonzero) & (peaks < last_nonzero)]
        peaks = peaks[region_sum_0[peaks] < min_textline_thickness]

        if num_col == 1:
            peaks_right = peaks[peaks > mid_point]
            peaks_left = peaks[peaks < mid_point]
        if num_col == 2:
            peaks_right = peaks[peaks > (mid_point + one_third_right)]
            peaks_left = peaks[peaks < (mid_point - one_third_left)]

        try:
            point_right = np.min(peaks_right)
        except:
            point_right = last_nonzero

        try:
            point_left = np.max(peaks_left)
        except:
            point_left = first_nonzero

        if point_right >= mask_marginals.shape[1]:
            point_right = mask_marginals.shape[1] - 1

        try:
            mask_marginals[:, point_left:point_right] = 1
        except:
            mask_marginals[:, :] = 1

        mask_marginals_rotated = rotate_image(mask_marginals, -slope_deskew)

        mask_marginals_rotated_sum = mask_marginals_rotated.sum(axis=0)
        mask_marginals_rotated_sum[mask_marginals_rotated_sum != 0] = 1
        index_x = np.array(range(len(mask_marginals_rotated_sum))) + 1

        index_x_interest = index_x[mask_marginals_rotated_sum == 1]

        min_point_of_left_marginal = np.min(index_x_interest) - 16
        max_point_of_right_marginal = np.max(index_x_interest) + 16

        if min_point_of_left_marginal < 0:
            min_point_of_left_marginal = 0
        if max_point_of_right_marginal >= text_regions.shape[1]:
            max_point_of_right_marginal = text_regions.shape[1] - 1

        # Everything outside the main-text mask becomes a marginal candidate.
        text_regions[(mask_marginals_rotated[:, :] != 1) & (text_regions[:, :] == 1)] = 4

        pixel_img = 4
        min_area_text = 0.00001
        polygons_of_marginals = return_contours_of_interested_region(text_regions, pixel_img, min_area_text)

        cx_text_only, cy_text_only, x_min_text_only, x_max_text_only, y_min_text_only, y_max_text_only, y_cor_x_min_main = find_new_features_of_contours(polygons_of_marginals)

        text_regions[(text_regions[:, :] == 4)] = 1

        marginals_should_be_main_text = []

        x_min_marginals_left = []
        x_min_marginals_right = []

        for i in range(len(cx_text_only)):
            x_width_mar = abs(x_min_text_only[i] - x_max_text_only[i])
            y_height_mar = abs(y_min_text_only[i] - y_max_text_only[i])

            # Wide, not overly tall candidates are kept as marginalia.
            if x_width_mar > 16 and y_height_mar / x_width_mar < 18:
                marginals_should_be_main_text.append(polygons_of_marginals[i])
                if x_min_text_only[i] < (mid_point - one_third_left):
                    x_min_marginals_left_new = x_min_text_only[i]
                    if len(x_min_marginals_left) == 0:
                        x_min_marginals_left.append(x_min_marginals_left_new)
                    else:
                        x_min_marginals_left[0] = min(x_min_marginals_left[0], x_min_marginals_left_new)
                else:
                    x_min_marginals_right_new = x_min_text_only[i]
                    if len(x_min_marginals_right) == 0:
                        x_min_marginals_right.append(x_min_marginals_right_new)
                    else:
                        x_min_marginals_right[0] = min(x_min_marginals_right[0], x_min_marginals_right_new)

        if len(x_min_marginals_left) == 0:
            x_min_marginals_left = [0]
        if len(x_min_marginals_right) == 0:
            x_min_marginals_right = [text_regions.shape[1] - 1]

        text_regions = cv2.fillPoly(text_regions, pts=marginals_should_be_main_text, color=(4, 4))

        text_regions[:, : int(min_point_of_left_marginal)][text_regions[:, : int(min_point_of_left_marginal)] == 1] = 0
        text_regions[:, int(max_point_of_right_marginal) :][text_regions[:, int(max_point_of_right_marginal) :] == 1] = 0
    else:
        pass
    return text_regions

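# Illustrative call sketch (an interpretation of the signature, not from the
# original file): `text_with_lines` is a binary mask of text plus separator
# lines, `text_regions` the region label map (1 = main text), `num_col` the
# detected column count and `slope_deskew` the page skew in degrees:
#
#     text_regions = get_marginals(text_with_lines, text_regions, num_col,
#                                  slope_deskew, kernel=KERNEL)
#
# Text regions outside the detected main block are relabelled 4 (marginalia).
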
34  src/eynollah/utils/pil_cv2.py  Normal file
@@ -0,0 +1,34 @@
from PIL import Image
import numpy as np
from ocrd_models import OcrdExif
from cv2 import COLOR_GRAY2BGR, COLOR_RGB2BGR, COLOR_BGR2RGB, cvtColor, imread

# from sbb_binarization

def cv2pil(img):
    return Image.fromarray(np.array(cvtColor(img, COLOR_BGR2RGB)))

def pil2cv(img):
    # from ocrd/workspace.py
    color_conversion = COLOR_GRAY2BGR if img.mode in ('1', 'L') else COLOR_RGB2BGR
    pil_as_np_array = np.array(img).astype('uint8') if img.mode == '1' else np.array(img)
    return cvtColor(pil_as_np_array, color_conversion)

def check_dpi(img):
    try:
        if isinstance(img, Image.Image):
            pil_image = img
        elif isinstance(img, str):
            pil_image = Image.open(img)
        else:
            pil_image = cv2pil(img)
        exif = OcrdExif(pil_image)
        resolution = exif.resolution
        if resolution == 1:
            # A resolution of 1 is the EXIF placeholder for "unknown".
            raise Exception()
        if exif.resolutionUnit == 'cm':
            resolution /= 2.54
        return int(resolution)
    except Exception as e:
        print(e)
        # Fall back to a plausible default for scanned prints.
        return 230

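# Illustrative round-trip sketch (not part of the original file; the input
# path is hypothetical):
#
#     from PIL import Image
#     pil_img = Image.open('page.tif')
#     bgr = pil2cv(pil_img)     # NumPy array in BGR channel order
#     back = cv2pil(bgr)        # PIL image in RGB channel order
#     dpi = check_dpi(pil_img)  # falls back to 230 if EXIF is missing
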
4  src/eynollah/utils/resize.py  Normal file
@@ -0,0 +1,4 @@
import cv2

def resize_image(img_in, input_height, input_width):
    return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST)

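# Note the argument order: cv2.resize() expects (width, height), so
#
#     resize_image(img, input_height=100, input_width=200)
#
# returns an array with shape (100, 200) (plus channels, if any).
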
86  src/eynollah/utils/rotate.py  Normal file
@@ -0,0 +1,86 @@
import math

import imutils
import cv2

def rotatedRectWithMaxArea(w, h, angle):
    # Given a w x h rectangle rotated by `angle` (in radians), compute the
    # width and height of the largest axis-aligned rectangle that fits
    # entirely inside the rotated rectangle.
    if w <= 0 or h <= 0:
        return 0, 0

    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)

    # since the solutions for angle, -angle and 180-angle are all the same,
    # it suffices to look at the first quadrant and the absolute values of sin, cos:
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
        # half constrained case: two crop corners touch the longer side,
        # the other two corners are on the mid-line parallel to the longer line
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # fully constrained case: crop touches all 4 sides
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a

    return wr, hr

def rotate_max_area_new(image, rotated, angle):
    wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle))
    h, w, _ = rotated.shape
    y1 = h // 2 - int(hr / 2)
    y2 = y1 + int(hr)
    x1 = w // 2 - int(wr / 2)
    x2 = x1 + int(wr)
    return rotated[y1:y2, x1:x2]

def rotation_image_new(img, thetha):
    rotated = imutils.rotate(img, thetha)
    return rotate_max_area_new(img, rotated, thetha)

def rotate_image(img_patch, slope):
    (h, w) = img_patch.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, slope, 1.0)
    return cv2.warpAffine(img_patch, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

def rotate_image_different(img, slope):
    num_rows, num_cols = img.shape[:2]

    rotation_matrix = cv2.getRotationMatrix2D((num_cols / 2, num_rows / 2), slope, 1)
    img_rotation = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows))
    return img_rotation

def rotate_max_area(image, rotated, rotated_textline, rotated_layout, rotated_table_prediction, angle):
    wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle))
    h, w, _ = rotated.shape
    y1 = h // 2 - int(hr / 2)
    y2 = y1 + int(hr)
    x1 = w // 2 - int(wr / 2)
    x2 = x1 + int(wr)
    return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_table_prediction[y1:y2, x1:x2]

def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thetha):
    rotated = imutils.rotate(img, thetha)
    rotated_textline = imutils.rotate(textline, thetha)
    rotated_layout = imutils.rotate(text_regions_p_1, thetha)
    rotated_table_prediction = imutils.rotate(table_prediction, thetha)
    return rotate_max_area(img, rotated, rotated_textline, rotated_layout, rotated_table_prediction, thetha)

def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regions_p_fully, thetha):
    rotated = imutils.rotate(img, thetha)
    rotated_textline = imutils.rotate(textline, thetha)
    rotated_layout = imutils.rotate(text_regions_p_1, thetha)
    rotated_layout_full = imutils.rotate(text_regions_p_fully, thetha)
    return rotate_max_area_full_layout(img, rotated, rotated_textline, rotated_layout, rotated_layout_full, thetha)

def rotate_max_area_full_layout(image, rotated, rotated_textline, rotated_layout, rotated_layout_full, angle):
    wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle))
    h, w, _ = rotated.shape
    y1 = h // 2 - int(hr / 2)
    y2 = y1 + int(hr)
    x1 = w // 2 - int(wr / 2)
    x2 = x1 + int(wr)
    return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_layout_full[y1:y2, x1:x2]

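# Illustrative sketch (not part of the original file; the input path is
# hypothetical): rotate a page by 2.4 degrees and crop to the largest
# axis-aligned rectangle free of border fill:
#
#     import cv2
#     img = cv2.imread('page.png')
#     rotated = rotation_image_new(img, 2.4)
#
# rotatedRectWithMaxArea() computes the crop analytically, so none of the
# border pixels introduced by imutils.rotate() survive in the result.
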
1987  src/eynollah/utils/separate_lines.py  Normal file
File diff suppressed because it is too large
88  src/eynollah/utils/xml.py  Normal file
@@ -0,0 +1,88 @@
# pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member
# pylint: disable=invalid-name
from .counter import EynollahIdCounter
import numpy as np
from datetime import datetime

from ocrd_models.ocrd_page import (
    CoordsType,
    GlyphType,
    ImageRegionType,
    MathsRegionType,
    MetadataType,
    MetadataItemType,
    NoiseRegionType,
    OrderedGroupIndexedType,
    OrderedGroupType,
    PcGtsType,
    PageType,
    ReadingOrderType,
    RegionRefIndexedType,
    RegionRefType,
    SeparatorRegionType,
    TableRegionType,
    TextLineType,
    TextRegionType,
    UnorderedGroupIndexedType,
    UnorderedGroupType,
    WordType,
    to_xml)

def create_page_xml(imageFilename, height, width):
    now = datetime.now()
    pcgts = PcGtsType(
        Metadata=MetadataType(
            Creator='SBB_QURATOR',
            Created=now,
            LastChange=now
        ),
        Page=PageType(
            imageWidth=str(width),
            imageHeight=str(height),
            imageFilename=imageFilename,
            readingDirection='left-to-right',
            textLineOrder='top-to-bottom'
        ))
    return pcgts

def xml_reading_order(page, order_of_texts, id_of_marginalia):
    region_order = ReadingOrderType()
    og = OrderedGroupType(id="ro357564684568544579089")
    page.set_ReadingOrder(region_order)
    region_order.set_OrderedGroup(og)
    region_counter = EynollahIdCounter()
    # Text regions first, in reading order, then all marginalia.
    for idx_textregion, _ in enumerate(order_of_texts):
        og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(order_of_texts[idx_textregion] + 1)))
        region_counter.inc('region')
    for id_marginal in id_of_marginalia:
        og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal))
        region_counter.inc('region')

def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, matrix_of_orders, indexes_sorted, index_of_types, kind_of_texts, ref_point):
    indexes_sorted = np.array(indexes_sorted)
    index_of_types = np.array(index_of_types)
    kind_of_texts = np.array(kind_of_texts)

    id_of_texts = []
    order_of_texts = []

    index_of_types_1 = index_of_types[kind_of_texts == 1]
    indexes_sorted_1 = indexes_sorted[kind_of_texts == 1]

    index_of_types_2 = index_of_types[kind_of_texts == 2]
    indexes_sorted_2 = indexes_sorted[kind_of_texts == 2]

    counter = EynollahIdCounter(region_idx=ref_point)
    for idx_textregion, _ in enumerate(found_polygons_text_region):
        id_of_texts.append(counter.next_region_id)
        interest = indexes_sorted_1[indexes_sorted_1 == index_of_types_1[idx_textregion]]
        if len(interest) > 0:
            order_of_texts.append(interest[0])

    for idx_headerregion, _ in enumerate(found_polygons_text_region_h):
        id_of_texts.append(counter.next_region_id)
        interest = indexes_sorted_2[index_of_types_2[idx_headerregion]]
        order_of_texts.append(interest)

    return order_of_texts, id_of_texts

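# Illustrative sketch (not part of the original file): build a minimal
# PAGE-XML document and serialize it:
#
#     pcgts = create_page_xml('page-0001.tif', height=2000, width=1500)
#     page = pcgts.get_Page()
#     xml_reading_order(page, order_of_texts=[0, 1], id_of_marginalia=[])
#     xml_string = to_xml(pcgts)
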
317  src/eynollah/writer.py  Normal file
@@ -0,0 +1,317 @@
# pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member
# pylint: disable=import-error
from pathlib import Path
import os.path
import xml.etree.ElementTree as ET
from .utils.xml import create_page_xml, xml_reading_order
from .utils.counter import EynollahIdCounter

from ocrd_utils import getLogger
from ocrd_models.ocrd_page import (
    BorderType,
    CoordsType,
    PcGtsType,
    TextLineType,
    TextEquivType,
    TextRegionType,
    ImageRegionType,
    TableRegionType,
    SeparatorRegionType,
    to_xml
)
import numpy as np

class EynollahXmlWriter():

    def __init__(self, *, dir_out, image_filename, curved_line, textline_light, pcgts=None):
        self.logger = getLogger('eynollah.writer')
        self.counter = EynollahIdCounter()
        self.dir_out = dir_out
        self.image_filename = image_filename
        self.curved_line = curved_line
        self.textline_light = textline_light
        self.pcgts = pcgts
        self.scale_x = None  # XXX set outside __init__
        self.scale_y = None  # XXX set outside __init__
        self.height_org = None  # XXX set outside __init__
        self.width_org = None  # XXX set outside __init__

    @property
    def image_filename_stem(self):
        return Path(Path(self.image_filename).name).stem

    def calculate_page_coords(self, cont_page):
        self.logger.debug('enter calculate_page_coords')
        # Serialize the page border contour as a PAGE-XML points string,
        # scaling back to the original image size.
        points_page_print = ""
        for _, contour in enumerate(cont_page[0]):
            if len(contour) == 2:
                points_page_print += str(int((contour[0]) / self.scale_x))
                points_page_print += ','
                points_page_print += str(int((contour[1]) / self.scale_y))
            else:
                points_page_print += str(int((contour[0][0]) / self.scale_x))
                points_page_print += ','
                points_page_print += str(int((contour[0][1]) / self.scale_y))
            points_page_print = points_page_print + ' '
        return points_page_print[:-1]

    def serialize_lines_in_marginal(self, marginal_region, all_found_textline_polygons_marginals, marginal_idx, page_coord, all_box_coord_marginals, slopes_marginals, counter):
        for j in range(len(all_found_textline_polygons_marginals[marginal_idx])):
            coords = CoordsType()
            textline = TextLineType(id=counter.next_line_id, Coords=coords)
            marginal_region.add_TextLine(textline)
            points_co = ''
            for l in range(len(all_found_textline_polygons_marginals[marginal_idx][j])):
                if not (self.curved_line or self.textline_light):
                    if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2:
                        textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x))
                        textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y))
                    else:
                        textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x))
                        textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y))
                    points_co += str(textline_x_coord)
                    points_co += ','
                    points_co += str(textline_y_coord)
                # For curved or light textlines with moderate slope the
                # polygons are already in page coordinates.
                if (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) <= 45:
                    if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2:
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + page_coord[0]) / self.scale_y))
                    else:
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + page_coord[0]) / self.scale_y))
                elif (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) > 45:
                    if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2:
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y))
                    else:
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y))
                points_co += ' '
            coords.set_points(points_co[:-1])

    def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion):
        self.logger.debug('enter serialize_lines_in_region')
        for j in range(len(all_found_textline_polygons[region_idx])):
            coords = CoordsType()
            textline = TextLineType(id=counter.next_line_id, Coords=coords)
            if ocr_all_textlines_textregion:
                textline.set_TextEquiv([TextEquivType(Unicode=ocr_all_textlines_textregion[j])])
            text_region.add_TextLine(textline)
            region_bboxes = all_box_coord[region_idx]
            points_co = ''
            for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[region_idx][j]):
                if not (self.curved_line or self.textline_light):
                    if len(contour_textline) == 2:
                        textline_x_coord = max(0, int((contour_textline[0] + region_bboxes[2] + page_coord[2]) / self.scale_x))
                        textline_y_coord = max(0, int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y))
                    else:
                        textline_x_coord = max(0, int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x))
                        textline_y_coord = max(0, int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y))
                    points_co += str(textline_x_coord)
                    points_co += ','
                    points_co += str(textline_y_coord)

                if (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) <= 45:
                    if len(contour_textline) == 2:
                        points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y))
                    else:
                        points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((contour_textline[0][1] + page_coord[0]) / self.scale_y))
                elif (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) > 45:
                    if len(contour_textline) == 2:
                        points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y))
                    else:
                        points_co += str(int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x))
                        points_co += ','
                        points_co += str(int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y))
                points_co += ' '
            coords.set_points(points_co[:-1])

    def serialize_lines_in_dropcapital(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion):
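        """Write a drop-capital region's single contour as one TextLine element
        into the given PAGE-XML region node."""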
        self.logger.debug('enter serialize_lines_in_dropcapital')
        for j in range(1):
            coords = CoordsType()
            textline = TextLineType(id=counter.next_line_id, Coords=coords)
            if ocr_all_textlines_textregion:
                textline.set_TextEquiv([TextEquivType(Unicode=ocr_all_textlines_textregion[j])])
            text_region.add_TextLine(textline)
            #region_bboxes = all_box_coord[region_idx]
            points_co = ''
            for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[j]):
                if len(contour_textline) == 2:
                    points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x))
                    points_co += ','
                    points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y))
                else:
                    points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x))
                    points_co += ','
                    points_co += str(int((contour_textline[0][1] + page_coord[0]) / self.scale_y))
                points_co += ' '
            coords.set_points(points_co[:-1])

    def write_pagexml(self, pcgts):
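        """Serialize the PcGts document tree to '<image_filename_stem>.xml' in dir_out."""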
        out_fname = os.path.join(self.dir_out, self.image_filename_stem) + ".xml"
        self.logger.info("output filename: '%s'", out_fname)
        with open(out_fname, 'w') as f:
            f.write(to_xml(pcgts))

    def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines):
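        """Build the PAGE-XML document for the non-full-layout case: paragraph and
        marginalia text regions, image regions, separator lines and tables."""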
        self.logger.debug('enter build_pagexml_no_full_layout')

        # create the file structure
        pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org)
        page = pcgts.get_Page()
        page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page))))

        counter = EynollahIdCounter()
        if len(found_polygons_text_region) > 0:
            _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts))
            id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals]
            xml_reading_order(page, order_of_texts, id_of_marginalia)

        for mm in range(len(found_polygons_text_region)):
            textregion = TextRegionType(id=counter.next_region_id, type_='paragraph',
                                        Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)),
                                        )
            page.add_TextRegion(textregion)
            if ocr_all_textlines:
                ocr_textlines = ocr_all_textlines[mm]
            else:
                ocr_textlines = None
            self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines)

        for mm in range(len(found_polygons_marginals)):
            marginal = TextRegionType(id=counter.next_region_id, type_='marginalia',
                                      Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord)))
            page.add_TextRegion(marginal)
            self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter)

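        # Image-region contours may be OpenCV-style arrays of shape (N, 1, 2) or plain
        # (N, 2) point sequences, hence the two indexing variants below.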
        for mm in range(len(found_polygons_text_region_img)):
            img_region = ImageRegionType(id=counter.next_region_id, Coords=CoordsType())
            page.add_ImageRegion(img_region)
            points_co = ''
            for lmm in range(len(found_polygons_text_region_img[mm])):
                try:
                    points_co += str(int((found_polygons_text_region_img[mm][lmm, 0, 0] + page_coord[2]) / self.scale_x))
                    points_co += ','
                    points_co += str(int((found_polygons_text_region_img[mm][lmm, 0, 1] + page_coord[0]) / self.scale_y))
                    points_co += ' '
                except (IndexError, TypeError):
                    points_co += str(int((found_polygons_text_region_img[mm][lmm][0] + page_coord[2]) / self.scale_x))
                    points_co += ','
                    points_co += str(int((found_polygons_text_region_img[mm][lmm][1] + page_coord[0]) / self.scale_y))
                    points_co += ' '
            img_region.get_Coords().set_points(points_co[:-1])

        for mm in range(len(polygons_lines_to_be_written_in_xml)):
            sep_hor = SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType())
            page.add_SeparatorRegion(sep_hor)
            points_co = ''
            for lmm in range(len(polygons_lines_to_be_written_in_xml[mm])):
                points_co += str(int(polygons_lines_to_be_written_in_xml[mm][lmm, 0, 0] / self.scale_x))
                points_co += ','
                points_co += str(int(polygons_lines_to_be_written_in_xml[mm][lmm, 0, 1] / self.scale_y))
                points_co += ' '
            sep_hor.get_Coords().set_points(points_co[:-1])

        for mm in range(len(found_polygons_tables)):
            tab_region = TableRegionType(id=counter.next_region_id, Coords=CoordsType())
            page.add_TableRegion(tab_region)
            points_co = ''
            for lmm in range(len(found_polygons_tables[mm])):
                points_co += str(int((found_polygons_tables[mm][lmm, 0, 0] + page_coord[2]) / self.scale_x))
                points_co += ','
                points_co += str(int((found_polygons_tables[mm][lmm, 0, 1] + page_coord[0]) / self.scale_y))
                points_co += ' '
            tab_region.get_Coords().set_points(points_co[:-1])

        return pcgts

    def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines):
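        """Build the PAGE-XML document for the full-layout case, additionally writing
        header regions, drop capitals, image regions, separators and tables."""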
        self.logger.debug('enter build_pagexml_full_layout')

        # create the file structure
        pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org)
        page = pcgts.get_Page()
        page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page))))

        counter = EynollahIdCounter()
        _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts))
        id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals]
        xml_reading_order(page, order_of_texts, id_of_marginalia)

        for mm in range(len(found_polygons_text_region)):
            textregion = TextRegionType(id=counter.next_region_id, type_='paragraph',
                                        Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)))
            page.add_TextRegion(textregion)
            if ocr_all_textlines:
                ocr_textlines = ocr_all_textlines[mm]
            else:
                ocr_textlines = None
            self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines)

        self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h))
        for mm in range(len(found_polygons_text_region_h)):
            textregion = TextRegionType(id=counter.next_region_id, type_='header',
                                        Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord)))
            page.add_TextRegion(textregion)
            if ocr_all_textlines:
                ocr_textlines = ocr_all_textlines[mm]
            else:
                ocr_textlines = None
            self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter, ocr_textlines)

        for mm in range(len(found_polygons_marginals)):
            marginal = TextRegionType(id=counter.next_region_id, type_='marginalia',
                                      Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord)))
            page.add_TextRegion(marginal)
            self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter)

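        # Drop capitals carry no box offsets or slope estimates, so both are passed as None.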
        for mm in range(len(found_polygons_drop_capitals)):
            dropcapital = TextRegionType(id=counter.next_region_id, type_='drop-capital',
                                         Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord)))
            page.add_TextRegion(dropcapital)
            all_box_coord_drop = None
            slopes_drop = None
            self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=None)

        for mm in range(len(found_polygons_text_region_img)):
            page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord))))

        for mm in range(len(polygons_lines_to_be_written_in_xml)):
            page.add_SeparatorRegion(SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0, 0, 0, 0]))))

        for mm in range(len(found_polygons_tables)):
            page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord))))

        return pcgts

    def calculate_polygon_coords(self, contour, page_coord):
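        """Convert a contour into a PAGE-XML points string, shifting by the page
        offset and rescaling from processing resolution to the original image."""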
        self.logger.debug('enter calculate_polygon_coords')
        coords = ''
        for value_bbox in contour:
            if len(value_bbox) == 2:
                coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x))
                coords += ','
                coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y))
            else:
                coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x))
                coords += ','
                coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y))
            coords += ' '
        return coords[:-1]