commit a924af2438
Konstantin Baierer, 2025-11-28 14:45:29 +00:00 (committed via GitHub)
36 changed files with 2384 additions and 3455 deletions


@ -67,10 +67,6 @@ jobs:
make install-dev EXTRAS=OCR,plotting
make deps-test EXTRAS=OCR,plotting
- name: Hard-upgrade torch for debugging
run: |
python -m pip install --upgrade torch
- name: Test with pytest
run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml"


@ -103,15 +103,12 @@ The following options can be used to further configure the processing:
| option | description |
|-------------------|:--------------------------------------------------------------------------------------------|
| `-fl` | full layout analysis including all steps and segmentation classes (recommended) |
| `-light` | lighter and faster but simpler method for main region detection and deskewing (recommended) |
| `-tll`           | light textline detection; must be used together with the light version `-light` (recommended) |
| `-tab` | apply table detection |
| `-ae` | apply enhancement (the resulting image is saved to the output directory) |
| `-as` | apply scaling |
| `-cl` | apply contour detection for curved text lines instead of bounding boxes |
| `-ib` | apply binarization (the resulting image is saved to the output directory) |
| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) |
| `-eoi` | extract only images to output directory (other processing will not be done) |
| `-ho`            | ignore headers for reading order detection |
| `-si <directory>` | save image regions detected to this directory |
| `-sd <directory>` | save deskewed image to this directory |
@ -120,9 +117,6 @@ The following options can be used to further configure the processing:
| `-sa <directory>` | save all (plot, enhanced/binary image, layout) to this directory |
| `-thart`          | threshold for the artificial class in textline detection (default: 0.1) |
| `-tharl`          | threshold for the artificial class in layout detection (default: 0.1) |
| `-ocr`            | perform OCR |
| `-tr`             | apply transformer OCR (the default model is a CNN-RNN model) |
| `-bs_ocr`         | OCR inference batch size (default: 2 for the TrOCR model, 8 for the CNN-RNN model) |
| `-ncu` | upper limit of columns in document image |
| `-ncl` | lower limit of columns in document image |
| `-slro` | skip layout detection and reading order |
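As an illustration, here is a minimal sketch of one way to combine some of these options through click's test runner; the entry point eynollah.cli.main matches the refactored CLI further below, while the file paths are hypothetical:

from click.testing import CliRunner
from eynollah.cli import main  # CLI group as assembled in this commit

runner = CliRunner()
# Full layout analysis with table detection; -ep enables plotting and
# therefore requires at least one save option such as -sl (see table above).
result = runner.invoke(main, [
    "layout",
    "-i", "page.png",        # hypothetical input image
    "-o", "out/",            # directory for output PAGE-XML
    "-fl", "-tab",
    "-ep", "-sl", "plots/",  # save layout plots
])
print(result.output)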


@ -1,2 +1,2 @@
torch <= 2.0.1
torch
transformers <= 4.30.2


@ -1,617 +0,0 @@
from dataclasses import dataclass
import logging
import sys
import os
from typing import Union
import click
# NOTE: For debugging/predictable order of imports
from .eynollah_imports import imported_libs
from .model_zoo import EynollahModelZoo
from .cli_models import models_cli
@dataclass()
class EynollahCliCtx:
"""
Holds options relevant for all eynollah subcommands
"""
model_zoo: EynollahModelZoo
log_level : Union[str, None] = 'INFO'
@click.group()
@click.option(
"--model-basedir",
"-m",
help="directory of models",
# NOTE: not mandatory to exist so --help for subcommands works but will log a warning
# and raise exception when trying to load models in the CLI
# type=click.Path(exists=True),
default=f'{os.getcwd()}/models_eynollah',
)
@click.option(
"--model-overrides",
"-mv",
help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list",
type=(str, str, str),
multiple=True,
)
@click.option(
"--log_level",
"-l",
type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']),
help="Override log level globally to this",
)
@click.pass_context
def main(ctx, model_basedir, model_overrides, log_level):
"""
eynollah - Document Layout Analysis, Image Enhancement, OCR
"""
# Initialize logging
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.NOTSET)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S')
console_handler.setFormatter(formatter)
logging.getLogger('eynollah').addHandler(console_handler)
logging.getLogger('eynollah').setLevel(log_level or logging.INFO)
# Initialize model zoo
model_zoo = EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides)
# Initialize CLI context
ctx.obj = EynollahCliCtx(
model_zoo=model_zoo,
log_level=log_level,
)
main.add_command(models_cli, 'models')
@main.command()
@click.option(
"--input",
"-i",
help="PAGE-XML input filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--dir_in",
"-di",
help="directory of PAGE-XML input files (instead of --input)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--out",
"-o",
help="directory for output images",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.pass_context
def machine_based_reading_order(ctx, input, dir_in, out):
"""
Generate ReadingOrder with a ML model
"""
from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout
assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
orderer = machine_based_reading_order_on_layout(model_zoo=ctx.obj.model_zoo)
orderer.run(xml_filename=input,
dir_in=dir_in,
dir_out=out,
)
@main.command()
@click.option('--patches/--no-patches', default=True, help='if enabled, the model sees the image in patches.')
@click.option(
"--input-image", "--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False)
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--output",
"-o",
help="output image (if using -i) or output image directory (if using -di)",
type=click.Path(file_okay=True, dir_okay=True),
required=True,
)
@click.option(
'-M',
'--mode',
type=click.Choice(['single', 'multi']),
default='single',
help="Whether to use the (newer and faster) single-model binarization or the (slightly better) multi-model binarization"
)
@click.pass_context
def binarization(
ctx,
patches,
input_image,
mode,
dir_in,
output,
):
"""
Binarize images with a ML model
"""
from eynollah.sbb_binarize import SbbBinarizer
assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
binarizer = SbbBinarizer(model_zoo=ctx.obj.model_zoo, mode=mode)
binarizer.run(
image_path=input_image,
use_patches=patches,
output=output,
dir_in=dir_in
)
@main.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--out",
"-o",
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--num_col_upper",
"-ncu",
help="lower limit of columns in document image",
)
@click.option(
"--num_col_lower",
"-ncl",
help="upper limit of columns in document image",
)
@click.option(
"--save_org_scale/--no_save_org_scale",
"-sos/-nosos",
is_flag=True,
help="if this parameter set to true, this tool will save the enhanced image in org scale.",
)
@click.pass_context
def enhancement(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower, save_org_scale):
"""
Enhance image
"""
assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
from .image_enhancer import Enhancer
enhancer = Enhancer(
model_zoo=ctx.obj.model_zoo,
num_col_upper=num_col_upper,
num_col_lower=num_col_lower,
save_org_scale=save_org_scale,
)
enhancer.run(overwrite=overwrite,
dir_in=dir_in,
image_filename=image,
dir_out=out,
)
@main.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--out",
"-o",
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_images",
"-si",
help="if a directory is given, images in documents will be cropped and saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_layout",
"-sl",
help="if a directory is given, plot of layout will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_deskewed",
"-sd",
help="if a directory is given, deskewed image will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_all",
"-sa",
help="if a directory is given, all plots needed for documentation will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_page",
"-sp",
help="if a directory is given, page crop of image will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--enable-plotting/--disable-plotting",
"-ep/-noep",
is_flag=True,
help="If set, will plot intermediary files and images",
)
@click.option(
"--extract_only_images/--disable-extracting_only_images",
"-eoi/-noeoi",
is_flag=True,
help="If a directory is given, only images in documents will be cropped and saved there and the other processing will not be done",
)
@click.option(
"--allow-enhancement/--no-allow-enhancement",
"-ae/-noae",
is_flag=True,
help="if this parameter set to true, this tool would check that input image need resizing and enhancement or not. If so output of resized and enhanced image and corresponding layout data will be written in out directory",
)
@click.option(
"--curved-line/--no-curvedline",
"-cl/-nocl",
is_flag=True,
help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline. This should be taken into account that with this option the tool need more time to do process.",
)
@click.option(
"--textline_light/--no-textline_light",
"-tll/-notll",
is_flag=True,
help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method.",
)
@click.option(
"--full-layout/--no-full-layout",
"-fl/-nofl",
is_flag=True,
help="if this parameter set to true, this tool will try to return all elements of layout.",
)
@click.option(
"--tables/--no-tables",
"-tab/-notab",
is_flag=True,
help="if this parameter set to true, this tool will try to detect tables.",
)
@click.option(
"--right2left/--left2right",
"-r2l/-l2r",
is_flag=True,
help="if this parameter set to true, this tool will extract right-to-left reading order.",
)
@click.option(
"--input_binary/--input-RGB",
"-ib/-irgb",
is_flag=True,
help="in general, eynollah uses RGB as input but if the input document is strongly dark, bright or for any other reason you can turn binarized input on. This option does not mean that you have to provide a binary image, otherwise this means that the tool itself will binarized the RGB input document.",
)
@click.option(
"--allow_scaling/--no-allow-scaling",
"-as/-noas",
is_flag=True,
help="if this parameter set to true, this tool would check the scale and if needed it will scale it to perform better layout detection",
)
@click.option(
"--headers_off/--headers-on",
"-ho/-noho",
is_flag=True,
help="if this parameter set to true, this tool would ignore headers role in reading order",
)
@click.option(
"--light_version/--original",
"-light/-org",
is_flag=True,
help="if this parameter set to true, this tool would use lighter version",
)
@click.option(
"--ignore_page_extraction/--extract_page_included",
"-ipe/-epi",
is_flag=True,
help="if this parameter set to true, this tool would ignore page extraction",
)
@click.option(
"--reading_order_machine_based/--heuristic_reading_order",
"-romb/-hro",
is_flag=True,
help="if this parameter set to true, this tool would apply machine based reading order detection",
)
@click.option(
"--do_ocr",
"-ocr/-noocr",
is_flag=True,
help="if this parameter set to true, this tool will try to do ocr",
)
@click.option(
"--transformer_ocr",
"-tr/-notr",
is_flag=True,
help="if this parameter set to true, this tool will apply transformer ocr",
)
@click.option(
"--batch_size_ocr",
"-bs_ocr",
help="number of inference batch size of ocr model. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively",
)
@click.option(
"--num_col_upper",
"-ncu",
help="lower limit of columns in document image",
)
@click.option(
"--num_col_lower",
"-ncl",
help="upper limit of columns in document image",
)
@click.option(
"--threshold_art_class_layout",
"-tharl",
help="threshold of artifical class in the case of layout detection. The default value is 0.1",
)
@click.option(
"--threshold_art_class_textline",
"-thart",
help="threshold of artifical class in the case of textline detection. The default value is 0.1",
)
@click.option(
"--skip_layout_and_reading_order",
"-slro/-noslro",
is_flag=True,
help="if this parameter set to true, this tool will ignore layout detection and reading order. It means that textline detection will be done within printspace and contours of textline will be written in xml output file.",
)
@click.pass_context
def layout(
ctx,
image,
out,
overwrite,
dir_in,
save_images,
save_layout,
save_deskewed,
save_all,
extract_only_images,
save_page,
enable_plotting,
allow_enhancement,
curved_line,
textline_light,
full_layout,
tables,
right2left,
input_binary,
allow_scaling,
headers_off,
light_version,
reading_order_machine_based,
do_ocr,
transformer_ocr,
batch_size_ocr,
num_col_upper,
num_col_lower,
threshold_art_class_textline,
threshold_art_class_layout,
skip_layout_and_reading_order,
ignore_page_extraction,
):
"""
Detect Layout (with optional image enhancement and reading order detection)
"""
from .eynollah import Eynollah
assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep"
assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep"
assert enable_plotting or not save_all, "Plotting with -sa also requires -ep"
assert enable_plotting or not save_page, "Plotting with -sp also requires -ep"
assert enable_plotting or not save_images, "Plotting with -si also requires -ep"
assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep"
assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \
"Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae"
assert textline_light == light_version, "Both light textline detection -tll and light version -light must be set or unset equally"
assert not extract_only_images or not allow_enhancement, "Image extraction -eoi can not be set alongside allow_enhancement -ae"
assert not extract_only_images or not allow_scaling, "Image extraction -eoi can not be set alongside allow_scaling -as"
assert not extract_only_images or not light_version, "Image extraction -eoi can not be set alongside light_version -light"
assert not extract_only_images or not curved_line, "Image extraction -eoi can not be set alongside curved_line -cl"
assert not extract_only_images or not textline_light, "Image extraction -eoi can not be set alongside textline_light -tll"
assert not extract_only_images or not full_layout, "Image extraction -eoi can not be set alongside full_layout -fl"
assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab"
assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l"
assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho"
assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
eynollah = Eynollah(
model_zoo=ctx.obj.model_zoo,
extract_only_images=extract_only_images,
enable_plotting=enable_plotting,
allow_enhancement=allow_enhancement,
curved_line=curved_line,
textline_light=textline_light,
full_layout=full_layout,
tables=tables,
right2left=right2left,
input_binary=input_binary,
allow_scaling=allow_scaling,
headers_off=headers_off,
light_version=light_version,
ignore_page_extraction=ignore_page_extraction,
reading_order_machine_based=reading_order_machine_based,
do_ocr=do_ocr,
transformer_ocr=transformer_ocr,
batch_size_ocr=batch_size_ocr,
num_col_upper=num_col_upper,
num_col_lower=num_col_lower,
skip_layout_and_reading_order=skip_layout_and_reading_order,
threshold_art_class_textline=threshold_art_class_textline,
threshold_art_class_layout=threshold_art_class_layout,
)
eynollah.run(overwrite=overwrite,
image_filename=image,
dir_in=dir_in,
dir_out=out,
dir_of_cropped_images=save_images,
dir_of_layout=save_layout,
dir_of_deskewed=save_deskewed,
dir_of_all=save_all,
dir_save_page=save_page,
)
@main.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_in_bin",
"-dib",
help="directory of binarized images (in addition to --dir_in for RGB images; filename stems must match the RGB image files, with '.png' suffix).\nPerform prediction using both RGB and binary images. (This does not necessarily improve results, however it may be beneficial for certain document images.)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_xmls",
"-dx",
help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--out",
"-o",
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--dir_out_image_text",
"-doit",
help="directory for output images, newly rendered with predicted text",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--tr_ocr",
"-trocr/-notrocr",
is_flag=True,
help="if this parameter set to true, transformer ocr will be applied, otherwise cnn_rnn model.",
)
@click.option(
"--export_textline_images_and_text",
"-etit/-noetit",
is_flag=True,
help="if this parameter set to true, images and text in xml will be exported into output dir. This files can be used for training a OCR engine.",
)
@click.option(
"--do_not_mask_with_textline_contour",
"-nmtc/-mtc",
is_flag=True,
help="if this parameter set to true, cropped textline images will not be masked with textline contour.",
)
@click.option(
"--batch_size",
"-bs",
help="number of inference batch size. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively",
)
@click.option(
"--dataset_abbrevation",
"-ds_pref",
help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset",
)
@click.option(
"--min_conf_value_of_textline_text",
"-min_conf",
help="minimum OCR confidence value. Text lines with a confidence value lower than this threshold will not be included in the output XML file.",
)
@click.pass_context
def ocr(
ctx,
image,
dir_in,
dir_in_bin,
dir_xmls,
out,
dir_out_image_text,
overwrite,
tr_ocr,
export_textline_images_and_text,
do_not_mask_with_textline_contour,
batch_size,
dataset_abbrevation,
min_conf_value_of_textline_text,
):
"""
Recognize text with a CNN/RNN or transformer ML model.
"""
assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr"
# FIXME: refactor: move export_textline_images_and_text out of eynollah.py
# assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m"
assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs"
assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib"
assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit"
assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both."
from .eynollah_ocr import Eynollah_ocr
eynollah_ocr = Eynollah_ocr(
model_zoo=ctx.obj.model_zoo,
tr_ocr=tr_ocr,
export_textline_images_and_text=export_textline_images_and_text,
do_not_mask_with_textline_contour=do_not_mask_with_textline_contour,
batch_size=batch_size,
pref_of_dataset=dataset_abbrevation,
min_conf_value_of_textline_text=min_conf_value_of_textline_text)
eynollah_ocr.run(overwrite=overwrite,
dir_in=dir_in,
dir_in_bin=dir_in_bin,
image_filename=image,
dir_xmls=dir_xmls,
dir_out_image_text=dir_out_image_text,
dir_out=out,
)
if __name__ == "__main__":
main()


@ -0,0 +1,22 @@
# NOTE: For predictable order of imports of torch/shapely/tensorflow
# this must be the first import of the CLI!
from ..eynollah_imports import imported_libs
from .cli_models import models_cli
from .cli_binarize import binarize_cli
from .cli import main
from .cli_enhance import enhance_cli
from .cli_extract_images import extract_images_cli
from .cli_layout import layout_cli
from .cli_ocr import ocr_cli
from .cli_readingorder import readingorder_cli
main.add_command(binarize_cli, 'binarization')
main.add_command(enhance_cli, 'enhancement')
main.add_command(layout_cli, 'layout')
main.add_command(readingorder_cli, 'machine-based-reading-order')
main.add_command(models_cli, 'models')
main.add_command(ocr_cli, 'ocr')
main.add_command(extract_images_cli, 'extract-images')
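Once all subcommands are registered on the group, the top-level help should enumerate them; a quick sketch, again assuming the package import path shown above:

from click.testing import CliRunner
from eynollah.cli import main

runner = CliRunner()
result = runner.invoke(main, ["--help"])
# Expected to list the registered subcommands: binarization, enhancement,
# layout, machine-based-reading-order, models, ocr, extract-images
print(result.output)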

src/eynollah/cli/cli.py (new file, 66 lines)

@ -0,0 +1,66 @@
from dataclasses import dataclass
import logging
import sys
import os
from typing import Union
import click
from ..model_zoo import EynollahModelZoo
from .cli_models import models_cli
@dataclass()
class EynollahCliCtx:
"""
Holds options relevant for all eynollah subcommands
"""
model_zoo: EynollahModelZoo
log_level : Union[str, None] = 'INFO'
@click.group()
@click.option(
"--model-basedir",
"-m",
help="directory of models",
# NOTE: not mandatory to exist so --help for subcommands works but will log a warning
# and raise exception when trying to load models in the CLI
# type=click.Path(exists=True),
default=f'{os.getcwd()}/models_eynollah',
)
@click.option(
"--model-overrides",
"-mv",
help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list",
type=(str, str, str),
multiple=True,
)
@click.option(
"--log_level",
"-l",
type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']),
help="Override log level globally to this",
)
@click.pass_context
def main(ctx, model_basedir, model_overrides, log_level):
"""
eynollah - Document Layout Analysis, Image Enhancement, OCR
"""
# Initialize logging
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.NOTSET)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S')
console_handler.setFormatter(formatter)
logging.getLogger('eynollah').addHandler(console_handler)
logging.getLogger('eynollah').setLevel(log_level or logging.INFO)
# Initialize model zoo
model_zoo = EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides)
# Initialize CLI context
ctx.obj = EynollahCliCtx(
model_zoo=model_zoo,
log_level=log_level,
)
if __name__ == "__main__":
main()
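Each subcommand reaches this shared state through click's context object; a minimal hypothetical subcommand (show_ctx is illustrative, not part of this commit) showing the pattern:

import click

@click.command()
@click.pass_context
def show_ctx(ctx):
    """Hypothetical subcommand: inspect the shared EynollahCliCtx."""
    # ctx.obj is the EynollahCliCtx created in main(), so the model zoo is
    # configured once and reused by every subcommand.
    click.echo(f"log level: {ctx.obj.log_level}")
    click.echo(f"model zoo: {ctx.obj.model_zoo}")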


@ -0,0 +1,44 @@
import click
@click.command()
@click.option('--patches/--no-patches', default=True, help='if enabled, the model sees the image in patches.')
@click.option(
"--input-image", "--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False)
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--output",
"-o",
help="output image (if using -i) or output image directory (if using -di)",
type=click.Path(file_okay=True, dir_okay=True),
required=True,
)
@click.pass_context
def binarize_cli(
ctx,
patches,
input_image,
dir_in,
output,
):
"""
Binarize images with a ML model
"""
from ..sbb_binarize import SbbBinarizer
assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
binarizer = SbbBinarizer(model_zoo=ctx.obj.model_zoo)
binarizer.run(
image_path=input_image,
use_patches=patches,
output=output,
dir_in=dir_in
)
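The `bool(input_image) != bool(dir_in)` assertion used throughout these subcommands is an exclusive-or check: exactly one of the two input modes must be given. A standalone sketch of the idiom:

def require_exactly_one(single_input, directory):
    # bool(a) != bool(b) is true only when exactly one argument is truthy,
    # i.e. XOR: choose one input mode, but not both and not neither.
    assert bool(single_input) != bool(directory), \
        "Either -i (single input) or -di (directory) must be provided, but not both."

require_exactly_one("page.png", None)   # passes
# require_exactly_one(None, None)       # would raise AssertionError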


@ -0,0 +1,63 @@
import click
@click.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--out",
"-o",
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--num_col_upper",
"-ncu",
help="lower limit of columns in document image",
)
@click.option(
"--num_col_lower",
"-ncl",
help="upper limit of columns in document image",
)
@click.option(
"--save_org_scale/--no_save_org_scale",
"-sos/-nosos",
is_flag=True,
help="if this parameter set to true, this tool will save the enhanced image in org scale.",
)
@click.pass_context
def enhance_cli(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower, save_org_scale):
"""
Enhance image
"""
assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
from ..image_enhancer import Enhancer
enhancer = Enhancer(
model_zoo=ctx.obj.model_zoo,
num_col_upper=num_col_upper,
num_col_lower=num_col_lower,
save_org_scale=save_org_scale,
)
enhancer.run(overwrite=overwrite,
dir_in=dir_in,
image_filename=image,
dir_out=out,
)


@ -0,0 +1,167 @@
import click
@click.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--out",
"-o",
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_images",
"-si",
help="if a directory is given, images in documents will be cropped and saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_layout",
"-sl",
help="if a directory is given, plot of layout will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_deskewed",
"-sd",
help="if a directory is given, deskewed image will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_all",
"-sa",
help="if a directory is given, all plots needed for documentation will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_page",
"-sp",
help="if a directory is given, page crop of image will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--enable-plotting/--disable-plotting",
"-ep/-noep",
is_flag=True,
help="If set, will plot intermediary files and images",
)
@click.option(
"--input_binary/--input-RGB",
"-ib/-irgb",
is_flag=True,
help="In general, eynollah uses RGB as input but if the input document is very dark, very bright or for any other reason you can turn on input binarization. When this flag is set, eynollah will binarize the RGB input document, you should always provide RGB images to eynollah.",
)
@click.option(
"--ignore_page_extraction/--extract_page_included",
"-ipe/-epi",
is_flag=True,
help="if this parameter set to true, this tool would ignore page extraction",
)
@click.option(
"--reading_order_machine_based/--heuristic_reading_order",
"-romb/-hro",
is_flag=True,
help="if this parameter set to true, this tool would apply machine based reading order detection",
)
@click.option(
"--num_col_upper",
"-ncu",
help="lower limit of columns in document image",
)
@click.option(
"--num_col_lower",
"-ncl",
help="upper limit of columns in document image",
)
@click.option(
"--threshold_art_class_layout",
"-tharl",
help="threshold of artifical class in the case of layout detection. The default value is 0.1",
)
@click.option(
"--threshold_art_class_textline",
"-thart",
help="threshold of artifical class in the case of textline detection. The default value is 0.1",
)
@click.option(
"--skip_layout_and_reading_order",
"-slro/-noslro",
is_flag=True,
help="if this parameter set to true, this tool will ignore layout detection and reading order. It means that textline detection will be done within printspace and contours of textline will be written in xml output file.",
)
@click.pass_context
def extract_images_cli(
ctx,
image,
out,
overwrite,
dir_in,
save_images,
save_layout,
save_deskewed,
save_all,
save_page,
enable_plotting,
input_binary,
reading_order_machine_based,
num_col_upper,
num_col_lower,
threshold_art_class_textline,
threshold_art_class_layout,
skip_layout_and_reading_order,
ignore_page_extraction,
):
"""
Extract only images to the output directory (other processing will not be done)
"""
assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep"
assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep"
assert enable_plotting or not save_all, "Plotting with -sa also requires -ep"
assert enable_plotting or not save_page, "Plotting with -sp also requires -ep"
assert enable_plotting or not save_images, "Plotting with -si also requires -ep"
assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images, \
"Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae"
assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
from ..extract_images import EynollahImageExtractor
extractor = EynollahImageExtractor(
model_zoo=ctx.obj.model_zoo,
enable_plotting=enable_plotting,
input_binary=input_binary,
ignore_page_extraction=ignore_page_extraction,
reading_order_machine_based=reading_order_machine_based,
num_col_upper=num_col_upper,
num_col_lower=num_col_lower,
skip_layout_and_reading_order=skip_layout_and_reading_order,
threshold_art_class_textline=threshold_art_class_textline,
threshold_art_class_layout=threshold_art_class_layout,
)
extractor.run(overwrite=overwrite,
image_filename=image,
dir_in=dir_in,
dir_out=out,
dir_of_cropped_images=save_images,
dir_of_layout=save_layout,
dir_of_deskewed=save_deskewed,
dir_of_all=save_all,
dir_save_page=save_page,
)


@ -0,0 +1,223 @@
import click
@click.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--out",
"-o",
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_images",
"-si",
help="if a directory is given, images in documents will be cropped and saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_layout",
"-sl",
help="if a directory is given, plot of layout will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_deskewed",
"-sd",
help="if a directory is given, deskewed image will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_all",
"-sa",
help="if a directory is given, all plots needed for documentation will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--save_page",
"-sp",
help="if a directory is given, page crop of image will be saved there",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--enable-plotting/--disable-plotting",
"-ep/-noep",
is_flag=True,
help="If set, will plot intermediary files and images",
)
@click.option(
"--allow-enhancement/--no-allow-enhancement",
"-ae/-noae",
is_flag=True,
help="if this parameter set to true, this tool would check that input image need resizing and enhancement or not. If so output of resized and enhanced image and corresponding layout data will be written in out directory",
)
@click.option(
"--curved-line/--no-curvedline",
"-cl/-nocl",
is_flag=True,
help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline. This should be taken into account that with this option the tool need more time to do process.",
)
@click.option(
"--full-layout/--no-full-layout",
"-fl/-nofl",
is_flag=True,
help="if this parameter set to true, this tool will try to return all elements of layout.",
)
@click.option(
"--tables/--no-tables",
"-tab/-notab",
is_flag=True,
help="if this parameter set to true, this tool will try to detect tables.",
)
@click.option(
"--right2left/--left2right",
"-r2l/-l2r",
is_flag=True,
help="if this parameter set to true, this tool will extract right-to-left reading order.",
)
@click.option(
"--input_binary/--input-RGB",
"-ib/-irgb",
is_flag=True,
help="In general, eynollah uses RGB as input but if the input document is very dark, very bright or for any other reason you can turn on input binarization. When this flag is set, eynollah will binarize the RGB input document, you should always provide RGB images to eynollah.",
)
@click.option(
"--allow_scaling/--no-allow-scaling",
"-as/-noas",
is_flag=True,
help="if this parameter set to true, this tool would check the scale and if needed it will scale it to perform better layout detection",
)
@click.option(
"--headers_off/--headers-on",
"-ho/-noho",
is_flag=True,
help="if this parameter set to true, this tool would ignore headers role in reading order",
)
@click.option(
"--ignore_page_extraction/--extract_page_included",
"-ipe/-epi",
is_flag=True,
help="if this parameter set to true, this tool would ignore page extraction",
)
@click.option(
"--reading_order_machine_based/--heuristic_reading_order",
"-romb/-hro",
is_flag=True,
help="if this parameter set to true, this tool would apply machine based reading order detection",
)
@click.option(
"--num_col_upper",
"-ncu",
help="lower limit of columns in document image",
)
@click.option(
"--num_col_lower",
"-ncl",
help="upper limit of columns in document image",
)
@click.option(
"--threshold_art_class_layout",
"-tharl",
help="threshold of artifical class in the case of layout detection. The default value is 0.1",
)
@click.option(
"--threshold_art_class_textline",
"-thart",
help="threshold of artifical class in the case of textline detection. The default value is 0.1",
)
@click.option(
"--skip_layout_and_reading_order",
"-slro/-noslro",
is_flag=True,
help="if this parameter set to true, this tool will ignore layout detection and reading order. It means that textline detection will be done within printspace and contours of textline will be written in xml output file.",
)
@click.pass_context
def layout_cli(
ctx,
image,
out,
overwrite,
dir_in,
save_images,
save_layout,
save_deskewed,
save_all,
save_page,
enable_plotting,
allow_enhancement,
curved_line,
full_layout,
tables,
right2left,
input_binary,
allow_scaling,
headers_off,
reading_order_machine_based,
num_col_upper,
num_col_lower,
threshold_art_class_textline,
threshold_art_class_layout,
skip_layout_and_reading_order,
ignore_page_extraction,
):
"""
Detect Layout (with optional image enhancement and reading order detection)
"""
from ..eynollah import Eynollah
assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep"
assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep"
assert enable_plotting or not save_all, "Plotting with -sa also requires -ep"
assert enable_plotting or not save_page, "Plotting with -sp also requires -ep"
assert enable_plotting or not save_images, "Plotting with -si also requires -ep"
assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep"
assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \
"Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae"
assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
eynollah = Eynollah(
model_zoo=ctx.obj.model_zoo,
enable_plotting=enable_plotting,
allow_enhancement=allow_enhancement,
curved_line=curved_line,
full_layout=full_layout,
tables=tables,
right2left=right2left,
input_binary=input_binary,
allow_scaling=allow_scaling,
headers_off=headers_off,
ignore_page_extraction=ignore_page_extraction,
reading_order_machine_based=reading_order_machine_based,
num_col_upper=num_col_upper,
num_col_lower=num_col_lower,
skip_layout_and_reading_order=skip_layout_and_reading_order,
threshold_art_class_textline=threshold_art_class_textline,
threshold_art_class_layout=threshold_art_class_layout,
)
eynollah.run(overwrite=overwrite,
image_filename=image,
dir_in=dir_in,
dir_out=out,
dir_of_cropped_images=save_images,
dir_of_layout=save_layout,
dir_of_deskewed=save_deskewed,
dir_of_all=save_all,
dir_save_page=save_page,
)

src/eynollah/cli/cli_ocr.py (new file, 111 lines)

@ -0,0 +1,111 @@
import click
@click.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--dir_in",
"-di",
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_in_bin",
"-dib",
help=("""
directory of binarized images (in addition to --dir_in for RGB
images; filename stems must match the RGB image files, with '.png'
\n
Perform prediction using both RGB and binary images.
(This does not necessarily improve results, however it may be beneficial
for certain document images.
"""),
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_xmls",
"-dx",
help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--out",
"-o",
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--dir_out_image_text",
"-doit",
help="directory for output images, newly rendered with predicted text",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--tr_ocr",
"-trocr/-notrocr",
is_flag=True,
help="if this parameter set to true, transformer ocr will be applied, otherwise cnn_rnn model.",
)
@click.option(
"--do_not_mask_with_textline_contour",
"-nmtc/-mtc",
is_flag=True,
help="if this parameter set to true, cropped textline images will not be masked with textline contour.",
)
@click.option(
"--batch_size",
"-bs",
help="number of inference batch size. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively",
)
@click.option(
"--min_conf_value_of_textline_text",
"-min_conf",
help="minimum OCR confidence value. Text lines with a confidence value lower than this threshold will not be included in the output XML file.",
)
@click.pass_context
def ocr_cli(
ctx,
image,
dir_in,
dir_in_bin,
dir_xmls,
out,
dir_out_image_text,
overwrite,
tr_ocr,
do_not_mask_with_textline_contour,
batch_size,
min_conf_value_of_textline_text,
):
"""
Recognize text with a CNN/RNN or transformer ML model.
"""
assert bool(image) ^ bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both."
from ..eynollah_ocr import Eynollah_ocr
eynollah_ocr = Eynollah_ocr(
model_zoo=ctx.obj.model_zoo,
tr_ocr=tr_ocr,
do_not_mask_with_textline_contour=do_not_mask_with_textline_contour,
batch_size=batch_size,
min_conf_value_of_textline_text=min_conf_value_of_textline_text)
eynollah_ocr.run(overwrite=overwrite,
dir_in=dir_in,
dir_in_bin=dir_in_bin,
image_filename=image,
dir_xmls=dir_xmls,
dir_out_image_text=dir_out_image_text,
dir_out=out,
)


@ -0,0 +1,35 @@
import click
@click.command()
@click.option(
"--input",
"-i",
help="PAGE-XML input filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--dir_in",
"-di",
help="directory of PAGE-XML input files (instead of --input)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--out",
"-o",
help="directory for output images",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.pass_context
def readingorder_cli(ctx, input, dir_in, out):
"""
Generate ReadingOrder with a ML model
"""
from ..mb_ro_on_layout import machine_based_reading_order_on_layout
assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
orderer = machine_based_reading_order_on_layout(model_zoo=ctx.obj.model_zoo)
orderer.run(xml_filename=input,
dir_in=dir_in,
dir_out=out,
)


@ -0,0 +1,272 @@
"""
Extract image regions from document pages.
"""
from concurrent.futures import ProcessPoolExecutor
import logging
from multiprocessing import cpu_count
import os
import time
from typing import Optional
from pathlib import Path
import tensorflow as tf
import numpy as np
import cv2
from eynollah.utils.contour import filter_contours_area_of_image, return_contours_of_image, return_contours_of_interested_region
from eynollah.utils.resize import resize_image
from .model_zoo.model_zoo import EynollahModelZoo
from .eynollah import Eynollah
from .utils import box2rect, is_image_filename
from .plot import EynollahPlotter
class EynollahImageExtractor(Eynollah):
def __init__(
self,
*,
model_zoo: EynollahModelZoo,
enable_plotting : bool = False,
input_binary : bool = False,
ignore_page_extraction : bool = False,
reading_order_machine_based : bool = False,
num_col_upper : Optional[int] = None,
num_col_lower : Optional[int] = None,
threshold_art_class_layout: Optional[float] = None,
threshold_art_class_textline: Optional[float] = None,
skip_layout_and_reading_order : bool = False,
):
self.logger = logging.getLogger('eynollah.extract_images')
self.model_zoo = model_zoo
self.plotter = None
self.reading_order_machine_based = reading_order_machine_based
self.enable_plotting = enable_plotting
# --input-binary is sensible if the image is very dark or if layout detection is not working.
self.input_binary = input_binary
self.ignore_page_extraction = ignore_page_extraction
self.skip_layout_and_reading_order = skip_layout_and_reading_order
if num_col_upper:
self.num_col_upper = int(num_col_upper)
else:
self.num_col_upper = num_col_upper
if num_col_lower:
self.num_col_lower = int(num_col_lower)
else:
self.num_col_lower = num_col_lower
# for parallelization of CPU-intensive tasks:
self.executor = ProcessPoolExecutor(max_workers=cpu_count())
if threshold_art_class_layout:
self.threshold_art_class_layout = float(threshold_art_class_layout)
else:
self.threshold_art_class_layout = 0.1
if threshold_art_class_textline:
self.threshold_art_class_textline = float(threshold_art_class_textline)
else:
self.threshold_art_class_textline = 0.1
t_start = time.time()
try:
for device in tf.config.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(device, True)
except:
self.logger.warning("no GPU device available")
self.logger.info("Loading models...")
self.setup_models()
self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)")
def setup_models(self):
loadable = [
"col_classifier",
"binarization",
"page",
"extract_images",
]
self.model_zoo.load_models(*loadable)
def get_regions_light_v_extract_only_images(self,img, num_col_classifier):
self.logger.debug("enter get_regions_extract_images_only")
erosion_hurts = False
img_org = np.copy(img)
img_height_h = img_org.shape[0]
img_width_h = img_org.shape[1]
if num_col_classifier == 1:
img_w_new = 700
elif num_col_classifier == 2:
img_w_new = 900
elif num_col_classifier == 3:
img_w_new = 1500
elif num_col_classifier == 4:
img_w_new = 1800
elif num_col_classifier == 5:
img_w_new = 2200
elif num_col_classifier == 6:
img_w_new = 2500
else:
raise ValueError("num_col_classifier must be in range 1..6")
img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new)
img_resized = resize_image(img,img_h_new, img_w_new )
prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.model_zoo.get("extract_images"))
prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h )
image_page, page_coord, cont_page = self.extract_page()
prediction_regions_org = prediction_regions_org[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]]
prediction_regions_org=prediction_regions_org[:,:,0]
mask_lines_only = (prediction_regions_org[:,:] ==3)*1
mask_texts_only = (prediction_regions_org[:,:] ==1)*1
mask_images_only=(prediction_regions_org[:,:] ==2)*1
polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only)
polygons_seplines = filter_contours_area_of_image(
mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1)
polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001)
polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001)
text_regions_p_true = np.zeros(prediction_regions_org.shape)
text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3))
text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2
text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1,1,1))
text_regions_p_true[text_regions_p_true.shape[0]-15:text_regions_p_true.shape[0], :] = 0
text_regions_p_true[:, text_regions_p_true.shape[1]-15:text_regions_p_true.shape[1]] = 0
##polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001)
polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.001)
polygons_of_images_fin = []
for ploy_img_ind in polygons_of_images:
box = _, _, w, h = cv2.boundingRect(ploy_img_ind)
if h < 150 or w < 150:
pass
else:
page_coord_img = box2rect(box) # type: ignore
polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]],
[page_coord_img[3], page_coord_img[0]],
[page_coord_img[3], page_coord_img[1]],
[page_coord_img[2], page_coord_img[1]]]))
self.logger.debug("exit get_regions_extract_images_only")
return (text_regions_p_true,
erosion_hurts,
polygons_seplines,
polygons_of_images_fin,
image_page,
page_coord,
cont_page)
def run(self,
overwrite: bool = False,
image_filename: Optional[str] = None,
dir_in: Optional[str] = None,
dir_out: Optional[str] = None,
dir_of_cropped_images: Optional[str] = None,
dir_of_layout: Optional[str] = None,
dir_of_deskewed: Optional[str] = None,
dir_of_all: Optional[str] = None,
dir_save_page: Optional[str] = None,
):
"""
Get image and scales, then extract the page of scanned image
"""
self.logger.debug("enter run")
t0_tot = time.time()
# Log enabled features directly (full_layout/tables are not set by this
# subclass, so read them defensively with a False default)
enabled_modes = []
if getattr(self, "full_layout", False):
enabled_modes.append("Full layout analysis")
if getattr(self, "tables", False):
enabled_modes.append("Table detection")
if enabled_modes:
self.logger.info("Enabled modes: " + ", ".join(enabled_modes))
if self.enable_plotting:
self.logger.info("Saving debug plots")
if dir_of_cropped_images:
self.logger.info(f"Saving cropped images to: {dir_of_cropped_images}")
if dir_of_layout:
self.logger.info(f"Saving layout plots to: {dir_of_layout}")
if dir_of_deskewed:
self.logger.info(f"Saving deskewed images to: {dir_of_deskewed}")
if dir_in:
ls_imgs = [os.path.join(dir_in, image_filename)
for image_filename in filter(is_image_filename,
os.listdir(dir_in))]
elif image_filename:
ls_imgs = [image_filename]
else:
raise ValueError("run requires either a single image filename or a directory")
for img_filename in ls_imgs:
self.logger.info(img_filename)
t0 = time.time()
self.reset_file_name_dir(img_filename, dir_out)
if self.enable_plotting:
self.plotter = EynollahPlotter(dir_out=dir_out,
dir_of_all=dir_of_all,
dir_save_page=dir_save_page,
dir_of_deskewed=dir_of_deskewed,
dir_of_cropped_images=dir_of_cropped_images,
dir_of_layout=dir_of_layout,
image_filename_stem=Path(img_filename).stem)
#print("text region early -11 in %.1fs", time.time() - t0)
if os.path.exists(self.writer.output_filename):
if overwrite:
self.logger.warning("will overwrite existing output file '%s'", self.writer.output_filename)
else:
self.logger.warning("will skip input for existing output file '%s'", self.writer.output_filename)
continue
pcgts = self.run_single()
self.logger.info("Job done in %.1fs", time.time() - t0)
self.writer.write_pagexml(pcgts)
if dir_in:
self.logger.info("All jobs done in %.1fs", time.time() - t0_tot)
def run_single(self):
t0 = time.time()
self.logger.info(f"Processing file: {self.writer.image_filename}")
self.logger.info("Step 1/5: Image Enhancement")
img_res, is_image_enhanced, num_col_classifier, _ = \
self.run_enhancement()
self.logger.info(f"Image: {self.image.shape[1]}x{self.image.shape[0]}, "
f"{self.dpi} DPI, {num_col_classifier} columns")
if is_image_enhanced:
self.logger.info("Enhancement applied")
self.logger.info(f"Enhancement complete ({time.time() - t0:.1f}s)")
# Image Extraction Mode
self.logger.info("Step 2/5: Image Extraction Mode")
_, _, _, polygons_of_images, \
image_page, page_coord, cont_page = \
self.get_regions_light_v_extract_only_images(img_res, num_col_classifier)
pcgts = self.writer.build_pagexml_no_full_layout(
[], page_coord, [], [], [], [],
polygons_of_images, [], [], [], [], [], [], [], [], [],
cont_page, [], [])
if self.plotter:
self.plotter.write_images_into_directory(polygons_of_images, image_page)
self.logger.info("Image extraction complete")
return pcgts
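For reference, a hedged usage sketch of the new extractor class outside the CLI; the model directory path is an assumption (see --model-basedir above), and an empty override list is assumed to be accepted, while the constructor and run() arguments are as defined in this file:

from eynollah.model_zoo import EynollahModelZoo
from eynollah.extract_images import EynollahImageExtractor

# Hypothetical local model directory; no model overrides.
model_zoo = EynollahModelZoo(basedir="./models_eynollah", model_overrides=[])
extractor = EynollahImageExtractor(model_zoo=model_zoo)
# Extract image regions from a single page into PAGE-XML output.
extractor.run(image_filename="page.png", dir_out="out/")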

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -10,14 +10,14 @@ Image enhancer. The output can be written as same scale of input or in new predi
import logging
import os
import time
from typing import Dict, Optional
from typing import Optional
from pathlib import Path
import gc
import cv2
from keras.models import Model
import numpy as np
import tensorflow as tf
import tensorflow as tf # type: ignore
from skimage.morphology import skeletonize
from .model_zoo import EynollahModelZoo
@ -27,7 +27,6 @@ from .utils import (
is_image_filename,
crop_image_inside_box
)
from .patch_encoder import PatchEncoder, Patches
DPI_THRESHOLD = 298
KERNEL = np.ones((5, 5), np.uint8)
@ -43,7 +42,6 @@ class Enhancer:
save_org_scale : bool = False,
):
self.input_binary = False
self.light_version = False
self.save_org_scale = save_org_scale
if num_col_upper:
self.num_col_upper = int(num_col_upper)
@ -69,16 +67,10 @@ class Enhancer:
ret = {}
if image_filename:
ret['img'] = cv2.imread(image_filename)
if self.light_version:
self.dpi = 100
else:
self.dpi = 0#check_dpi(image_filename)
self.dpi = 100
else:
ret['img'] = pil2cv(image_pil)
if self.light_version:
self.dpi = 100
else:
self.dpi = 0#check_dpi(image_pil)
self.dpi = 100
ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY)
for prefix in ('', '_grayscale'):
ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8)
@ -98,9 +90,6 @@ class Enhancer:
key += '_uint8'
return self._imgs[key].copy()
def isNaN(self, num):
return num != num
def predict_enhancement(self, img):
self.logger.debug("enter predict_enhancement")
@ -271,7 +260,7 @@ class Enhancer:
return img_new, num_column_is_classified
def resize_and_enhance_image_with_column_classifier(self, light_version):
def resize_and_enhance_image_with_column_classifier(self):
self.logger.debug("enter resize_and_enhance_image_with_column_classifier")
dpi = 0#self.dpi
self.logger.info("Detected %s DPI", dpi)
@ -354,16 +343,13 @@ class Enhancer:
self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5))
if dpi < DPI_THRESHOLD:
if light_version and num_col in (1,2):
if num_col in (1,2):
img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2(
img, num_col, width_early, label_p_pred)
else:
img_new, num_column_is_classified = self.calculate_width_height_by_columns(
img, num_col, width_early, label_p_pred)
if light_version:
image_res = np.copy(img_new)
else:
image_res = self.predict_enhancement(img_new)
image_res = np.copy(img_new)
is_image_enhanced = True
else:
@ -657,11 +643,11 @@ class Enhancer:
gc.collect()
return prediction_true
def run_enhancement(self, light_version):
def run_enhancement(self):
t_in = time.time()
self.logger.info("Resizing and enhancing image...")
is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = \
self.resize_and_enhance_image_with_column_classifier(light_version)
self.resize_and_enhance_image_with_column_classifier()
self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ')
return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified
@ -669,7 +655,7 @@ class Enhancer:
def run_single(self):
t0 = time.time()
img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(light_version=False)
img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement()
return img_res, is_image_enhanced
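As with the extractor, the simplified Enhancer can be driven directly; a short sketch under the same assumptions (hypothetical paths, empty model overrides), using the constructor and run() arguments shown in the CLI above:

from eynollah.model_zoo import EynollahModelZoo
from eynollah.image_enhancer import Enhancer

model_zoo = EynollahModelZoo(basedir="./models_eynollah", model_overrides=[])
enhancer = Enhancer(model_zoo=model_zoo, save_org_scale=True)  # keep input scale
enhancer.run(dir_in="pages/", dir_out="enhanced/")  # batch over a directory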


@ -49,8 +49,6 @@ class machine_based_reading_order_on_layout:
self.logger.warning("no GPU device available")
self.model_zoo.load_model('reading_order')
# FIXME: light_version is always true, no need for checks in the code
self.light_version = True
def read_xml(self, xml_file):
tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8'))
@ -517,7 +515,7 @@ class machine_based_reading_order_on_layout:
min_cont_size_to_be_dilated = 10
if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version:
if len(contours_only_text_parent)>min_cont_size_to_be_dilated:
cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent)
args_cont_located = np.array(range(len(contours_only_text_parent)))
@ -617,13 +615,13 @@ class machine_based_reading_order_on_layout:
img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12,
int(x_min_main[j]):int(x_max_main[j])] = 1
co_text_all_org = contours_only_text_parent + contours_only_text_parent_h
if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version:
if len(contours_only_text_parent)>min_cont_size_to_be_dilated:
co_text_all = contours_only_dilated + contours_only_text_parent_h
else:
co_text_all = contours_only_text_parent + contours_only_text_parent_h
else:
co_text_all_org = contours_only_text_parent
if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version:
if len(contours_only_text_parent)>min_cont_size_to_be_dilated:
co_text_all = contours_only_dilated
else:
co_text_all = contours_only_text_parent
@ -702,7 +700,7 @@ class machine_based_reading_order_on_layout:
##id_all_text = np.array(id_all_text)[index_sort]
if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version:
if len(contours_only_text_parent)>min_cont_size_to_be_dilated:
org_contours_indexes = []
for ind in range(len(ordered)):
region_with_curr_order = ordered[ind]


@ -4,7 +4,7 @@ from .specs import EynollahModelSpec, EynollahModelSpecSet
ZENODO = "https://zenodo.org/records/17295988/files"
MODELS_VERSION = "v0_7_0"
def dist_url(dist_name: str) -> str:
def dist_url(dist_name: str="layout") -> str:
return f'{ZENODO}/models_{dist_name}_{MODELS_VERSION}.zip'
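With the new default argument, dist_url() resolves to the layout distribution; a quick check of the resulting URLs, derived from the ZENODO and MODELS_VERSION constants above:

assert dist_url() == "https://zenodo.org/records/17295988/files/models_layout_v0_7_0.zip"
assert dist_url("extra") == "https://zenodo.org/records/17295988/files/models_extra_v0_7_0.zip"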
DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
@ -13,8 +13,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="enhancement",
variant='',
filename="models_eynollah/eynollah-enhancement_20210425",
dists=['enhancement', 'layout', 'ci'],
dist_url=dist_url("enhancement"),
dist_url=dist_url(),
type='Keras',
),
@ -22,8 +21,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="binarization",
variant='hybrid',
filename="models_eynollah/eynollah-binarization-hybrid_20230504/model_bin_hybrid_trans_cnn_sbb_ens",
dists=['layout', 'binarization', ],
dist_url=dist_url("binarization"),
dist_url=dist_url(),
type='Keras',
),
@ -31,8 +29,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="binarization",
variant='20210309',
filename="models_eynollah/eynollah-binarization_20210309",
dists=['binarization'],
dist_url=dist_url("binarization"),
dist_url=dist_url("extra"),
type='Keras',
),
@ -40,44 +37,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="binarization",
variant='',
filename="models_eynollah/eynollah-binarization_20210425",
dists=['binarization'],
dist_url=dist_url("binarization"),
type='Keras',
),
EynollahModelSpec(
category="binarization_multi_1",
variant='',
filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin1",
dist_url=dist_url("binarization"),
dists=['binarization'],
type='Keras',
),
EynollahModelSpec(
category="binarization_multi_2",
variant='',
filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin2",
dist_url=dist_url("binarization"),
dists=['binarization'],
type='Keras',
),
EynollahModelSpec(
category="binarization_multi_3",
variant='',
filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin3",
dist_url=dist_url("binarization"),
dists=['binarization'],
type='Keras',
),
EynollahModelSpec(
category="binarization_multi_4",
variant='',
filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin4",
dist_url=dist_url("binarization"),
dists=['binarization'],
dist_url=dist_url("extra"),
type='Keras',
),
@ -85,8 +45,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="col_classifier",
variant='',
filename="models_eynollah/eynollah-column-classifier_20210425",
dist_url=dist_url("layout"),
dists=['layout'],
dist_url=dist_url(),
type='Keras',
),
@ -94,8 +53,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="page",
variant='',
filename="models_eynollah/model_eynollah_page_extraction_20250915",
dist_url=dist_url("layout"),
dists=['layout'],
dist_url=dist_url(),
type='Keras',
),
@ -103,37 +61,33 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="region",
variant='',
filename="models_eynollah/eynollah-main-regions-ensembled_20210425",
dist_url=dist_url("layout"),
dists=['layout'],
dist_url=dist_url(),
type='Keras',
),
EynollahModelSpec(
category="region",
variant='extract_only_images',
category="extract_images",
variant='',
filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18",
dist_url=dist_url("layout"),
dists=['layout'],
dist_url=dist_url(),
type='Keras',
),
EynollahModelSpec(
category="region",
variant='light',
variant='',
filename="models_eynollah/eynollah-main-regions_20220314",
dist_url=dist_url("layout"),
dist_url=dist_url(),
help="early layout",
dists=['layout'],
type='Keras',
),
EynollahModelSpec(
category="region_p2",
variant='',
variant='non-light',
filename="models_eynollah/eynollah-main-regions-aug-rotation_20210425",
dist_url=dist_url("layout"),
dist_url=dist_url('extra'),
help="early layout, non-light, 2nd part",
dists=['layout'],
type='Keras',
),
@ -147,7 +101,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
#filename="models_eynollah/model_3_eraly_layout_no_patches_1_2_spaltige",
filename="models_eynollah/modelens_e_l_all_sp_0_1_2_3_4_171024",
dist_url=dist_url("layout"),
dists=['layout'],
help="early layout, light, 1-or-2-column",
type='Keras',
),
@ -162,9 +115,8 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
#'filename="models_eynollah/modelens_full_lay_1_2_221024",
#'filename="models_eynollah/eynollah-full-regions-1column_20210425",
filename="models_eynollah/modelens_full_lay_1__4_3_091124",
dist_url=dist_url("layout"),
dist_url=dist_url(),
help="full layout / no patches",
dists=['layout'],
type='Keras',
),
@ -182,9 +134,8 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
# filename="models_eynollah/modelens_full_layout_24_till_28",
# filename="models_eynollah/model_2_full_layout_new_trans",
filename="models_eynollah/modelens_full_lay_1__4_3_091124",
dist_url=dist_url("layout"),
dist_url=dist_url(),
help="full layout / with patches",
dists=['layout'],
type='Keras',
),
@ -197,14 +148,13 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
#filename="models_eynollah/model_mb_ro_aug_ens_8",
#filename="models_eynollah/model_ens_reading_order_machine_based",
filename="models_eynollah/model_eynollah_reading_order_20250824",
dist_url=dist_url("reading_order"),
dists=['layout', 'reading_order'],
dist_url=dist_url(),
type='Keras',
),
EynollahModelSpec(
category="textline",
variant='',
variant='non-light',
#filename="models_eynollah/modelens_textline_1_4_16092024",
#filename="models_eynollah/model_textline_ens_3_4_5_6_artificial",
#filename="models_eynollah/modelens_textline_1_3_4_20240915",
@ -212,36 +162,32 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
#filename="models_eynollah/modelens_textline_9_12_13_14_15",
#filename="models_eynollah/eynollah-textline_20210425",
filename="models_eynollah/modelens_textline_0_1__2_4_16092024",
dist_url=dist_url("layout"),
dists=['layout'],
dist_url=dist_url('extra'),
type='Keras',
),
EynollahModelSpec(
category="textline",
variant='light',
variant='',
#filename="models_eynollah/eynollah-textline_light_20210425",
filename="models_eynollah/modelens_textline_0_1__2_4_16092024",
dist_url=dist_url("layout"),
dists=['layout'],
dist_url=dist_url(),
type='Keras',
),
EynollahModelSpec(
category="table",
variant='non-light',
filename="models_eynollah/eynollah-tables_20210319",
dist_url=dist_url('extra'),
type='Keras',
),
EynollahModelSpec(
category="table",
variant='',
filename="models_eynollah/eynollah-tables_20210319",
dist_url=dist_url("layout"),
dists=['layout'],
type='Keras',
),
EynollahModelSpec(
category="table",
variant='light',
filename="models_eynollah/modelens_table_0t4_201124",
dist_url=dist_url("layout"),
dists=['layout'],
dist_url=dist_url(),
type='Keras',
),
@ -250,7 +196,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
variant='',
filename="models_eynollah/model_eynollah_ocr_cnnrnn_20250930",
dist_url=dist_url("ocr"),
dists=['layout', 'ocr'],
type='Keras',
),
@ -260,7 +205,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
filename="models_eynollah/model_eynollah_ocr_cnnrnn__degraded_20250805/",
help="slightly better at degraded Fraktur",
dist_url=dist_url("ocr"),
dists=['ocr'],
type='Keras',
),
@ -269,7 +213,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
variant='',
filename="characters_org.txt",
dist_url=dist_url("ocr"),
dists=['ocr'],
type='decoder',
),
@ -278,7 +221,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
variant='',
filename="characters_org.txt",
dist_url=dist_url("ocr"),
dists=['ocr'],
type='List[str]',
),
@ -286,9 +228,8 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="ocr",
variant='tr',
filename="models_eynollah/model_eynollah_ocr_trocr_20250919",
dist_url=dist_url("trocr"),
dist_url=dist_url("ocr"),
help='much slower transformer-based',
dists=['trocr'],
type='Keras',
),
@ -296,8 +237,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="trocr_processor",
variant='',
filename="models_eynollah/model_eynollah_ocr_trocr_20250919",
dist_url=dist_url("trocr"),
dists=['trocr'],
dist_url=dist_url("ocr"),
type='TrOCRProcessor',
),
@ -305,8 +245,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([
category="trocr_processor",
variant='htr',
filename="models_eynollah/microsoft/trocr-base-handwritten",
dist_url=dist_url("trocr"),
dists=['trocr'],
dist_url=dist_url("extra"),
type='TrOCRProcessor',
),
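For reference, a minimal sketch of how the new dist_url default resolves; the constants are copied from the hunk above, and the asserts only spell out the string composition:

ZENODO = "https://zenodo.org/records/17295988/files"
MODELS_VERSION = "v0_7_0"

def dist_url(dist_name: str = "layout") -> str:
    # with no argument, point at the "layout" bundle most categories ship in
    return f'{ZENODO}/models_{dist_name}_{MODELS_VERSION}.zip'

assert dist_url() == f'{ZENODO}/models_layout_v0_7_0.zip'
assert dist_url("extra") == f'{ZENODO}/models_extra_v0_7_0.zip'

Together with the removal of the per-model dists lists, each spec now names exactly one download: the smallest distribution that contains it.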


@ -176,13 +176,12 @@ class EynollahModelZoo:
spec.category,
spec.variant,
spec.help,
', '.join(spec.dists),
f'Yes, at {self.model_path(spec.category, spec.variant)}'
if self.model_path(spec.category, spec.variant).exists()
else f'No, download {spec.dist_url}',
# self.model_path(spec.category, spec.variant),
]
for spec in self.specs.specs
for spec in sorted(self.specs.specs, key=lambda x: x.dist_url)
],
headers=[
'Type',


@ -10,8 +10,6 @@ class EynollahModelSpec():
category: str
# Relative filename to the models_eynollah directory in the dists
filename: str
# basename of the ZIP files that should contain this model
dists: List[str]
# URL to the smallest model distribution containing this model (link to Zenodo)
dist_url: str
type: str
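Concretely, a spec entry after the dists removal carries a single URL; the values below are copied from the enhancement entry above, and the relative import mirrors the spec-set module:

from .specs import EynollahModelSpec

spec = EynollahModelSpec(
    category="enhancement",
    variant='',
    filename="models_eynollah/eynollah-enhancement_20210425",
    dist_url=dist_url(),  # one URL replaces the removed 'dists' list
    type='Keras',
)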


@ -29,16 +29,6 @@
"type": "boolean",
"default": true,
"description": "Try to detect all element subtypes, including drop-caps and headings"
},
"light_version": {
"type": "boolean",
"default": true,
"description": "Try to detect all element subtypes in light version (faster+simpler method for main region detection and deskewing)"
},
"textline_light": {
"type": "boolean",
"default": true,
"description": "Light version need textline light. If this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method."
},
"tables": {
"type": "boolean",


@ -1,3 +1,6 @@
# NOTE: For predictable order of imports of torch/shapely/tensorflow
# this must be the first import of the CLI!
from .eynollah_imports import imported_libs
from .processor import EynollahProcessor
from click import command
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor


@ -40,7 +40,7 @@ class SbbBinarizeProcessor(Processor):
# resolve relative path via OCR-D ResourceManager
assert isinstance(self.parameter, frozendict)
model_zoo = EynollahModelZoo(basedir=self.parameter['model'])
self.binarizer = SbbBinarizer(model_zoo=model_zoo, mode='single', logger=self.logger)
self.binarizer = SbbBinarizer(model_zoo=model_zoo, logger=self.logger)
def process_page_pcgts(self, *input_pcgts: Optional[OcrdPage], page_id: Optional[str] = None) -> OcrdPageResult:
"""
@ -103,7 +103,7 @@ class SbbBinarizeProcessor(Processor):
line_image_bin = cv2pil(self.binarizer.run(image=pil2cv(line_image), use_patches=True))
# update PAGE (reference the image file):
line_image_ref = AlternativeImageType(comments=line_xywh['features'] + ',binarized')
line.add_AlternativeImage(region_image_ref)
line.add_AlternativeImage(line_image_ref)
result.images.append(OcrdPageResultImage(line_image_bin, line.id + '.IMG-BIN', line_image_ref))
return result


@ -18,9 +18,6 @@ class EynollahProcessor(Processor):
def setup(self) -> None:
assert self.parameter
if self.parameter['textline_light'] != self.parameter['light_version']:
raise ValueError("Error: You must set or unset both parameter 'textline_light' (to enable light textline detection), "
"and parameter 'light_version' (faster+simpler method for main region detection and deskewing)")
model_zoo = EynollahModelZoo(basedir=self.parameter['models'])
self.eynollah = Eynollah(
model_zoo=model_zoo,
@ -29,8 +26,6 @@ class EynollahProcessor(Processor):
right2left=self.parameter['right_to_left'],
reading_order_machine_based=self.parameter['reading_order_machine_based'],
ignore_page_extraction=self.parameter['ignore_page_extraction'],
light_version=self.parameter['light_version'],
textline_light=self.parameter['textline_light'],
full_layout=self.parameter['full_layout'],
allow_scaling=self.parameter['allow_scaling'],
headers_off=self.parameter['headers_off'],
@ -93,7 +88,6 @@ class EynollahProcessor(Processor):
dir_out=None,
image_filename=image_filename,
curved_line=self.eynollah.curved_line,
textline_light=self.eynollah.textline_light,
pcgts=pcgts)
self.eynollah.run_single()
return result


@ -9,15 +9,13 @@ Tool to load model and binarize a given image.
import os
import logging
from pathlib import Path
from typing import Dict, Optional
from typing import Optional
import numpy as np
import cv2
from ocrd_utils import tf_disable_interactive_logs
from eynollah.model_zoo import EynollahModelZoo
from eynollah.model_zoo.types import AnyModel
tf_disable_interactive_logs()
import tensorflow as tf
from tensorflow.python.keras import backend as tensorflow_backend
@ -33,12 +31,10 @@ class SbbBinarizer:
self,
*,
model_zoo: EynollahModelZoo,
mode: str,
logger: Optional[logging.Logger] = None,
):
self.logger = logger if logger else logging.getLogger('eynollah.binarization')
self.model_zoo = model_zoo
self.models = self.setup_models(mode)
self.models = (model_zoo.model_path('binarization'), model_zoo.load_model('binarization'))
self.session = self.start_new_session()
def start_new_session(self):
@ -49,12 +45,6 @@ class SbbBinarizer:
tensorflow_backend.set_session(session)
return session
def setup_models(self, mode: str) -> Dict[Path, AnyModel]:
return {
self.model_zoo.model_path(v): self.model_zoo.load_model(v)
for v in (['binarization'] if mode == 'single' else [f'binarization_multi_{i}' for i in range(1, 5)])
}
def end_session(self):
tensorflow_backend.clear_session()
self.session.close()
@ -330,8 +320,38 @@ class SbbBinarizer:
if image_path is not None:
image = cv2.imread(image_path)
img_last = 0
for n, (model_file, model) in enumerate(self.models.items()):
self.logger.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file, n + 1, len(self.models.keys()))
model_file, model = self.models
self.logger.info('Predicting %s with model %s', image_path if image_path else '[image]', model_file)
res = self.predict(model, image, use_patches)
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
res[:, :][res[:, :] == 0] = 2
res = res - 1
res = res * 255
img_fin[:, :, 0] = res
img_fin[:, :, 1] = res
img_fin[:, :, 2] = res
img_fin = img_fin.astype(np.uint8)
img_fin = (res[:, :] == 0) * 255
img_last = img_last + img_fin
img_last[:, :][img_last[:, :] > 0] = 255
img_last = (img_last[:, :] == 0) * 255
if output:
self.logger.info('Writing binarized image to %s', output)
cv2.imwrite(output, img_last)
return img_last
else:
ls_imgs = list(filter(is_image_filename, os.listdir(dir_in)))
self.logger.info("Found %d image files to binarize in %s", len(ls_imgs), dir_in)
for i, image_path in enumerate(ls_imgs):
self.logger.info('Binarizing [%3d/%d] %s', i + 1, len(ls_imgs), image_path)
image_stem = image_path.split('.')[0]
image = cv2.imread(os.path.join(dir_in,image_path) )
img_last = 0
model_file, model = self.models
self.logger.info('Predicting %s with model %s', image_path if image_path else '[image]', model_file)
res = self.predict(model, image, use_patches)
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
@ -346,39 +366,6 @@ class SbbBinarizer:
img_fin = (res[:, :] == 0) * 255
img_last = img_last + img_fin
kernel = np.ones((5, 5), np.uint8)
img_last[:, :][img_last[:, :] > 0] = 255
img_last = (img_last[:, :] == 0) * 255
if output:
self.logger.info('Writing binarized image to %s', output)
cv2.imwrite(output, img_last)
return img_last
else:
ls_imgs = list(filter(is_image_filename, os.listdir(dir_in)))
self.logger.info("Found %d image files to binarize in %s", len(ls_imgs), dir_in)
for i, image_name in enumerate(ls_imgs):
image_stem = image_name.split('.')[0]
self.logger.info('Binarizing [%3d/%d] %s', i + 1, len(ls_imgs), image_name)
image = cv2.imread(os.path.join(dir_in,image_name) )
img_last = 0
for n, (model_file, model) in enumerate(self.models.items()):
self.logger.info('Predicting %s with model %s [%s/%s]', image_name, model_file, n + 1, len(self.models.keys()))
res = self.predict(model, image, use_patches)
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
res[:, :][res[:, :] == 0] = 2
res = res - 1
res = res * 255
img_fin[:, :, 0] = res
img_fin[:, :, 1] = res
img_fin[:, :, 2] = res
img_fin = img_fin.astype(np.uint8)
img_fin = (res[:, :] == 0) * 255
img_last = img_last + img_fin
kernel = np.ones((5, 5), np.uint8)
img_last[:, :][img_last[:, :] > 0] = 255
img_last = (img_last[:, :] == 0) * 255
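A usage sketch of the simplified binarizer: the mode argument is gone and a single 'binarization' model is loaded from the zoo. The SbbBinarizer import path is an assumption; the constructor and run() call match the hunks above.

import cv2
from eynollah.model_zoo import EynollahModelZoo
from eynollah.sbb_binarize import SbbBinarizer  # import path assumed

model_zoo = EynollahModelZoo(basedir='models_eynollah')
binarizer = SbbBinarizer(model_zoo=model_zoo)

# run() accepts an in-memory image and returns the binarized array
bin_img = binarizer.run(image=cv2.imread('page.png'), use_patches=True)
cv2.imwrite('page-bin.png', bin_img)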


@ -8,6 +8,7 @@ from .build_model_load_pretrained_weights_and_save import build_model_load_pretr
from .generate_gt_for_training import main as generate_gt_cli
from .inference import main as inference_cli
from .train import ex
from .extract_line_gt import linegt_cli
@click.command(context_settings=dict(
ignore_unknown_options=True,
@ -24,3 +25,4 @@ main.add_command(build_model_load_pretrained_weights_and_save)
main.add_command(generate_gt_cli, 'generate-gt')
main.add_command(inference_cli, 'inference')
main.add_command(train_cli, 'train')
main.add_command(linegt_cli, 'export_textline_images_and_text')


@ -0,0 +1,136 @@
from logging import Logger, getLogger
from typing import Optional
from pathlib import Path
import os
import click
import cv2
import xml.etree.ElementTree as ET
import numpy as np
from ..utils import is_image_filename
@click.command()
@click.option(
"--image",
"-i",
help="input image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--dir_in",
"-di",
'image_filename',
help="directory of input images (instead of --image)",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_xmls",
"-dx",
help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--out",
"-o",
'dir_out',
help="directory for output PAGE-XML files",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--dataset_abbrevation",
"-ds_pref",
'pref_of_dataset',
help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset",
)
@click.option(
"--do_not_mask_with_textline_contour",
"-nmtc/-mtc",
is_flag=True,
help="if this parameter set to true, cropped textline images will not be masked with textline contour.",
)
def linegt_cli(
image_filename,
dir_in,
dir_xmls,
dir_out,
pref_of_dataset,
do_not_mask_with_textline_contour,
):
assert bool(dir_in) ^ bool(image_filename), "Set either --dir_in or --image, not both"
if dir_in:
ls_imgs = [
os.path.join(dir_in, image_filename) for image_filename in filter(is_image_filename, os.listdir(dir_in))
]
else:
assert image_filename
ls_imgs = [image_filename]
for dir_img in ls_imgs:
file_name = Path(dir_img).stem
dir_xml = os.path.join(dir_xmls, file_name + '.xml')
img = cv2.imread(dir_img)
total_bb_coordinates = []
tree1 = ET.parse(dir_xml, parser=ET.XMLParser(encoding="utf-8"))
root1 = tree1.getroot()
alltags = [elem.tag for elem in root1.iter()]
name_space = alltags[0].split('}')[0]
name_space = name_space.split('{')[1]
region_tags = [x for x in alltags if x.endswith('TextRegion')][0]
cropped_lines_region_indexer = []
indexer_text_region = 0
indexer_textlines = 0
# FIXME: non recursive, use OCR-D PAGE generateDS API. Or use an existing tool for this purpose altogether
for nn in root1.iter(region_tags):
for child_textregion in nn:
if child_textregion.tag.endswith("TextLine"):
for child_textlines in child_textregion:
if child_textlines.tag.endswith("Coords"):
cropped_lines_region_indexer.append(indexer_text_region)
p_h = child_textlines.attrib['points'].split(' ')
textline_coords = np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])
x, y, w, h = cv2.boundingRect(textline_coords)
total_bb_coordinates.append([x, y, w, h])
img_poly_on_img = np.copy(img)
mask_poly = np.zeros(img.shape)
mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1))
mask_poly = mask_poly[y : y + h, x : x + w, :]
img_crop = img_poly_on_img[y : y + h, x : x + w, :]
if not do_not_mask_with_textline_contour:
img_crop[mask_poly == 0] = 255
if img_crop.shape[0] == 0 or img_crop.shape[1] == 0:
continue
if child_textlines.tag.endswith("TextEquiv"):
for child_text in child_textlines:
if child_text.tag.endswith("Unicode"):
textline_text = child_text.text
if textline_text:
base_name = os.path.join(
dir_out, file_name + '_line_' + str(indexer_textlines)
)
if pref_of_dataset:
base_name += '_' + pref_of_dataset
if not do_not_mask_with_textline_contour:
base_name += '_masked'
with open(base_name + '.txt', 'w') as text_file:
text_file.write(textline_text)
cv2.imwrite(base_name + '.png', img_crop)
indexer_textlines += 1
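A hedged sketch of driving the new subcommand through click's test runner; the import path and directory names are assumptions, while the option letters come from the decorators above:

from click.testing import CliRunner
from eynollah.training.extract_line_gt import linegt_cli  # import path assumed

result = CliRunner().invoke(linegt_cli, [
    "-di", "images/",      # input images
    "-dx", "page_xml/",    # matching PAGE-XML ground truth
    "-o", "line_gt/",      # output .png/.txt pairs per textline
    "-ds_pref", "gt2025",  # optional dataset abbreviation
])
assert result.exit_code == 0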


@ -19,7 +19,6 @@ def adhere_drop_capital_region_into_corresponding_textline(
all_found_textline_polygons_h,
kernel=None,
curved_line=False,
textline_light=False,
):
# print(np.shape(all_found_textline_polygons),np.shape(all_found_textline_polygons[3]),'all_found_textline_polygonsshape')
# print(all_found_textline_polygons[3])
@ -79,7 +78,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
# region_with_intersected_drop=region_with_intersected_drop/3
region_with_intersected_drop = region_with_intersected_drop.astype(np.uint8)
# print(np.unique(img_con_all_copy[:,:,0]))
if curved_line or textline_light:
if curved_line:
if len(region_with_intersected_drop) > 1:
sum_pixels_of_intersection = []


@ -0,0 +1,16 @@
# cannot use importlib.resources until we move to 3.9+ for importlib.resources.files
import sys
from PIL import ImageFont
if sys.version_info < (3, 10):
import importlib_resources
else:
import importlib.resources as importlib_resources
def get_font():
#font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists!
font = importlib_resources.files(__package__) / "../Charis-Regular.ttf"
with importlib_resources.as_file(font) as font:
return ImageFont.truetype(font=font, size=40)
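A small usage sketch; canvas size and text are arbitrary, and get_font is the helper defined above:

from PIL import Image, ImageDraw

font = get_font()  # bundled Charis-Regular.ttf at size 40
canvas = Image.new("RGB", (600, 80), "white")
ImageDraw.Draw(canvas).text((10, 15), "Textzeile", fill="black", font=font)
canvas.save("font_demo.png")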


@ -6,7 +6,7 @@ from .contour import find_new_features_of_contours, return_contours_of_intereste
from .resize import resize_image
from .rotate import rotate_image
def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_version=False, kernel=None):
def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None):
mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1]))
mask_marginals=mask_marginals.astype(np.uint8)
@ -27,9 +27,8 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve
text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1])
if light_version:
kernel_hor = np.ones((1, 5), dtype=np.uint8)
text_with_lines = cv2.erode(text_with_lines,kernel_hor,iterations=6)
kernel_hor = np.ones((1, 5), dtype=np.uint8)
text_with_lines = cv2.erode(text_with_lines,kernel_hor,iterations=6)
text_with_lines_y=text_with_lines.sum(axis=0)
text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0)
@ -43,10 +42,7 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve
elif thickness_along_y_percent>=30 and thickness_along_y_percent<50:
min_textline_thickness=20
else:
if light_version:
min_textline_thickness=45
else:
min_textline_thickness=40
min_textline_thickness=45
if thickness_along_y_percent>=14:
@ -128,92 +124,39 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve
if max_point_of_right_marginal>=text_regions.shape[1]:
max_point_of_right_marginal=text_regions.shape[1]-1
if light_version:
text_regions_org = np.copy(text_regions)
text_regions[text_regions[:,:]==1]=4
text_regions_org = np.copy(text_regions)
text_regions[text_regions[:,:]==1]=4
pixel_img=4
min_area_text=0.00001
pixel_img=4
min_area_text=0.00001
polygon_mask_marginals_rotated = return_contours_of_interested_region(mask_marginals,1,min_area_text)
polygon_mask_marginals_rotated = return_contours_of_interested_region(mask_marginals,1,min_area_text)
polygon_mask_marginals_rotated = polygon_mask_marginals_rotated[0]
polygon_mask_marginals_rotated = polygon_mask_marginals_rotated[0]
polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text)
polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text)
cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals)
cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals)
text_regions[(text_regions[:,:]==4)]=1
text_regions[(text_regions[:,:]==4)]=1
marginlas_should_be_main_text=[]
marginlas_should_be_main_text=[]
x_min_marginals_left=[]
x_min_marginals_right=[]
x_min_marginals_left=[]
x_min_marginals_right=[]
for i in range(len(cx_text_only)):
results = cv2.pointPolygonTest(polygon_mask_marginals_rotated, (cx_text_only[i], cy_text_only[i]), False)
for i in range(len(cx_text_only)):
results = cv2.pointPolygonTest(polygon_mask_marginals_rotated, (cx_text_only[i], cy_text_only[i]), False)
if results == -1:
marginlas_should_be_main_text.append(polygons_of_marginals[i])
if results == -1:
marginlas_should_be_main_text.append(polygons_of_marginals[i])
text_regions_org=cv2.fillPoly(text_regions_org, pts =marginlas_should_be_main_text, color=(4,4))
text_regions = np.copy(text_regions_org)
text_regions_org=cv2.fillPoly(text_regions_org, pts =marginlas_should_be_main_text, color=(4,4))
text_regions = np.copy(text_regions_org)
else:
text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4
pixel_img=4
min_area_text=0.00001
polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text)
cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals)
text_regions[(text_regions[:,:]==4)]=1
marginlas_should_be_main_text=[]
x_min_marginals_left=[]
x_min_marginals_right=[]
for i in range(len(cx_text_only)):
x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i])
y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i])
if x_width_mar>16 and y_height_mar/x_width_mar<18:
marginlas_should_be_main_text.append(polygons_of_marginals[i])
if x_min_text_only[i]<(mid_point-one_third_left):
x_min_marginals_left_new=x_min_text_only[i]
if len(x_min_marginals_left)==0:
x_min_marginals_left.append(x_min_marginals_left_new)
else:
x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new)
else:
x_min_marginals_right_new=x_min_text_only[i]
if len(x_min_marginals_right)==0:
x_min_marginals_right.append(x_min_marginals_right_new)
else:
x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new)
if len(x_min_marginals_left)==0:
x_min_marginals_left=[0]
if len(x_min_marginals_right)==0:
x_min_marginals_right=[text_regions.shape[1]-1]
text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4))
#text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0
#text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0
text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0
text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0
###text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4


@ -5,8 +5,6 @@ import numpy as np
import cv2
from scipy.signal import find_peaks
from scipy.ndimage import gaussian_filter1d
from multiprocessing import Process, Queue, cpu_count
from multiprocessing import Pool
from .rotate import rotate_image
from .resize import resize_image
from .contour import (
@ -20,9 +18,7 @@ from .contour import (
from .shm import share_ndarray, wrap_ndarray_shared
from . import (
find_num_col_deskew,
crop_image_inside_box,
box2rect,
box2slice,
)
def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis):
@ -1590,65 +1586,6 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map
var = 0
return angle, var
@wrap_ndarray_shared(kw='textline_mask_tot_ea')
def do_work_of_slopes_new(
box_text, contour, contour_par,
textline_mask_tot_ea=None, slope_deskew=0.0,
logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None
):
if KERNEL is None:
KERNEL = np.ones((5, 5), np.uint8)
if logger is None:
logger = getLogger(__package__)
logger.debug('enter do_work_of_slopes_new')
x, y, w, h = box_text
crop_coor = box2rect(box_text)
mask_textline = np.zeros(textline_mask_tot_ea.shape)
mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1))
all_text_region_raw = textline_mask_tot_ea * mask_textline
all_text_region_raw = all_text_region_raw[y: y + h, x: x + w].astype(np.uint8)
img_int_p = all_text_region_raw[:,:]
img_int_p = cv2.erode(img_int_p, KERNEL, iterations=2)
if not np.prod(img_int_p.shape) or img_int_p.shape[0] /img_int_p.shape[1] < 0.1:
slope = 0
slope_for_all = slope_deskew
all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w]
cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text, 0)
else:
try:
textline_con, hierarchy = return_contours_of_image(img_int_p)
textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con,
hierarchy,
max_area=1, min_area=0.00008)
y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN
if np.isnan(y_diff_mean):
slope_for_all = MAX_SLOPE
else:
sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0)))
img_int_p[img_int_p > 0] = 1
slope_for_all = return_deskew_slop(img_int_p, sigma_des, logger=logger, plotter=plotter)
if abs(slope_for_all) <= 0.5:
slope_for_all = slope_deskew
except:
logger.exception("cannot determine angle of contours")
slope_for_all = MAX_SLOPE
if slope_for_all == MAX_SLOPE:
slope_for_all = slope_deskew
slope = slope_for_all
mask_only_con_region = np.zeros(textline_mask_tot_ea.shape)
mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1))
all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w].copy()
mask_only_con_region = mask_only_con_region[y: y + h, x: x + w]
all_text_region_raw[mask_only_con_region == 0] = 0
cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text)
return cnt_clean_rot, crop_coor, slope
@wrap_ndarray_shared(kw='textline_mask_tot_ea')
@wrap_ndarray_shared(kw='mask_texts_only')
def do_work_of_slopes_new_curved(
@ -1748,7 +1685,7 @@ def do_work_of_slopes_new_curved(
@wrap_ndarray_shared(kw='textline_mask_tot_ea')
def do_work_of_slopes_new_light(
box_text, contour, contour_par,
textline_mask_tot_ea=None, slope_deskew=0, textline_light=True,
textline_mask_tot_ea=None, slope_deskew=0,
logger=None
):
if logger is None:
@ -1765,16 +1702,10 @@ def do_work_of_slopes_new_light(
mask_only_con_region = np.zeros(textline_mask_tot_ea.shape)
mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1))
if textline_light:
all_text_region_raw = np.copy(textline_mask_tot_ea)
all_text_region_raw[mask_only_con_region == 0] = 0
cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw)
cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot,
max_area=1, min_area=0.00001)
else:
all_text_region_raw = np.copy(textline_mask_tot_ea[y: y + h, x: x + w])
mask_only_con_region = mask_only_con_region[y: y + h, x: x + w]
all_text_region_raw[mask_only_con_region == 0] = 0
cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_deskew, contour_par, box_text)
all_text_region_raw = np.copy(textline_mask_tot_ea)
all_text_region_raw[mask_only_con_region == 0] = 0
cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw)
cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot,
max_area=1, min_area=0.00001)
return cnt_clean_rot, crop_coor, slope_deskew


@ -128,6 +128,7 @@ def return_textlines_split_if_needed(textline_image, textline_image_bin=None):
return [image1, image2], None
else:
return None, None
def preprocess_and_resize_image_for_ocrcnn_model(img, image_height, image_width):
if img.shape[0]==0 or img.shape[1]==0:
img_fin = np.ones((image_height, image_width, 3))
@ -379,7 +380,6 @@ def return_rnn_cnn_ocr_of_given_textlines(image,
all_box_coord,
prediction_model,
b_s_ocr, num_to_char,
textline_light=False,
curved_line=False):
max_len = 512
padding_token = 299
@ -404,7 +404,7 @@ def return_rnn_cnn_ocr_of_given_textlines(image,
else:
for indexing2, ind_poly in enumerate(ind_poly_first):
cropped_lines_region_indexer.append(indexer_text_region)
if not (textline_light or curved_line):
if not curved_line:
ind_poly = copy.deepcopy(ind_poly)
box_ind = all_box_coord[indexing]


@ -88,3 +88,7 @@ def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region
order_of_texts.append(interest)
return order_of_texts, id_of_texts
def etree_namespace_for_element_tag(tag: str):
right = tag.find('}')
return tag[1:right]
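A quick check of the new helper's slicing behaviour; the PAGE namespace URI is only an illustrative value:

qualified = "{http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15}TextRegion"
assert etree_namespace_for_element_tag(qualified) == \
    "http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15"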


@ -4,7 +4,6 @@ from pathlib import Path
import os.path
from typing import Optional
import logging
import xml.etree.ElementTree as ET
from .utils.xml import create_page_xml, xml_reading_order
from .utils.counter import EynollahIdCounter
@ -19,18 +18,16 @@ from ocrd_models.ocrd_page import (
SeparatorRegionType,
to_xml
)
import numpy as np
class EynollahXmlWriter:
def __init__(self, *, dir_out, image_filename, curved_line,textline_light, pcgts=None):
def __init__(self, *, dir_out, image_filename, curved_line, pcgts=None):
self.logger = logging.getLogger('eynollah.writer')
self.counter = EynollahIdCounter()
self.dir_out = dir_out
self.image_filename = image_filename
self.output_filename = os.path.join(self.dir_out or "", self.image_filename_stem) + ".xml"
self.curved_line = curved_line
self.textline_light = textline_light
self.pcgts = pcgts
self.scale_x: Optional[float] = None # XXX set outside __init__
self.scale_y: Optional[float] = None # XXX set outside __init__
@ -73,13 +70,9 @@ class EynollahXmlWriter:
point = point[0]
point_x = point[0] + page_coord[2]
point_y = point[1] + page_coord[0]
# FIXME: or actually... not self.textline_light and not self.curved_line or np.abs(slopes[region_idx]) > 45?
if not self.textline_light and not (self.curved_line and np.abs(slopes[region_idx]) <= 45):
point_x += region_bboxes[2]
point_y += region_bboxes[0]
point_x = max(0, int(point_x / self.scale_x))
point_y = max(0, int(point_y / self.scale_y))
points_co += str(point_x) + ',' + str(point_y) + ' '
points_co += f'{point_x},{point_y} '
coords.set_points(points_co[:-1])
def write_pagexml(self, pcgts):
@ -88,48 +81,88 @@ class EynollahXmlWriter:
f.write(to_xml(pcgts))
def build_pagexml_no_full_layout(
self, found_polygons_text_region,
page_coord, order_of_texts, id_of_texts,
all_found_textline_polygons,
all_box_coord,
found_polygons_text_region_img,
found_polygons_marginals_left, found_polygons_marginals_right,
all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right,
all_box_coord_marginals_left, all_box_coord_marginals_right,
slopes, slopes_marginals_left, slopes_marginals_right,
cont_page, polygons_seplines,
found_polygons_tables,
**kwargs):
self,
*,
found_polygons_text_region,
page_coord,
order_of_texts,
all_found_textline_polygons,
all_box_coord,
found_polygons_text_region_img,
found_polygons_marginals_left,
found_polygons_marginals_right,
all_found_textline_polygons_marginals_left,
all_found_textline_polygons_marginals_right,
all_box_coord_marginals_left,
all_box_coord_marginals_right,
slopes,
slopes_marginals_left,
slopes_marginals_right,
cont_page,
polygons_seplines,
found_polygons_tables,
):
return self.build_pagexml_full_layout(
found_polygons_text_region, [],
page_coord, order_of_texts, id_of_texts,
all_found_textline_polygons, [],
all_box_coord, [],
found_polygons_text_region_img, found_polygons_tables, [],
found_polygons_marginals_left, found_polygons_marginals_right,
all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right,
all_box_coord_marginals_left, all_box_coord_marginals_right,
slopes, [], slopes_marginals_left, slopes_marginals_right,
cont_page, polygons_seplines,
**kwargs)
found_polygons_text_region=found_polygons_text_region,
found_polygons_text_region_h=[],
page_coord=page_coord,
order_of_texts=order_of_texts,
all_found_textline_polygons=all_found_textline_polygons,
all_found_textline_polygons_h=[],
all_box_coord=all_box_coord,
all_box_coord_h=[],
found_polygons_text_region_img=found_polygons_text_region_img,
found_polygons_tables=found_polygons_tables,
found_polygons_drop_capitals=[],
found_polygons_marginals_left=found_polygons_marginals_left,
found_polygons_marginals_right=found_polygons_marginals_right,
all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left,
all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right,
all_box_coord_marginals_left=all_box_coord_marginals_left,
all_box_coord_marginals_right=all_box_coord_marginals_right,
slopes=slopes,
slopes_h=[],
slopes_marginals_left=slopes_marginals_left,
slopes_marginals_right=slopes_marginals_right,
cont_page=cont_page,
polygons_seplines=polygons_seplines,
)
def build_pagexml_full_layout(
self,
found_polygons_text_region, found_polygons_text_region_h,
page_coord, order_of_texts, id_of_texts,
all_found_textline_polygons, all_found_textline_polygons_h,
all_box_coord, all_box_coord_h,
found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals,
found_polygons_marginals_left,found_polygons_marginals_right,
all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right,
all_box_coord_marginals_left, all_box_coord_marginals_right,
slopes, slopes_h, slopes_marginals_left, slopes_marginals_right,
cont_page, polygons_seplines,
ocr_all_textlines=None, ocr_all_textlines_h=None,
ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None,
ocr_all_textlines_drop=None,
conf_contours_textregions=None, conf_contours_textregions_h=None,
skip_layout_reading_order=False):
self,
*,
found_polygons_text_region,
found_polygons_text_region_h,
page_coord,
order_of_texts,
all_found_textline_polygons,
all_found_textline_polygons_h,
all_box_coord,
all_box_coord_h,
found_polygons_text_region_img,
found_polygons_tables,
found_polygons_drop_capitals,
found_polygons_marginals_left,
found_polygons_marginals_right,
all_found_textline_polygons_marginals_left,
all_found_textline_polygons_marginals_right,
all_box_coord_marginals_left,
all_box_coord_marginals_right,
slopes,
slopes_h,
slopes_marginals_left,
slopes_marginals_right,
cont_page,
polygons_seplines,
ocr_all_textlines=None,
ocr_all_textlines_h=None,
ocr_all_textlines_marginals_left=None,
ocr_all_textlines_marginals_right=None,
ocr_all_textlines_drop=None,
conf_contours_textregions=None,
conf_contours_textregions_h=None,
skip_layout_reading_order=False,
):
self.logger.debug('enter build_pagexml')
# create the file structure
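Since both builders are now keyword-only, call sites must name every argument, so a misplaced positional among the many parallel lists fails fast with a TypeError. A sketch of a minimal call with placeholder variables:

pcgts = writer.build_pagexml_no_full_layout(
    found_polygons_text_region=text_regions,
    page_coord=page_coord,
    order_of_texts=order_of_texts,
    all_found_textline_polygons=textline_polygons,
    all_box_coord=box_coords,
    found_polygons_text_region_img=image_regions,
    found_polygons_marginals_left=[],
    found_polygons_marginals_right=[],
    all_found_textline_polygons_marginals_left=[],
    all_found_textline_polygons_marginals_right=[],
    all_box_coord_marginals_left=[],
    all_box_coord_marginals_right=[],
    slopes=slopes,
    slopes_marginals_left=[],
    slopes_marginals_right=[],
    cont_page=cont_page,
    polygons_seplines=seplines,
    found_polygons_tables=[],
)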


@ -9,16 +9,8 @@ from ocrd_models.constants import NAMESPACES as NS
#["--allow_scaling", "--curved-line"],
["--allow_scaling", "--curved-line", "--full-layout"],
["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"],
["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based",
"--textline_light", "--light_version"],
# -ep ...
# -eoi ...
# FIXME: find out whether OCR extra was installed, otherwise skip these
["--do_ocr"],
["--do_ocr", "--light_version", "--textline_light"],
["--do_ocr", "--transformer_ocr"],
#["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"],
["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"],
# --skip_layout_and_reading_order
], ids=str)
def test_run_eynollah_layout_filename(
@ -53,7 +45,6 @@ def test_run_eynollah_layout_filename(
[
["--tables"],
["--tables", "--full-layout"],
["--tables", "--full-layout", "--textline_light", "--light_version"],
], ids=str)
def test_run_eynollah_layout_filename2(
tmp_path,